Avx10v1.MaskLoad Method

Definition

Overloads

Name Description
MaskLoad(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

__m128i _mm_mask_loadu_epi64 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 xmm1 {k1}{z}, m128

MaskLoad(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

__m256i _mm256_mask_loadu_epi32 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 ymm1 {k1}{z}, m256

MaskLoad(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

__m128i _mm_mask_loadu_epi32 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 xmm1 {k1}{z}, m128

MaskLoad(UInt16*, Vector256<UInt16>, Vector256<UInt16>)

__m256i _mm256_mask_loadu_epi16 (__m256i s, __mmask16 k, void const * mem_addr)

VMOVDQU16 ymm1 {k1}{z}, m256

MaskLoad(UInt16*, Vector128<UInt16>, Vector128<UInt16>)

__m128i _mm_mask_loadu_epi16 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU16 xmm1 {k1}{z}, m128

MaskLoad(Single*, Vector256<Single>, Vector256<Single>)

__m256 _mm256_mask_loadu_ps (__m256 s, __mmask8 k, void const * mem_addr)

VMOVUPS ymm1 {k1}{z}, m256

MaskLoad(Single*, Vector128<Single>, Vector128<Single>)

__m128 _mm_mask_loadu_ps (__m128 s, __mmask8 k, void const * mem_addr)

VMOVUPS xmm1 {k1}{z}, m128

MaskLoad(SByte*, Vector256<SByte>, Vector256<SByte>)

__m256i _mm256_mask_loadu_epi8 (__m256i s, __mmask32 k, void const * mem_addr)

VMOVDQU8 ymm1 {k1}{z}, m256

MaskLoad(SByte*, Vector128<SByte>, Vector128<SByte>)

__m128i _mm_mask_loadu_epi8 (__m128i s, __mmask16 k, void const * mem_addr)

VMOVDQU8 xmm1 {k1}{z}, m128

MaskLoad(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

__m256i _mm256_mask_loadu_epi64 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 ymm1 {k1}{z}, m256

MaskLoad(Int64*, Vector128<Int64>, Vector128<Int64>)

__m128i _mm_mask_loadu_epi64 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 xmm1 {k1}{z}, m128

MaskLoad(Int32*, Vector256<Int32>, Vector256<Int32>)

__m256i _mm256_mask_loadu_epi32 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 ymm1 {k1}{z}, m256

MaskLoad(Int32*, Vector128<Int32>, Vector128<Int32>)

__m128i _mm_mask_loadu_epi32 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 xmm1 {k1}{z}, m128

MaskLoad(Int16*, Vector256<Int16>, Vector256<Int16>)

__m256i _mm256_mask_loadu_epi16 (__m256i s, __mmask16 k, void const * mem_addr)

VMOVDQU16 ymm1 {k1}{z}, m256

MaskLoad(Int16*, Vector128<Int16>, Vector128<Int16>)

__m128i _mm_mask_loadu_epi16 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU16 xmm1 {k1}{z}, m128

MaskLoad(Double*, Vector256<Double>, Vector256<Double>)

__m256d _mm256_mask_loadu_pd (__m256d s, __mmask8 k, void const * mem_addr)

VMOVUPD ymm1 {k1}{z}, m256

MaskLoad(Double*, Vector128<Double>, Vector128<Double>)

__m128d _mm_mask_loadu_pd (__m128d s, __mmask8 k, void const * mem_addr)

VMOVUPD xmm1 {k1}{z}, m128

MaskLoad(Byte*, Vector256<Byte>, Vector256<Byte>)

__m256i _mm256_mask_loadu_epi8 (__m256i s, __mmask32 k, void const * mem_addr)

VMOVDQU8 ymm1 {k1}{z}, m256

MaskLoad(Byte*, Vector128<Byte>, Vector128<Byte>)

__m128i _mm_mask_loadu_epi8 (__m128i s, __mmask16 k, void const * mem_addr)

VMOVDQU8 xmm1 {k1}{z}, m128

MaskLoad(Int64*, Vector256<Int64>, Vector256<Int64>)

__m256i _mm256_mask_loadu_epi64 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 ymm1 {k1}{z}, m256

MaskLoad(UInt64*, Vector128<UInt64>, Vector128<UInt64>)

Source:
Avx10v1.cs

__m128i _mm_mask_loadu_epi64 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<System::UInt64> MaskLoad(System::UInt64* address, System::Runtime::Intrinsics::Vector128<System::UInt64> mask, System::Runtime::Intrinsics::Vector128<System::UInt64> merge);
public static System.Runtime.Intrinsics.Vector128<ulong> MaskLoad(ulong* address, System.Runtime.Intrinsics.Vector128<ulong> mask, System.Runtime.Intrinsics.Vector128<ulong> merge);
static member MaskLoad : nativeptr<uint64> * System.Runtime.Intrinsics.Vector128<uint64> * System.Runtime.Intrinsics.Vector128<uint64> -> System.Runtime.Intrinsics.Vector128<uint64>

Parameters

address
UInt64*
mask
Vector128<UInt64>
merge
Vector128<UInt64>

Returns

Vector128<UInt64>

Remarks

The native and managed intrinsics have different order of parameters.
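
A minimal usage sketch follows; it is not part of the reference and assumes a project with AllowUnsafeBlocks enabled and a CPU that reports Avx10v1.IsSupported. Mask elements with all bits set select the element loaded from address; all-zero mask elements keep the corresponding element of merge (partially-set mask elements are not relied on here).

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class MaskLoadSketch
{
    static unsafe void Main()
    {
        if (!Avx10v1.IsSupported)
        {
            Console.WriteLine("AVX10.1 is not supported on this CPU.");
            return;
        }

        ulong[] data = { 10, 20 };
        fixed (ulong* p = data)
        {
            // Load element 0 from memory; keep element 1 from merge.
            Vector128<ulong> mask  = Vector128.Create(ulong.MaxValue, 0UL);
            Vector128<ulong> merge = Vector128.Create(111UL, 222UL);

            Vector128<ulong> result = Avx10v1.MaskLoad(p, mask, merge);
            Console.WriteLine(result); // expected: <10, 222>
        }
    }
}

Note that, unlike the native _mm_mask_loadu_epi64(s, k, mem_addr), the managed overload takes the address first and the merge value last.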

Applies to

MaskLoad(UInt32*, Vector256<UInt32>, Vector256<UInt32>)

Source:
Avx10v1.cs

__m256i _mm256_mask_loadu_epi32 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<System::UInt32> MaskLoad(System::UInt32* address, System::Runtime::Intrinsics::Vector256<System::UInt32> mask, System::Runtime::Intrinsics::Vector256<System::UInt32> merge);
public static System.Runtime.Intrinsics.Vector256<uint> MaskLoad(uint* address, System.Runtime.Intrinsics.Vector256<uint> mask, System.Runtime.Intrinsics.Vector256<uint> merge);
static member MaskLoad : nativeptr<uint32> * System.Runtime.Intrinsics.Vector256<uint32> * System.Runtime.Intrinsics.Vector256<uint32> -> System.Runtime.Intrinsics.Vector256<uint32>

Parameters

address
UInt32*
mask
Vector256<UInt32>
merge
Vector256<UInt32>

Returns

Vector256<UInt32>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(UInt32*, Vector128<UInt32>, Vector128<UInt32>)

Source:
Avx10v1.cs

__m128i _mm_mask_loadu_epi32 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<System::UInt32> MaskLoad(System::UInt32* address, System::Runtime::Intrinsics::Vector128<System::UInt32> mask, System::Runtime::Intrinsics::Vector128<System::UInt32> merge);
public static System.Runtime.Intrinsics.Vector128<uint> MaskLoad(uint* address, System.Runtime.Intrinsics.Vector128<uint> mask, System.Runtime.Intrinsics.Vector128<uint> merge);
static member MaskLoad : nativeptr<uint32> * System.Runtime.Intrinsics.Vector128<uint32> * System.Runtime.Intrinsics.Vector128<uint32> -> System.Runtime.Intrinsics.Vector128<uint32>

Parameters

address
UInt32*
mask
Vector128<UInt32>
merge
Vector128<UInt32>

Returns

Vector128<UInt32>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(UInt16*, Vector256<UInt16>, Vector256<UInt16>)

Source:
Avx10v1.cs

__m256i _mm256_mask_loadu_epi16 (__m256i s, __mmask16 k, void const * mem_addr)

VMOVDQU16 ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<System::UInt16> MaskLoad(System::UInt16* address, System::Runtime::Intrinsics::Vector256<System::UInt16> mask, System::Runtime::Intrinsics::Vector256<System::UInt16> merge);
public static System.Runtime.Intrinsics.Vector256<ushort> MaskLoad(ushort* address, System.Runtime.Intrinsics.Vector256<ushort> mask, System.Runtime.Intrinsics.Vector256<ushort> merge);
static member MaskLoad : nativeptr<uint16> * System.Runtime.Intrinsics.Vector256<uint16> * System.Runtime.Intrinsics.Vector256<uint16> -> System.Runtime.Intrinsics.Vector256<uint16>

Parameters

address
UInt16*
mask
Vector256<UInt16>
merge
Vector256<UInt16>

Returns

Vector256<UInt16>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(UInt16*, Vector128<UInt16>, Vector128<UInt16>)

Source:
Avx10v1.cs

__m128i _mm_mask_loadu_epi16 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU16 xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<System::UInt16> MaskLoad(System::UInt16* address, System::Runtime::Intrinsics::Vector128<System::UInt16> mask, System::Runtime::Intrinsics::Vector128<System::UInt16> merge);
public static System.Runtime.Intrinsics.Vector128<ushort> MaskLoad(ushort* address, System.Runtime.Intrinsics.Vector128<ushort> mask, System.Runtime.Intrinsics.Vector128<ushort> merge);
static member MaskLoad : nativeptr<uint16> * System.Runtime.Intrinsics.Vector128<uint16> * System.Runtime.Intrinsics.Vector128<uint16> -> System.Runtime.Intrinsics.Vector128<uint16>

Parameters

address
UInt16*
mask
Vector128<UInt16>
merge
Vector128<UInt16>

Returns

Vector128<UInt16>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(Single*, Vector256<Single>, Vector256<Single>)

Source:
Avx10v1.cs

__m256 _mm256_mask_loadu_ps (__m256 s, __mmask8 k, void const * mem_addr)

VMOVUPS ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<float> MaskLoad(float* address, System::Runtime::Intrinsics::Vector256<float> mask, System::Runtime::Intrinsics::Vector256<float> merge);
public static System.Runtime.Intrinsics.Vector256<float> MaskLoad(float* address, System.Runtime.Intrinsics.Vector256<float> mask, System.Runtime.Intrinsics.Vector256<float> merge);
static member MaskLoad : nativeptr<single> * System.Runtime.Intrinsics.Vector256<single> * System.Runtime.Intrinsics.Vector256<single> -> System.Runtime.Intrinsics.Vector256<single>

Parameters

address
Single*
mask
Vector256<Single>
merge
Vector256<Single>

Returns

Vector256<Single>

Remarks

The native and managed intrinsics have different order of parameters.
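
One common use of a masked load is handling the tail of a buffer whose length is not a multiple of the vector width. The sketch below is hypothetical (not from the reference) and builds the mask from a lane-index comparison, so masked-off lanes are never read from memory and are filled from the zero merge vector instead.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class TailLoadSketch
{
    static unsafe void Main()
    {
        if (!Avx10v1.IsSupported) return;

        float[] data = { 1f, 2f, 3f, 4f, 5f };   // only 5 of the 8 lanes are valid
        int remaining = data.Length;

        // Lane i is loaded when i < remaining; the comparison yields all-ones or all-zero lanes.
        Vector256<float> laneIndex = Vector256.Create(0f, 1f, 2f, 3f, 4f, 5f, 6f, 7f);
        Vector256<float> mask = Vector256.LessThan(laneIndex, Vector256.Create((float)remaining));

        fixed (float* p = data)
        {
            Vector256<float> result = Avx10v1.MaskLoad(p, mask, Vector256<float>.Zero);
            Console.WriteLine(result); // expected: <1, 2, 3, 4, 5, 0, 0, 0>
        }
    }
}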

Applies to

MaskLoad(Single*, Vector128<Single>, Vector128<Single>)

Source:
Avx10v1.cs

__m128 _mm_mask_loadu_ps (__m128 s, __mmask8 k, void const * mem_addr)

VMOVUPS xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<float> MaskLoad(float* address, System::Runtime::Intrinsics::Vector128<float> mask, System::Runtime::Intrinsics::Vector128<float> merge);
public static System.Runtime.Intrinsics.Vector128<float> MaskLoad(float* address, System.Runtime.Intrinsics.Vector128<float> mask, System.Runtime.Intrinsics.Vector128<float> merge);
static member MaskLoad : nativeptr<single> * System.Runtime.Intrinsics.Vector128<single> * System.Runtime.Intrinsics.Vector128<single> -> System.Runtime.Intrinsics.Vector128<single>

Parameters

address
Single*
mask
Vector128<Single>
merge
Vector128<Single>

Returns

Vector128<Single>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(SByte*, Vector256<SByte>, Vector256<SByte>)

Source:
Avx10v1.cs

__m256i _mm256_mask_loadu_epi8 (__m256i s, __mmask32 k, void const * mem_addr)

VMOVDQU8 ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<System::SByte> MaskLoad(System::SByte* address, System::Runtime::Intrinsics::Vector256<System::SByte> mask, System::Runtime::Intrinsics::Vector256<System::SByte> merge);
public static System.Runtime.Intrinsics.Vector256<sbyte> MaskLoad(sbyte* address, System.Runtime.Intrinsics.Vector256<sbyte> mask, System.Runtime.Intrinsics.Vector256<sbyte> merge);
static member MaskLoad : nativeptr<sbyte> * System.Runtime.Intrinsics.Vector256<sbyte> * System.Runtime.Intrinsics.Vector256<sbyte> -> System.Runtime.Intrinsics.Vector256<sbyte>

Parameters

address
SByte*
mask
Vector256<SByte>
merge
Vector256<SByte>

Returns

Vector256<SByte>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(SByte*, Vector128<SByte>, Vector128<SByte>)

Source:
Avx10v1.cs

__m128i _mm_mask_loadu_epi8 (__m128i s, __mmask16 k, void const * mem_addr)

VMOVDQU8 xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<System::SByte> MaskLoad(System::SByte* address, System::Runtime::Intrinsics::Vector128<System::SByte> mask, System::Runtime::Intrinsics::Vector128<System::SByte> merge);
public static System.Runtime.Intrinsics.Vector128<sbyte> MaskLoad(sbyte* address, System.Runtime.Intrinsics.Vector128<sbyte> mask, System.Runtime.Intrinsics.Vector128<sbyte> merge);
static member MaskLoad : nativeptr<sbyte> * System.Runtime.Intrinsics.Vector128<sbyte> * System.Runtime.Intrinsics.Vector128<sbyte> -> System.Runtime.Intrinsics.Vector128<sbyte>

Parameters

address
SByte*
mask
Vector128<SByte>
merge
Vector128<SByte>

Returns

Vector128<SByte>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(UInt64*, Vector256<UInt64>, Vector256<UInt64>)

Source:
Avx10v1.cs

__m256i _mm256_mask_loadu_epi64 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<System::UInt64> MaskLoad(System::UInt64* address, System::Runtime::Intrinsics::Vector256<System::UInt64> mask, System::Runtime::Intrinsics::Vector256<System::UInt64> merge);
public static System.Runtime.Intrinsics.Vector256<ulong> MaskLoad(ulong* address, System.Runtime.Intrinsics.Vector256<ulong> mask, System.Runtime.Intrinsics.Vector256<ulong> merge);
static member MaskLoad : nativeptr<uint64> * System.Runtime.Intrinsics.Vector256<uint64> * System.Runtime.Intrinsics.Vector256<uint64> -> System.Runtime.Intrinsics.Vector256<uint64>

Parameters

address
UInt64*
mask
Vector256<UInt64>
merge
Vector256<UInt64>

Returns

Vector256<UInt64>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(Int64*, Vector128<Int64>, Vector128<Int64>)

Source:
Avx10v1.cs

__m128i _mm_mask_loadu_epi64 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<long> MaskLoad(long* address, System::Runtime::Intrinsics::Vector128<long> mask, System::Runtime::Intrinsics::Vector128<long> merge);
public static System.Runtime.Intrinsics.Vector128<long> MaskLoad(long* address, System.Runtime.Intrinsics.Vector128<long> mask, System.Runtime.Intrinsics.Vector128<long> merge);
static member MaskLoad : nativeptr<int64> * System.Runtime.Intrinsics.Vector128<int64> * System.Runtime.Intrinsics.Vector128<int64> -> System.Runtime.Intrinsics.Vector128<int64>

Parameters

address
Int64*
mask
Vector128<Int64>
merge
Vector128<Int64>

Returns

Vector128<Int64>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(Int32*, Vector256<Int32>, Vector256<Int32>)

Source:
Avx10v1.cs

__m256i _mm256_mask_loadu_epi32 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<int> MaskLoad(int* address, System::Runtime::Intrinsics::Vector256<int> mask, System::Runtime::Intrinsics::Vector256<int> merge);
public static System.Runtime.Intrinsics.Vector256<int> MaskLoad(int* address, System.Runtime.Intrinsics.Vector256<int> mask, System.Runtime.Intrinsics.Vector256<int> merge);
static member MaskLoad : nativeptr<int> * System.Runtime.Intrinsics.Vector256<int> * System.Runtime.Intrinsics.Vector256<int> -> System.Runtime.Intrinsics.Vector256<int>

Parameters

address
Int32*
mask
Vector256<Int32>
merge
Vector256<Int32>

Returns

Vector256<Int32>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(Int32*, Vector128<Int32>, Vector128<Int32>)

Source:
Avx10v1.cs

__m128i _mm_mask_loadu_epi32 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU32 xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<int> MaskLoad(int* address, System::Runtime::Intrinsics::Vector128<int> mask, System::Runtime::Intrinsics::Vector128<int> merge);
public static System.Runtime.Intrinsics.Vector128<int> MaskLoad(int* address, System.Runtime.Intrinsics.Vector128<int> mask, System.Runtime.Intrinsics.Vector128<int> merge);
static member MaskLoad : nativeptr<int> * System.Runtime.Intrinsics.Vector128<int> * System.Runtime.Intrinsics.Vector128<int> -> System.Runtime.Intrinsics.Vector128<int>

Parameters

address
Int32*
mask
Vector128<Int32>
merge
Vector128<Int32>

Returns

Vector128<Int32>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(Int16*, Vector256<Int16>, Vector256<Int16>)

Source:
Avx10v1.cs

__m256i _mm256_mask_loadu_epi16 (__m256i s, __mmask16 k, void const * mem_addr)

VMOVDQU16 ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<short> MaskLoad(short* address, System::Runtime::Intrinsics::Vector256<short> mask, System::Runtime::Intrinsics::Vector256<short> merge);
public static System.Runtime.Intrinsics.Vector256<short> MaskLoad(short* address, System.Runtime.Intrinsics.Vector256<short> mask, System.Runtime.Intrinsics.Vector256<short> merge);
static member MaskLoad : nativeptr<int16> * System.Runtime.Intrinsics.Vector256<int16> * System.Runtime.Intrinsics.Vector256<int16> -> System.Runtime.Intrinsics.Vector256<int16>

Parameters

address
Int16*
mask
Vector256<Int16>
merge
Vector256<Int16>

Returns

Vector256<Int16>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(Int16*, Vector128<Int16>, Vector128<Int16>)

Source:
Avx10v1.cs

__m128i _mm_mask_loadu_epi16 (__m128i s, __mmask8 k, void const * mem_addr)

VMOVDQU16 xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<short> MaskLoad(short* address, System::Runtime::Intrinsics::Vector128<short> mask, System::Runtime::Intrinsics::Vector128<short> merge);
public static System.Runtime.Intrinsics.Vector128<short> MaskLoad(short* address, System.Runtime.Intrinsics.Vector128<short> mask, System.Runtime.Intrinsics.Vector128<short> merge);
static member MaskLoad : nativeptr<int16> * System.Runtime.Intrinsics.Vector128<int16> * System.Runtime.Intrinsics.Vector128<int16> -> System.Runtime.Intrinsics.Vector128<int16>

Parameters

address
Int16*
mask
Vector128<Int16>
merge
Vector128<Int16>

Returns

Vector128<Int16>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(Double*, Vector256<Double>, Vector256<Double>)

Source:
Avx10v1.cs

__m256d _mm256_mask_loadu_pd (__m256d s, __mmask8 k, void const * mem_addr)

VMOVUPD ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<double> MaskLoad(double* address, System::Runtime::Intrinsics::Vector256<double> mask, System::Runtime::Intrinsics::Vector256<double> merge);
public static System.Runtime.Intrinsics.Vector256<double> MaskLoad(double* address, System.Runtime.Intrinsics.Vector256<double> mask, System.Runtime.Intrinsics.Vector256<double> merge);
static member MaskLoad : nativeptr<double> * System.Runtime.Intrinsics.Vector256<double> * System.Runtime.Intrinsics.Vector256<double> -> System.Runtime.Intrinsics.Vector256<double>

Parameters

address
Double*
mask
Vector256<Double>
merge
Vector256<Double>

Returns

Vector256<Double>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(Double*, Vector128<Double>, Vector128<Double>)

Source:
Avx10v1.cs

__m128d _mm_mask_loadu_pd (__m128d s, __mmask8 k, void const * mem_addr)

VMOVUPD xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<double> MaskLoad(double* address, System::Runtime::Intrinsics::Vector128<double> mask, System::Runtime::Intrinsics::Vector128<double> merge);
public static System.Runtime.Intrinsics.Vector128<double> MaskLoad(double* address, System.Runtime.Intrinsics.Vector128<double> mask, System.Runtime.Intrinsics.Vector128<double> merge);
static member MaskLoad : nativeptr<double> * System.Runtime.Intrinsics.Vector128<double> * System.Runtime.Intrinsics.Vector128<double> -> System.Runtime.Intrinsics.Vector128<double>

Parameters

address
Double*
mask
Vector128<Double>
merge
Vector128<Double>

Returns

Vector128<Double>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(Byte*, Vector256<Byte>, Vector256<Byte>)

Source:
Avx10v1.cs

__m256i _mm256_mask_loadu_epi8 (__m256i s, __mmask32 k, void const * mem_addr)

VMOVDQU8 ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<System::Byte> MaskLoad(System::Byte* address, System::Runtime::Intrinsics::Vector256<System::Byte> mask, System::Runtime::Intrinsics::Vector256<System::Byte> merge);
public static System.Runtime.Intrinsics.Vector256<byte> MaskLoad(byte* address, System.Runtime.Intrinsics.Vector256<byte> mask, System.Runtime.Intrinsics.Vector256<byte> merge);
static member MaskLoad : nativeptr<byte> * System.Runtime.Intrinsics.Vector256<byte> * System.Runtime.Intrinsics.Vector256<byte> -> System.Runtime.Intrinsics.Vector256<byte>

Parameters

address
Byte*
mask
Vector256<Byte>
merge
Vector256<Byte>

Returns

Vector256<Byte>

Remarks

The native and managed intrinsics have different order of parameters.
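
As a further hypothetical sketch, the mask can come from a comparison on existing data: here only the lanes of current that hold a placeholder byte are refreshed from memory, while every other lane is preserved through the merge operand.

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class SelectiveRefreshSketch
{
    static unsafe void Main()
    {
        if (!Avx10v1.IsSupported) return;

        byte[] fresh = new byte[32];
        for (int i = 0; i < fresh.Length; i++) fresh[i] = (byte)i;

        // Mark lanes 0 and 4 of `current` as placeholders (0xFF) to be refreshed.
        Vector256<byte> current = Vector256.Create((byte)0xAA)
            .WithElement(0, (byte)0xFF)
            .WithElement(4, (byte)0xFF);

        Vector256<byte> placeholder = Vector256.Create((byte)0xFF);
        Vector256<byte> mask = Vector256.Equals(current, placeholder);

        fixed (byte* p = fresh)
        {
            // Lanes equal to 0xFF load from `fresh`; all other lanes keep `current`.
            Vector256<byte> result = Avx10v1.MaskLoad(p, mask, current);
            Console.WriteLine(result); // lanes 0 and 4 become 0 and 4, the rest stay 0xAA
        }
    }
}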

Applies to

MaskLoad(Byte*, Vector128<Byte>, Vector128<Byte>)

Source:
Avx10v1.cs

__m128i _mm_mask_loadu_epi8 (__m128i s, __mmask16 k, void const * mem_addr)

VMOVDQU8 xmm1 {k1}{z}, m128

public:
 static System::Runtime::Intrinsics::Vector128<System::Byte> MaskLoad(System::Byte* address, System::Runtime::Intrinsics::Vector128<System::Byte> mask, System::Runtime::Intrinsics::Vector128<System::Byte> merge);
public static System.Runtime.Intrinsics.Vector128<byte> MaskLoad(byte* address, System.Runtime.Intrinsics.Vector128<byte> mask, System.Runtime.Intrinsics.Vector128<byte> merge);
static member MaskLoad : nativeptr<byte> * System.Runtime.Intrinsics.Vector128<byte> * System.Runtime.Intrinsics.Vector128<byte> -> System.Runtime.Intrinsics.Vector128<byte>

Parameters

address
Byte*
mask
Vector128<Byte>
merge
Vector128<Byte>

Returns

Vector128<Byte>

Remarks

The native and managed intrinsics have different order of parameters.

Applies to

MaskLoad(Int64*, Vector256<Int64>, Vector256<Int64>)

Source:
Avx10v1.cs

__m256i _mm256_mask_loadu_epi64 (__m256i s, __mmask8 k, void const * mem_addr)

VMOVDQU64 ymm1 {k1}{z}, m256

public:
 static System::Runtime::Intrinsics::Vector256<long> MaskLoad(long* address, System::Runtime::Intrinsics::Vector256<long> mask, System::Runtime::Intrinsics::Vector256<long> merge);
public static System.Runtime.Intrinsics.Vector256<long> MaskLoad(long* address, System.Runtime.Intrinsics.Vector256<long> mask, System.Runtime.Intrinsics.Vector256<long> merge);
static member MaskLoad : nativeptr<int64> * System.Runtime.Intrinsics.Vector256<int64> * System.Runtime.Intrinsics.Vector256<int64> -> System.Runtime.Intrinsics.Vector256<int64>

Parameters

address
Int64*
mask
Vector256<Int64>
merge
Vector256<Int64>

Returns

Vector256<Int64>

Remarks

The native and managed intrinsics have different order of parameters.
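
The zero-masking form shown in the instruction listing ({z}) is equivalent to passing an all-zero merge vector, so masked-off lanes come out as zero. A brief hypothetical sketch:

using System;
using System.Runtime.Intrinsics;
using System.Runtime.Intrinsics.X86;

class ZeroMaskSketch
{
    static unsafe void Main()
    {
        if (!Avx10v1.IsSupported) return;

        long[] data = { 1, 2, 3, 4 };
        fixed (long* p = data)
        {
            // Load lanes 0 and 2; lanes 1 and 3 become zero because merge is zero.
            Vector256<long> mask = Vector256.Create(-1L, 0L, -1L, 0L);
            Vector256<long> result = Avx10v1.MaskLoad(p, mask, Vector256<long>.Zero);
            Console.WriteLine(result); // expected: <1, 0, 3, 0>
        }
    }
}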

Applies to