This improves the codegen for the vector `select`, `gather`, `scatter` and boolean reduction intrinsics and fixes rust-lang/portable-simd#316. The current behavior of most mask operations during LLVM codegen is to truncate the mask vector to `<N x i1>`, telling LLVM to use the least significant bit. The exception is the `simd_bitmask` intrinsic, which already used the most significant bit. Since SSE/AVX instructions are defined to use the most significant bit, truncating means that LLVM has to insert a left shift to move that bit into the most significant position before the mask can actually be used. Similarly, on AArch64, mask operations like blend work bit by bit, so repeating the least significant bit across the whole lane involves shifting it into the sign position and then comparing against zero. By shifting before truncating to `<N x i1>`, we tell LLVM that we only consider the most significant bit, removing the need for additional shift instructions in the assembly.
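As a purely illustrative sketch (the function below and the nightly `portable_simd` feature are assumptions of this example, not part of the change), this is the kind of mask-driven code that benefits: a lane-wise `select` through a comparison mask. Each mask lane is all-ones or all-zeros, so once the backend knows that only the sign bit matters, targets whose blend instructions read the sign bit (e.g. SSE4.1 `blendvps`) no longer need an extra shift to reposition the mask.

```rust
// Hypothetical example on a nightly toolchain with `portable_simd` enabled.
#![feature(portable_simd)]
use std::simd::prelude::*;

/// Clamp negative lanes to zero via a mask select.
pub fn clamp_negative(x: f32x4) -> f32x4 {
    // `simd_lt` yields a per-lane mask of all-ones (true) or all-zeros (false).
    let negative = x.simd_lt(f32x4::splat(0.0));
    // `select` picks the first argument where the mask is set, the second otherwise.
    negative.select(f32x4::splat(0.0), x)
}
```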
//@ compile-flags: -C no-prepopulate-passes

#![crate_type = "lib"]

#![feature(repr_simd, intrinsics)]
#![allow(non_camel_case_types)]

#[repr(simd)]
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct Vec2<T>(pub [T; 2]);

#[repr(simd)]
#[derive(Copy, Clone, PartialEq, Debug)]
pub struct Vec4<T>(pub [T; 4]);

extern "rust-intrinsic" {
    fn simd_masked_load<M, P, T>(mask: M, pointer: P, values: T) -> T;
}

// CHECK-LABEL: @load_f32x2
#[no_mangle]
pub unsafe fn load_f32x2(mask: Vec2<i32>, pointer: *const f32,
                         values: Vec2<f32>) -> Vec2<f32> {
    // CHECK: [[A:%[0-9]+]] = lshr <2 x i32> {{.*}}, <i32 31, i32 31>
    // CHECK: [[B:%[0-9]+]] = trunc <2 x i32> [[A]] to <2 x i1>
    // CHECK: call <2 x float> @llvm.masked.load.v2f32.p0(ptr {{.*}}, i32 4, <2 x i1> [[B]], <2 x float> {{.*}})
    simd_masked_load(mask, pointer, values)
}

// CHECK-LABEL: @load_pf32x4
#[no_mangle]
pub unsafe fn load_pf32x4(mask: Vec4<i32>, pointer: *const *const f32,
                          values: Vec4<*const f32>) -> Vec4<*const f32> {
    // CHECK: [[A:%[0-9]+]] = lshr <4 x i32> {{.*}}, <i32 31, i32 31, i32 31, i32 31>
    // CHECK: [[B:%[0-9]+]] = trunc <4 x i32> [[A]] to <4 x i1>
    // CHECK: call <4 x ptr> @llvm.masked.load.v4p0.p0(ptr {{.*}}, i32 {{.*}}, <4 x i1> [[B]], <4 x ptr> {{.*}})
    simd_masked_load(mask, pointer, values)
}
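The `lshr` by 31 followed by `trunc ... to <N x i1>` asserted by the CHECK lines above amounts to "take the sign bit of each i32 mask lane". A scalar Rust model of that per-lane conversion, for illustration only (this is not rustc's actual codegen code):

```rust
// Illustrative model of the per-lane mask lowering: logically shift the most
// significant bit down to position 0, then keep only that single bit.
fn mask_lane_is_set(lane: i32) -> bool {
    ((lane as u32) >> 31) != 0
}
```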