-
Notifications
You must be signed in to change notification settings - Fork 15.4k
[DirectX] Add lowering support for llvm.fsh[l|r].*
#170570
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Conversation
|
@llvm/pr-subscribers-backend-directx Author: Finn Plummer (inbelic). Changes: This PR adds support to emulate the `llvm.fshl.*` and `llvm.fshr.*` intrinsics by expanding them. Note: the implementation does not create a mask for the first shift, as this is automatically done in the DXIL shift equivalents. Resolves #165750. Full diff: https://github.com/llvm/llvm-project/pull/170570.diff — 3 Files Affected:
diff --git a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
index e0d2dbde92150..4616f0c98bb9a 100644
--- a/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
+++ b/llvm/lib/Target/DirectX/DXILIntrinsicExpansion.cpp
@@ -15,6 +15,7 @@
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Passes.h"
+#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
@@ -200,6 +201,8 @@ static bool isIntrinsicExpansion(Function &F) {
case Intrinsic::assume:
case Intrinsic::abs:
case Intrinsic::atan2:
+ case Intrinsic::fshl:
+ case Intrinsic::fshr:
case Intrinsic::exp:
case Intrinsic::is_fpclass:
case Intrinsic::log:
@@ -656,6 +659,32 @@ static Value *expandAtan2Intrinsic(CallInst *Orig) {
return Result;
}
+template <bool LeftFunnel>
+static Value *expandFunnelShiftIntrinsic(CallInst *Orig) {
+ Type *Ty = Orig->getType();
+ Value *A = Orig->getOperand(0);
+ Value *B = Orig->getOperand(1);
+ Value *Shift = Orig->getOperand(2);
+
+ IRBuilder<> Builder(Orig);
+
+ unsigned BitWidth = Ty->getScalarSizeInBits();
+ Constant *Mask = ConstantInt::get(Ty, BitWidth - 1);
+ Constant *Size = ConstantInt::get(Ty, BitWidth);
+
+ // The shift is not required to be masked as DXIL op will do so automatically
+ Value *Left =
+ LeftFunnel ? Builder.CreateShl(A, Shift) : Builder.CreateLShr(B, Shift);
+
+ Value *MaskedShift = Builder.CreateAnd(Shift, Mask);
+ Value *InverseShift = Builder.CreateSub(Size, MaskedShift);
+ Value *Right = LeftFunnel ? Builder.CreateLShr(B, InverseShift)
+ : Builder.CreateShl(A, InverseShift);
+
+ Value *Result = Builder.CreateOr(Left, Right);
+ return Result;
+}
+
static Value *expandPowIntrinsic(CallInst *Orig, Intrinsic::ID IntrinsicId) {
Value *X = Orig->getOperand(0);
@@ -995,6 +1024,12 @@ static bool expandIntrinsic(Function &F, CallInst *Orig) {
case Intrinsic::atan2:
Result = expandAtan2Intrinsic(Orig);
break;
+ case Intrinsic::fshl:
+ Result = expandFunnelShiftIntrinsic<true>(Orig);
+ break;
+ case Intrinsic::fshr:
+ Result = expandFunnelShiftIntrinsic<false>(Orig);
+ break;
case Intrinsic::exp:
Result = expandExpIntrinsic(Orig);
break;
diff --git a/llvm/test/CodeGen/DirectX/fshl.ll b/llvm/test/CodeGen/DirectX/fshl.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/fshl.ll
; RUN: opt -S -scalarizer -dxil-intrinsic-expansion -mtriple=dxil-pc-shadermodel6.3-library %s | FileCheck %s
; RUN: opt -S -scalarizer -dxil-intrinsic-expansion -dxil-op-lower -mtriple=dxil-pc-shadermodel6.3-library %s | FileCheck %s
;
; Make sure the expansion of funnel shifts left is generated and that every
; emitted shift amount stays strictly below the bit width: a shift equal to
; the bit width (which the naive "width - shift" form produces when the shift
; amount is 0 mod the width) would be poison in LLVM IR.

; CHECK-LABEL: define{{.*}}@fshl_i16(
; CHECK-SAME: i16 %[[A:.*]], i16 %[[B:.*]], i16 %[[SHIFT:.*]])
define noundef i16 @fshl_i16(i16 %a, i16 %b, i16 %shift) {
entry:
; CHECK: %[[MASKED_SHIFT:.*]] = and i16 %[[SHIFT]], 15
; CHECK: %[[INVERSE_SHIFT:.*]] = sub i16 15, %[[MASKED_SHIFT]]
; CHECK: %[[LEFT:.*]] = shl i16 %[[A]], %[[MASKED_SHIFT]]
; CHECK: %[[B_PRE:.*]] = lshr i16 %[[B]], 1
; CHECK: %[[RIGHT:.*]] = lshr i16 %[[B_PRE]], %[[INVERSE_SHIFT]]
; CHECK: %[[RES:.*]] = or i16 %[[LEFT]], %[[RIGHT]]
; CHECK: ret i16 %[[RES]]
  %fsh = call i16 @llvm.fshl.i16(i16 %a, i16 %b, i16 %shift)
  ret i16 %fsh
}

declare i16 @llvm.fshl.i16(i16, i16, i16)

; CHECK-LABEL: define{{.*}}@fshl_v1i32(
; CHECK-SAME: <1 x i32> %[[A_VEC:.*]], <1 x i32> %[[B_VEC:.*]], <1 x i32> %[[SHIFT_VEC:.*]])
define noundef <1 x i32> @fshl_v1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %shift) {
entry:
; CHECK: %[[A:.*]] = extractelement <1 x i32> %[[A_VEC]], i64 0
; CHECK: %[[B:.*]] = extractelement <1 x i32> %[[B_VEC]], i64 0
; CHECK: %[[SHIFT:.*]] = extractelement <1 x i32> %[[SHIFT_VEC]], i64 0
; CHECK: %[[MASKED_SHIFT:.*]] = and i32 %[[SHIFT]], 31
; CHECK: %[[INVERSE_SHIFT:.*]] = sub i32 31, %[[MASKED_SHIFT]]
; CHECK: %[[LEFT:.*]] = shl i32 %[[A]], %[[MASKED_SHIFT]]
; CHECK: %[[B_PRE:.*]] = lshr i32 %[[B]], 1
; CHECK: %[[RIGHT:.*]] = lshr i32 %[[B_PRE]], %[[INVERSE_SHIFT]]
; CHECK: %[[RES:.*]] = or i32 %[[LEFT]], %[[RIGHT]]
; CHECK: %[[RES_VEC:.*]] = insertelement <1 x i32> poison, i32 %[[RES]], i64 0
; CHECK: ret <1 x i32> %[[RES_VEC]]
  %fsh = call <1 x i32> @llvm.fshl.v1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %shift)
  ret <1 x i32> %fsh
}

declare <1 x i32> @llvm.fshl.v1i32(<1 x i32>, <1 x i32>, <1 x i32>)

; CHECK-LABEL: define{{.*}}@fshl_v3i64(
; CHECK-SAME: <3 x i64> %[[A_VEC:.*]], <3 x i64> %[[B_VEC:.*]], <3 x i64> %[[SHIFT_VEC:.*]])
define noundef <3 x i64> @fshl_v3i64(<3 x i64> %a, <3 x i64> %b, <3 x i64> %shift) {
entry:
; CHECK: %[[A0:.*]] = extractelement <3 x i64> %[[A_VEC]], i64 0
; CHECK: %[[B0:.*]] = extractelement <3 x i64> %[[B_VEC]], i64 0
; CHECK: %[[SHIFT0:.*]] = extractelement <3 x i64> %[[SHIFT_VEC]], i64 0
; CHECK: %[[MASKED_SHIFT0:.*]] = and i64 %[[SHIFT0]], 63
; CHECK: %[[INVERSE_SHIFT0:.*]] = sub i64 63, %[[MASKED_SHIFT0]]
; CHECK: %[[LEFT0:.*]] = shl i64 %[[A0]], %[[MASKED_SHIFT0]]
; CHECK: %[[B_PRE0:.*]] = lshr i64 %[[B0]], 1
; CHECK: %[[RIGHT0:.*]] = lshr i64 %[[B_PRE0]], %[[INVERSE_SHIFT0]]
; CHECK: %[[RES0:.*]] = or i64 %[[LEFT0]], %[[RIGHT0]]
;
; CHECK: %[[A1:.*]] = extractelement <3 x i64> %[[A_VEC]], i64 1
; CHECK: %[[B1:.*]] = extractelement <3 x i64> %[[B_VEC]], i64 1
; CHECK: %[[SHIFT1:.*]] = extractelement <3 x i64> %[[SHIFT_VEC]], i64 1
; CHECK: %[[MASKED_SHIFT1:.*]] = and i64 %[[SHIFT1]], 63
; CHECK: %[[INVERSE_SHIFT1:.*]] = sub i64 63, %[[MASKED_SHIFT1]]
; CHECK: %[[LEFT1:.*]] = shl i64 %[[A1]], %[[MASKED_SHIFT1]]
; CHECK: %[[B_PRE1:.*]] = lshr i64 %[[B1]], 1
; CHECK: %[[RIGHT1:.*]] = lshr i64 %[[B_PRE1]], %[[INVERSE_SHIFT1]]
; CHECK: %[[RES1:.*]] = or i64 %[[LEFT1]], %[[RIGHT1]]
;
; CHECK: %[[A2:.*]] = extractelement <3 x i64> %[[A_VEC]], i64 2
; CHECK: %[[B2:.*]] = extractelement <3 x i64> %[[B_VEC]], i64 2
; CHECK: %[[SHIFT2:.*]] = extractelement <3 x i64> %[[SHIFT_VEC]], i64 2
; CHECK: %[[MASKED_SHIFT2:.*]] = and i64 %[[SHIFT2]], 63
; CHECK: %[[INVERSE_SHIFT2:.*]] = sub i64 63, %[[MASKED_SHIFT2]]
; CHECK: %[[LEFT2:.*]] = shl i64 %[[A2]], %[[MASKED_SHIFT2]]
; CHECK: %[[B_PRE2:.*]] = lshr i64 %[[B2]], 1
; CHECK: %[[RIGHT2:.*]] = lshr i64 %[[B_PRE2]], %[[INVERSE_SHIFT2]]
; CHECK: %[[RES2:.*]] = or i64 %[[LEFT2]], %[[RIGHT2]]
;
; CHECK: %[[INSERT0:.*]] = insertelement <3 x i64> poison, i64 %[[RES0]], i64 0
; CHECK: %[[INSERT1:.*]] = insertelement <3 x i64> %[[INSERT0]], i64 %[[RES1]], i64 1
; CHECK: %[[RES_VEC:.*]] = insertelement <3 x i64> %[[INSERT1]], i64 %[[RES2]], i64 2
;
; CHECK: ret <3 x i64> %[[RES_VEC]]
  %fsh = call <3 x i64> @llvm.fshl.v3i64(<3 x i64> %a, <3 x i64> %b, <3 x i64> %shift)
  ret <3 x i64> %fsh
}

declare <3 x i64> @llvm.fshl.v3i64(<3 x i64>, <3 x i64>, <3 x i64>)
diff --git a/llvm/test/CodeGen/DirectX/fshr.ll b/llvm/test/CodeGen/DirectX/fshr.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/DirectX/fshr.ll
; RUN: opt -S -scalarizer -dxil-intrinsic-expansion -mtriple=dxil-pc-shadermodel6.3-library %s | FileCheck %s
; RUN: opt -S -scalarizer -dxil-intrinsic-expansion -dxil-op-lower -mtriple=dxil-pc-shadermodel6.3-library %s | FileCheck %s
;
; Make sure the expansion of funnel shifts right is generated and that every
; emitted shift amount stays strictly below the bit width: a shift equal to
; the bit width (which the naive "width - shift" form produces when the shift
; amount is 0 mod the width) would be poison in LLVM IR.

; CHECK-LABEL: define{{.*}}@fshr_i16(
; CHECK-SAME: i16 %[[A:.*]], i16 %[[B:.*]], i16 %[[SHIFT:.*]])
define noundef i16 @fshr_i16(i16 %a, i16 %b, i16 %shift) {
entry:
; CHECK: %[[MASKED_SHIFT:.*]] = and i16 %[[SHIFT]], 15
; CHECK: %[[INVERSE_SHIFT:.*]] = sub i16 15, %[[MASKED_SHIFT]]
; CHECK: %[[RIGHT:.*]] = lshr i16 %[[B]], %[[MASKED_SHIFT]]
; CHECK: %[[A_PRE:.*]] = shl i16 %[[A]], 1
; CHECK: %[[LEFT:.*]] = shl i16 %[[A_PRE]], %[[INVERSE_SHIFT]]
; CHECK: %[[RES:.*]] = or i16 %[[RIGHT]], %[[LEFT]]
; CHECK: ret i16 %[[RES]]
  %fsh = call i16 @llvm.fshr.i16(i16 %a, i16 %b, i16 %shift)
  ret i16 %fsh
}

declare i16 @llvm.fshr.i16(i16, i16, i16)

; CHECK-LABEL: define{{.*}}@fshr_v1i32(
; CHECK-SAME: <1 x i32> %[[A_VEC:.*]], <1 x i32> %[[B_VEC:.*]], <1 x i32> %[[SHIFT_VEC:.*]])
define noundef <1 x i32> @fshr_v1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %shift) {
entry:
; CHECK: %[[A:.*]] = extractelement <1 x i32> %[[A_VEC]], i64 0
; CHECK: %[[B:.*]] = extractelement <1 x i32> %[[B_VEC]], i64 0
; CHECK: %[[SHIFT:.*]] = extractelement <1 x i32> %[[SHIFT_VEC]], i64 0
; CHECK: %[[MASKED_SHIFT:.*]] = and i32 %[[SHIFT]], 31
; CHECK: %[[INVERSE_SHIFT:.*]] = sub i32 31, %[[MASKED_SHIFT]]
; CHECK: %[[RIGHT:.*]] = lshr i32 %[[B]], %[[MASKED_SHIFT]]
; CHECK: %[[A_PRE:.*]] = shl i32 %[[A]], 1
; CHECK: %[[LEFT:.*]] = shl i32 %[[A_PRE]], %[[INVERSE_SHIFT]]
; CHECK: %[[RES:.*]] = or i32 %[[RIGHT]], %[[LEFT]]
; CHECK: %[[RES_VEC:.*]] = insertelement <1 x i32> poison, i32 %[[RES]], i64 0
; CHECK: ret <1 x i32> %[[RES_VEC]]
  %fsh = call <1 x i32> @llvm.fshr.v1i32(<1 x i32> %a, <1 x i32> %b, <1 x i32> %shift)
  ret <1 x i32> %fsh
}

declare <1 x i32> @llvm.fshr.v1i32(<1 x i32>, <1 x i32>, <1 x i32>)

; CHECK-LABEL: define{{.*}}@fshr_v3i64(
; CHECK-SAME: <3 x i64> %[[A_VEC:.*]], <3 x i64> %[[B_VEC:.*]], <3 x i64> %[[SHIFT_VEC:.*]])
define noundef <3 x i64> @fshr_v3i64(<3 x i64> %a, <3 x i64> %b, <3 x i64> %shift) {
entry:
; CHECK: %[[A0:.*]] = extractelement <3 x i64> %[[A_VEC]], i64 0
; CHECK: %[[B0:.*]] = extractelement <3 x i64> %[[B_VEC]], i64 0
; CHECK: %[[SHIFT0:.*]] = extractelement <3 x i64> %[[SHIFT_VEC]], i64 0
; CHECK: %[[MASKED_SHIFT0:.*]] = and i64 %[[SHIFT0]], 63
; CHECK: %[[INVERSE_SHIFT0:.*]] = sub i64 63, %[[MASKED_SHIFT0]]
; CHECK: %[[RIGHT0:.*]] = lshr i64 %[[B0]], %[[MASKED_SHIFT0]]
; CHECK: %[[A_PRE0:.*]] = shl i64 %[[A0]], 1
; CHECK: %[[LEFT0:.*]] = shl i64 %[[A_PRE0]], %[[INVERSE_SHIFT0]]
; CHECK: %[[RES0:.*]] = or i64 %[[RIGHT0]], %[[LEFT0]]
;
; CHECK: %[[A1:.*]] = extractelement <3 x i64> %[[A_VEC]], i64 1
; CHECK: %[[B1:.*]] = extractelement <3 x i64> %[[B_VEC]], i64 1
; CHECK: %[[SHIFT1:.*]] = extractelement <3 x i64> %[[SHIFT_VEC]], i64 1
; CHECK: %[[MASKED_SHIFT1:.*]] = and i64 %[[SHIFT1]], 63
; CHECK: %[[INVERSE_SHIFT1:.*]] = sub i64 63, %[[MASKED_SHIFT1]]
; CHECK: %[[RIGHT1:.*]] = lshr i64 %[[B1]], %[[MASKED_SHIFT1]]
; CHECK: %[[A_PRE1:.*]] = shl i64 %[[A1]], 1
; CHECK: %[[LEFT1:.*]] = shl i64 %[[A_PRE1]], %[[INVERSE_SHIFT1]]
; CHECK: %[[RES1:.*]] = or i64 %[[RIGHT1]], %[[LEFT1]]
;
; CHECK: %[[A2:.*]] = extractelement <3 x i64> %[[A_VEC]], i64 2
; CHECK: %[[B2:.*]] = extractelement <3 x i64> %[[B_VEC]], i64 2
; CHECK: %[[SHIFT2:.*]] = extractelement <3 x i64> %[[SHIFT_VEC]], i64 2
; CHECK: %[[MASKED_SHIFT2:.*]] = and i64 %[[SHIFT2]], 63
; CHECK: %[[INVERSE_SHIFT2:.*]] = sub i64 63, %[[MASKED_SHIFT2]]
; CHECK: %[[RIGHT2:.*]] = lshr i64 %[[B2]], %[[MASKED_SHIFT2]]
; CHECK: %[[A_PRE2:.*]] = shl i64 %[[A2]], 1
; CHECK: %[[LEFT2:.*]] = shl i64 %[[A_PRE2]], %[[INVERSE_SHIFT2]]
; CHECK: %[[RES2:.*]] = or i64 %[[RIGHT2]], %[[LEFT2]]
;
; CHECK: %[[INSERT0:.*]] = insertelement <3 x i64> poison, i64 %[[RES0]], i64 0
; CHECK: %[[INSERT1:.*]] = insertelement <3 x i64> %[[INSERT0]], i64 %[[RES1]], i64 1
; CHECK: %[[RES_VEC:.*]] = insertelement <3 x i64> %[[INSERT1]], i64 %[[RES2]], i64 2
;
; CHECK: ret <3 x i64> %[[RES_VEC]]
  %fsh = call <3 x i64> @llvm.fshr.v3i64(<3 x i64> %a, <3 x i64> %b, <3 x i64> %shift)
  ret <3 x i64> %fsh
}

declare <3 x i64> @llvm.fshr.v3i64(<3 x i64>, <3 x i64>, <3 x i64>)
|
| Constant *Mask = ConstantInt::get(Ty, BitWidth - 1); | ||
| Constant *Size = ConstantInt::get(Ty, BitWidth); | ||
|
|
||
| // The shift is not required to be masked as DXIL op will do so automatically |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Is there a doc you can link for this?
| Value *MaskedShift = Builder.CreateAnd(Shift, Mask); | ||
| Value *InverseShift = Builder.CreateSub(Size, MaskedShift); | ||
| Value *Right = LeftFunnel ? Builder.CreateLShr(B, InverseShift) | ||
| : Builder.CreateShl(A, InverseShift); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Shouldn't there be an edge-case for when the shift operand is 0 or N (where N is the number of bits)?
Consider 0 for example:
If Shift == 0 then InverseShift = N - (Shift mod N) = N, but shl and [l|a]shr in LLVM returns poison if the number of bits to shift by is equal to or greater than the number of bits in its operand (N).
In LLVM 3.7, the result is undefined instead of returning poison when the number of bits to shift by is >= N.
If the result is not undefined or poison in DXIL and I assume that DXIL will mask/modulo the shift amount for shl and [l|a]shr, then InverseShift = N mod N = 0 and then the result becomes (A << 0) | (B >> 0) == A | B or (A >> 0) | (B << 0) == A | B when the result should just be A.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
for the case where 'Shift == 0' B needs to be shifted to be all zeroes (shifted by the size), but as @lcohedron pointed out that isn't valid. Is it possible to always detect at compile time when Shift is zero?
spall
left a comment
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
From reading the documentation and @lcohedron comment about when Shift == 0 I think this implementation is wrong.
|
|
||
| // The shift is not required to be masked as DXIL op will do so automatically | ||
| Value *Left = | ||
| LeftFunnel ? Builder.CreateShl(A, Shift) : Builder.CreateLShr(B, Shift); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
From the docs (https://llvm.org/docs/LangRef.html#llvm-fshl-intrinsic) it looks like Shift should be mod the Size, ensuring its a valid shift amount.
| Value *Left = | ||
| LeftFunnel ? Builder.CreateShl(A, Shift) : Builder.CreateLShr(B, Shift); | ||
|
|
||
| Value *MaskedShift = Builder.CreateAnd(Shift, Mask); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
It looks like this is the modulo value you actually want to be shifting by?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
would be helpful to comment this line with what is happening.
| Value *MaskedShift = Builder.CreateAnd(Shift, Mask); | ||
| Value *InverseShift = Builder.CreateSub(Size, MaskedShift); | ||
| Value *Right = LeftFunnel ? Builder.CreateLShr(B, InverseShift) | ||
| : Builder.CreateShl(A, InverseShift); |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
for the case where 'Shift == 0' B needs to be shifted to be all zeroes (shifted by the size), but as @lcohedron pointed out that isn't valid. Is it possible to always detect at compile time when Shift is zero?
This PR adds support to emulate the `llvm.fshl.*` and `llvm.fshr.*` intrinsics by expanding them, as described here. Note: the implementation does not create a mask for the first shift, as this is automatically done in the DXIL shift equivalents.
Resolves #165750.