Conversation
Contributor
Your PR requires formatting changes to meet the project's style guidelines. The suggested changes are shown below.
diff --git a/src/context.jl b/src/context.jl
index 88035e7..73c417a 100644
--- a/src/context.jl
+++ b/src/context.jl
@@ -140,7 +140,7 @@ is_integrated(dev)
See also: [`device`](@ref), [`devices`](@ref)
"""
-function is_integrated(dev::ZeDevice=device())
+function is_integrated(dev::ZeDevice = device())
props = oneL0.properties(dev)
return (props.flags & oneL0.ZE_DEVICE_PROPERTY_FLAG_INTEGRATED) != 0
end
diff --git a/test/device/intrinsics.jl b/test/device/intrinsics.jl
index bd2fadd..0abdf5b 100644
--- a/test/device/intrinsics.jl
+++ b/test/device/intrinsics.jl
@@ -276,159 +276,159 @@ end
# @testset "atomics (low level)" begin
- @testset "atomic_add($T)" for T in [Int32, UInt32, Float32]
- if oneAPI.is_integrated() && T == Float32
- continue
- end
- a = oneArray([zero(T)])
+@testset "atomic_add($T)" for T in [Int32, UInt32, Float32]
+ if oneAPI.is_integrated() && T == Float32
+ continue
+ end
+ a = oneArray([zero(T)])
- function kernel(a, b)
- oneAPI.atomic_add!(pointer(a), b)
- return
- end
+ function kernel(a, b)
+ oneAPI.atomic_add!(pointer(a), b)
+ return
+ end
- @oneapi items=256 kernel(a, one(T))
- @test Array(a)[1] == T(256)
+ @oneapi items = 256 kernel(a, one(T))
+ @test Array(a)[1] == T(256)
end
- @testset "atomic_sub($T)" for T in [Int32, UInt32, Float32]
- if oneAPI.is_integrated() && T == Float32
- continue
- end
- a = oneArray([T(256)])
+@testset "atomic_sub($T)" for T in [Int32, UInt32, Float32]
+ if oneAPI.is_integrated() && T == Float32
+ continue
+ end
+ a = oneArray([T(256)])
- function kernel(a, b)
- oneAPI.atomic_sub!(pointer(a), b)
- return
- end
+ function kernel(a, b)
+ oneAPI.atomic_sub!(pointer(a), b)
+ return
+ end
- @oneapi items=256 kernel(a, one(T))
- @test Array(a)[1] == T(0)
+ @oneapi items = 256 kernel(a, one(T))
+ @test Array(a)[1] == T(0)
end
- @testset "atomic_inc($T)" for T in [Int32, UInt32]
- a = oneArray([zero(T)])
+@testset "atomic_inc($T)" for T in [Int32, UInt32]
+ a = oneArray([zero(T)])
- function kernel(a)
- oneAPI.atomic_inc!(pointer(a))
- return
- end
+ function kernel(a)
+ oneAPI.atomic_inc!(pointer(a))
+ return
+ end
- @oneapi items=256 kernel(a)
- @test Array(a)[1] == T(256)
+ @oneapi items = 256 kernel(a)
+ @test Array(a)[1] == T(256)
end
- @testset "atomic_dec($T)" for T in [Int32, UInt32]
- a = oneArray([T(256)])
+@testset "atomic_dec($T)" for T in [Int32, UInt32]
+ a = oneArray([T(256)])
- function kernel(a)
- oneAPI.atomic_dec!(pointer(a))
- return
- end
+ function kernel(a)
+ oneAPI.atomic_dec!(pointer(a))
+ return
+ end
- @oneapi items=256 kernel(a)
- @test Array(a)[1] == T(0)
+ @oneapi items = 256 kernel(a)
+ @test Array(a)[1] == T(0)
end
- @testset "atomic_min($T)" for T in [Int32, UInt32, Float32]
- if oneAPI.is_integrated() && T == Float32
- continue
- end
- a = oneArray([T(256)])
+@testset "atomic_min($T)" for T in [Int32, UInt32, Float32]
+ if oneAPI.is_integrated() && T == Float32
+ continue
+ end
+ a = oneArray([T(256)])
- function kernel(a, T)
- i = get_global_id()
- oneAPI.atomic_min!(pointer(a), T(i))
- return
- end
+ function kernel(a, T)
+ i = get_global_id()
+ oneAPI.atomic_min!(pointer(a), T(i))
+ return
+ end
- @oneapi items=256 kernel(a, T)
- @test Array(a)[1] == one(T)
+ @oneapi items = 256 kernel(a, T)
+ @test Array(a)[1] == one(T)
end
- @testset "atomic_max($T)" for T in [Int32, UInt32, Float32]
- if oneAPI.is_integrated() && T == Float32
- continue
- end
- a = oneArray([zero(T)])
+@testset "atomic_max($T)" for T in [Int32, UInt32, Float32]
+ if oneAPI.is_integrated() && T == Float32
+ continue
+ end
+ a = oneArray([zero(T)])
- function kernel(a, T)
- i = get_global_id()
- oneAPI.atomic_max!(pointer(a), T(i))
- return
- end
+ function kernel(a, T)
+ i = get_global_id()
+ oneAPI.atomic_max!(pointer(a), T(i))
+ return
+ end
- @oneapi items=256 kernel(a, T)
- @test Array(a)[1] == T(256)
+ @oneapi items = 256 kernel(a, T)
+ @test Array(a)[1] == T(256)
end
- @testset "atomic_and($T)" for T in [Int32, UInt32]
- a = oneArray([T(1023)])
+@testset "atomic_and($T)" for T in [Int32, UInt32]
+ a = oneArray([T(1023)])
- function kernel(a, T)
- i = get_global_id() - 1
- k = 1
- for i = 1:i
- k *= 2
- end
- b = 1023 - k # 1023 - 2^i
- oneAPI.atomic_and!(pointer(a), T(b))
- return
+ function kernel(a, T)
+ i = get_global_id() - 1
+ k = 1
+ for i in 1:i
+ k *= 2
end
-
- @oneapi items=10 kernel(a, T)
- @test Array(a)[1] == zero(T)
+ b = 1023 - k # 1023 - 2^i
+ oneAPI.atomic_and!(pointer(a), T(b))
+ return
end
- @testset "atomic_or($T)" for T in [Int32, UInt32]
- a = oneArray([zero(T)])
+ @oneapi items = 10 kernel(a, T)
+ @test Array(a)[1] == zero(T)
+end
- function kernel(a, T)
- i = get_global_id()
- b = 1 # 2^(i-1)
- for i = 1:i
- b *= 2
- end
- b ÷= 2
- oneAPI.atomic_or!(pointer(a), T(b))
- return
- end
+@testset "atomic_or($T)" for T in [Int32, UInt32]
+ a = oneArray([zero(T)])
- @oneapi items=10 kernel(a, T)
- @test Array(a)[1] == T(1023)
+ function kernel(a, T)
+ i = get_global_id()
+ b = 1 # 2^(i-1)
+ for i in 1:i
+ b *= 2
+ end
+ b ÷= 2
+ oneAPI.atomic_or!(pointer(a), T(b))
+ return
end
- @testset "atomic_xor($T)" for T in [Int32, UInt32]
- a = oneArray([T(1023)])
+ @oneapi items = 10 kernel(a, T)
+ @test Array(a)[1] == T(1023)
+end
- function kernel(a, T)
- i = get_global_id()
- b = 1 # 2^(i-1)
- for i = 1:i
- b *= 2
- end
- b ÷= 2
- oneAPI.atomic_xor!(pointer(a), T(b))
- return
+@testset "atomic_xor($T)" for T in [Int32, UInt32]
+ a = oneArray([T(1023)])
+
+ function kernel(a, T)
+ i = get_global_id()
+ b = 1 # 2^(i-1)
+ for i in 1:i
+ b *= 2
+ end
+ b ÷= 2
+ oneAPI.atomic_xor!(pointer(a), T(b))
+ return
end
- @oneapi items=10 kernel(a, T)
- @test Array(a)[1] == zero(T)
+ @oneapi items = 10 kernel(a, T)
+ @test Array(a)[1] == zero(T)
end
- @testset "atomic_xchg($T)" for T in [Int32, UInt32, Float32]
- if oneAPI.is_integrated() && T == Float32
- continue
- end
- a = oneArray([zero(T)])
+@testset "atomic_xchg($T)" for T in [Int32, UInt32, Float32]
+ if oneAPI.is_integrated() && T == Float32
+ continue
+ end
+ a = oneArray([zero(T)])
- function kernel(a, b)
- oneAPI.atomic_xchg!(pointer(a), b)
- return
- end
+ function kernel(a, b)
+ oneAPI.atomic_xchg!(pointer(a), b)
+ return
+ end
- @oneapi items=256 kernel(a, one(T))
- @test Array(a)[1] == one(T)
+ @oneapi items = 256 kernel(a, one(T))
+ @test Array(a)[1] == one(T)
end
# end
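For reference, the is_integrated helper added in src/context.jl (first hunk above) can also be queried directly. A minimal sketch, assuming oneAPI.jl is loaded and a Level Zero driver is available; the loop itself is illustrative and not part of this PR:

using oneAPI

# Report whether each visible device is an integrated GPU; the helper wraps
# the ZE_DEVICE_PROPERTY_FLAG_INTEGRATED check shown in the diff above.
for dev in oneAPI.devices()
    println(dev, " integrated: ", oneAPI.is_integrated(dev))
end

# With no argument the helper defaults to the current device().
oneAPI.is_integrated()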
Member
Author
Needs tests.
Codecov Report
✅ All modified and coverable lines are covered by tests.

Additional details and impacted files:

@@            Coverage Diff             @@
##           master     #544      +/-   ##
==========================================
+ Coverage   79.36%   79.38%   +0.01%
==========================================
  Files          48       48
  Lines        3141     3144       +3
==========================================
+ Hits         2493     2496       +3
  Misses        648      648

☔ View full report in Codecov by Sentry.
vchuravy approved these changes on Oct 31, 2025
bde6ac1 to 03b4114
Member
Author
Some devices, like the Xe graphics that Buildkite runs on, don't support certain atomics. In fact, the best coverage I got at home was from my consumer-grade Arc GPU, followed by PVC.
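The test guard in the diff above follows exactly this reasoning. A minimal sketch of the pattern, assuming the is_integrated helper from this PR; the kernel name and launch size are illustrative:

using oneAPI, Test

function add_kernel(a, b)
    oneAPI.atomic_add!(pointer(a), b)
    return
end

# Float32 atomic add relies on SPV_EXT_shader_atomic_float_add, which
# integrated parts may not expose, so the test is skipped there.
if !oneAPI.is_integrated()
    a = oneArray([0.0f0])
    @oneapi items = 256 add_kernel(a, 1.0f0)
    @test Array(a)[1] == 256.0f0
end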
cc45b65 to e8c473e
f2cb327 to 38816cc
Enable SPV_EXT_shader_atomic_float_add
Fixes #508
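A rough usage sketch of what this enables on supported (discrete) GPUs: Float32 atomic adds in device code. Names and sizes are illustrative, and integrated GPUs may still lack support, per the discussion above:

using oneAPI

# Sum 256 Float32 values into a single cell via atomic adds.
function atomic_sum_kernel(acc, xs)
    i = get_global_id()
    oneAPI.atomic_add!(pointer(acc), xs[i])
    return
end

xs  = oneArray(ones(Float32, 256))
acc = oneArray(zeros(Float32, 1))
@oneapi items = length(xs) atomic_sum_kernel(acc, xs)
Array(acc)[1]  # expected: 256.0f0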