path: root/ptx/lib
Diffstat (limited to 'ptx/lib')
-rw-r--r--  ptx/lib/zluda_ptx_impl.bc   bin  34052 -> 4624 bytes
-rw-r--r--  ptx/lib/zluda_ptx_impl.cl   344
-rw-r--r--  ptx/lib/zluda_ptx_impl.cpp  151
-rw-r--r--  ptx/lib/zluda_ptx_impl.spv  bin  106076 -> 0 bytes
4 files changed, 151 insertions, 344 deletions
diff --git a/ptx/lib/zluda_ptx_impl.bc b/ptx/lib/zluda_ptx_impl.bc
index 2d194c4..6651430 100644
--- a/ptx/lib/zluda_ptx_impl.bc
+++ b/ptx/lib/zluda_ptx_impl.bc
Binary files differ
diff --git a/ptx/lib/zluda_ptx_impl.cl b/ptx/lib/zluda_ptx_impl.cl
deleted file mode 100644
index 86bb593..0000000
--- a/ptx/lib/zluda_ptx_impl.cl
+++ /dev/null
@@ -1,344 +0,0 @@
-// Every time this file changes it must be rebuilt:
-// ocloc -file zluda_ptx_impl.cl -64 -options "-cl-std=CL2.0 -Dcl_intel_bit_instructions -DINTEL" -out_dir . -device kbl -output_no_suffix -spv_only
-// /opt/rocm/llvm/bin/clang -Wall -Wextra -Wsign-compare -Wconversion -x cl -Xclang -finclude-default-header zluda_ptx_impl.cl -cl-std=CL2.0 -c -target amdgcn-amd-amdhsa -o zluda_ptx_impl.bc -emit-llvm
-// Additionally you should strip names:
-// spirv-opt --strip-debug zluda_ptx_impl.spv -o zluda_ptx_impl.spv --target-env=spv1.3
-
-#pragma OPENCL EXTENSION cl_khr_int64_base_atomics : enable
-#pragma OPENCL EXTENSION cl_khr_int64_extended_atomics : enable
-
-#define FUNC(NAME) __zluda_ptx_impl__ ## NAME
-
-#define atomic_inc(NAME, SUCCESS, FAILURE, SCOPE, SPACE) \
- uint FUNC(NAME)(SPACE uint* ptr, uint threshold) { \
- uint expected = *ptr; \
- uint desired; \
- do { \
- desired = (expected >= threshold) ? 0 : expected + 1; \
- } while (!atomic_compare_exchange_strong_explicit((volatile SPACE atomic_uint*)ptr, &expected, desired, SUCCESS, FAILURE, SCOPE)); \
- return expected; \
- }
-
-#define atomic_dec(NAME, SUCCESS, FAILURE, SCOPE, SPACE) \
- uint FUNC(NAME)(SPACE uint* ptr, uint threshold) { \
- uint expected = *ptr; \
- uint desired; \
- do { \
- desired = (expected == 0 || expected > threshold) ? threshold : expected - 1; \
- } while (!atomic_compare_exchange_strong_explicit((volatile SPACE atomic_uint*)ptr, &expected, desired, SUCCESS, FAILURE, SCOPE)); \
- return expected; \
- }
-
-#define atomic_add(NAME, SUCCESS, FAILURE, SCOPE, SPACE, TYPE, ATOMIC_TYPE, INT_TYPE) \
- TYPE FUNC(NAME)(SPACE TYPE* ptr, TYPE value) { \
- volatile SPACE ATOMIC_TYPE* atomic_ptr = (volatile SPACE ATOMIC_TYPE*)ptr; \
- union { \
- INT_TYPE int_view; \
- TYPE float_view; \
- } expected, desired; \
- expected.float_view = *ptr; \
- do { \
- desired.float_view = expected.float_view + value; \
- } while (!atomic_compare_exchange_strong_explicit(atomic_ptr, &expected.int_view, desired.int_view, SUCCESS, FAILURE, SCOPE)); \
- return expected.float_view; \
- }
-
-// We are doing all this mess instead of accepting memory_order and memory_scope parameters
-// because ocloc emits broken (failing spirv-dis) SPIR-V when memory_order or memory_scope is a parameter
-
-// atom.inc
-atomic_inc(atom_relaxed_cta_generic_inc, memory_order_relaxed, memory_order_relaxed, memory_scope_work_group, );
-atomic_inc(atom_acquire_cta_generic_inc, memory_order_acquire, memory_order_acquire, memory_scope_work_group, );
-atomic_inc(atom_release_cta_generic_inc, memory_order_release, memory_order_acquire, memory_scope_work_group, );
-atomic_inc(atom_acq_rel_cta_generic_inc, memory_order_acq_rel, memory_order_acquire, memory_scope_work_group, );
-
-atomic_inc(atom_relaxed_gpu_generic_inc, memory_order_relaxed, memory_order_relaxed, memory_scope_device, );
-atomic_inc(atom_acquire_gpu_generic_inc, memory_order_acquire, memory_order_acquire, memory_scope_device, );
-atomic_inc(atom_release_gpu_generic_inc, memory_order_release, memory_order_acquire, memory_scope_device, );
-atomic_inc(atom_acq_rel_gpu_generic_inc, memory_order_acq_rel, memory_order_acquire, memory_scope_device, );
-
-atomic_inc(atom_relaxed_sys_generic_inc, memory_order_relaxed, memory_order_relaxed, memory_scope_device, );
-atomic_inc(atom_acquire_sys_generic_inc, memory_order_acquire, memory_order_acquire, memory_scope_device, );
-atomic_inc(atom_release_sys_generic_inc, memory_order_release, memory_order_acquire, memory_scope_device, );
-atomic_inc(atom_acq_rel_sys_generic_inc, memory_order_acq_rel, memory_order_acquire, memory_scope_device, );
-
-atomic_inc(atom_relaxed_cta_global_inc, memory_order_relaxed, memory_order_relaxed, memory_scope_work_group, __global);
-atomic_inc(atom_acquire_cta_global_inc, memory_order_acquire, memory_order_acquire, memory_scope_work_group, __global);
-atomic_inc(atom_release_cta_global_inc, memory_order_release, memory_order_acquire, memory_scope_work_group, __global);
-atomic_inc(atom_acq_rel_cta_global_inc, memory_order_acq_rel, memory_order_acquire, memory_scope_work_group, __global);
-
-atomic_inc(atom_relaxed_gpu_global_inc, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __global);
-atomic_inc(atom_acquire_gpu_global_inc, memory_order_acquire, memory_order_acquire, memory_scope_device, __global);
-atomic_inc(atom_release_gpu_global_inc, memory_order_release, memory_order_acquire, memory_scope_device, __global);
-atomic_inc(atom_acq_rel_gpu_global_inc, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __global);
-
-atomic_inc(atom_relaxed_sys_global_inc, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __global);
-atomic_inc(atom_acquire_sys_global_inc, memory_order_acquire, memory_order_acquire, memory_scope_device, __global);
-atomic_inc(atom_release_sys_global_inc, memory_order_release, memory_order_acquire, memory_scope_device, __global);
-atomic_inc(atom_acq_rel_sys_global_inc, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __global);
-
-atomic_inc(atom_relaxed_cta_shared_inc, memory_order_relaxed, memory_order_relaxed, memory_scope_work_group, __local);
-atomic_inc(atom_acquire_cta_shared_inc, memory_order_acquire, memory_order_acquire, memory_scope_work_group, __local);
-atomic_inc(atom_release_cta_shared_inc, memory_order_release, memory_order_acquire, memory_scope_work_group, __local);
-atomic_inc(atom_acq_rel_cta_shared_inc, memory_order_acq_rel, memory_order_acquire, memory_scope_work_group, __local);
-
-atomic_inc(atom_relaxed_gpu_shared_inc, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __local);
-atomic_inc(atom_acquire_gpu_shared_inc, memory_order_acquire, memory_order_acquire, memory_scope_device, __local);
-atomic_inc(atom_release_gpu_shared_inc, memory_order_release, memory_order_acquire, memory_scope_device, __local);
-atomic_inc(atom_acq_rel_gpu_shared_inc, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __local);
-
-atomic_inc(atom_relaxed_sys_shared_inc, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __local);
-atomic_inc(atom_acquire_sys_shared_inc, memory_order_acquire, memory_order_acquire, memory_scope_device, __local);
-atomic_inc(atom_release_sys_shared_inc, memory_order_release, memory_order_acquire, memory_scope_device, __local);
-atomic_inc(atom_acq_rel_sys_shared_inc, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __local);
-
-// atom.dec
-atomic_dec(atom_relaxed_cta_generic_dec, memory_order_relaxed, memory_order_relaxed, memory_scope_work_group, );
-atomic_dec(atom_acquire_cta_generic_dec, memory_order_acquire, memory_order_acquire, memory_scope_work_group, );
-atomic_dec(atom_release_cta_generic_dec, memory_order_release, memory_order_acquire, memory_scope_work_group, );
-atomic_dec(atom_acq_rel_cta_generic_dec, memory_order_acq_rel, memory_order_acquire, memory_scope_work_group, );
-
-atomic_dec(atom_relaxed_gpu_generic_dec, memory_order_relaxed, memory_order_relaxed, memory_scope_device, );
-atomic_dec(atom_acquire_gpu_generic_dec, memory_order_acquire, memory_order_acquire, memory_scope_device, );
-atomic_dec(atom_release_gpu_generic_dec, memory_order_release, memory_order_acquire, memory_scope_device, );
-atomic_dec(atom_acq_rel_gpu_generic_dec, memory_order_acq_rel, memory_order_acquire, memory_scope_device, );
-
-atomic_dec(atom_relaxed_sys_generic_dec, memory_order_relaxed, memory_order_relaxed, memory_scope_device, );
-atomic_dec(atom_acquire_sys_generic_dec, memory_order_acquire, memory_order_acquire, memory_scope_device, );
-atomic_dec(atom_release_sys_generic_dec, memory_order_release, memory_order_acquire, memory_scope_device, );
-atomic_dec(atom_acq_rel_sys_generic_dec, memory_order_acq_rel, memory_order_acquire, memory_scope_device, );
-
-atomic_dec(atom_relaxed_cta_global_dec, memory_order_relaxed, memory_order_relaxed, memory_scope_work_group, __global);
-atomic_dec(atom_acquire_cta_global_dec, memory_order_acquire, memory_order_acquire, memory_scope_work_group, __global);
-atomic_dec(atom_release_cta_global_dec, memory_order_release, memory_order_acquire, memory_scope_work_group, __global);
-atomic_dec(atom_acq_rel_cta_global_dec, memory_order_acq_rel, memory_order_acquire, memory_scope_work_group, __global);
-
-atomic_dec(atom_relaxed_gpu_global_dec, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __global);
-atomic_dec(atom_acquire_gpu_global_dec, memory_order_acquire, memory_order_acquire, memory_scope_device, __global);
-atomic_dec(atom_release_gpu_global_dec, memory_order_release, memory_order_acquire, memory_scope_device, __global);
-atomic_dec(atom_acq_rel_gpu_global_dec, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __global);
-
-atomic_dec(atom_relaxed_sys_global_dec, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __global);
-atomic_dec(atom_acquire_sys_global_dec, memory_order_acquire, memory_order_acquire, memory_scope_device, __global);
-atomic_dec(atom_release_sys_global_dec, memory_order_release, memory_order_acquire, memory_scope_device, __global);
-atomic_dec(atom_acq_rel_sys_global_dec, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __global);
-
-atomic_dec(atom_relaxed_cta_shared_dec, memory_order_relaxed, memory_order_relaxed, memory_scope_work_group, __local);
-atomic_dec(atom_acquire_cta_shared_dec, memory_order_acquire, memory_order_acquire, memory_scope_work_group, __local);
-atomic_dec(atom_release_cta_shared_dec, memory_order_release, memory_order_acquire, memory_scope_work_group, __local);
-atomic_dec(atom_acq_rel_cta_shared_dec, memory_order_acq_rel, memory_order_acquire, memory_scope_work_group, __local);
-
-atomic_dec(atom_relaxed_gpu_shared_dec, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __local);
-atomic_dec(atom_acquire_gpu_shared_dec, memory_order_acquire, memory_order_acquire, memory_scope_device, __local);
-atomic_dec(atom_acq_rel_sys_shared_dec, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __local);
-
-// atom.add.f32
-atomic_add(atom_relaxed_cta_generic_add_f32, memory_order_relaxed, memory_order_relaxed, memory_scope_work_group, , float, atomic_uint, uint);
-atomic_add(atom_acquire_cta_generic_add_f32, memory_order_acquire, memory_order_acquire, memory_scope_work_group, , float, atomic_uint, uint);
-atomic_add(atom_release_cta_generic_add_f32, memory_order_release, memory_order_acquire, memory_scope_work_group, , float, atomic_uint, uint);
-atomic_add(atom_acq_rel_cta_generic_add_f32, memory_order_acq_rel, memory_order_acquire, memory_scope_work_group, , float, atomic_uint, uint);
-
-atomic_add(atom_relaxed_gpu_generic_add_f32, memory_order_relaxed, memory_order_relaxed, memory_scope_device, , float, atomic_uint, uint);
-atomic_add(atom_acquire_gpu_generic_add_f32, memory_order_acquire, memory_order_acquire, memory_scope_device, , float, atomic_uint, uint);
-atomic_add(atom_release_gpu_generic_add_f32, memory_order_release, memory_order_acquire, memory_scope_device, , float, atomic_uint, uint);
-atomic_add(atom_acq_rel_gpu_generic_add_f32, memory_order_acq_rel, memory_order_acquire, memory_scope_device, , float, atomic_uint, uint);
-
-atomic_add(atom_relaxed_sys_generic_add_f32, memory_order_relaxed, memory_order_relaxed, memory_scope_device, , float, atomic_uint, uint);
-atomic_add(atom_acquire_sys_generic_add_f32, memory_order_acquire, memory_order_acquire, memory_scope_device, , float, atomic_uint, uint);
-atomic_add(atom_release_sys_generic_add_f32, memory_order_release, memory_order_acquire, memory_scope_device, , float, atomic_uint, uint);
-atomic_add(atom_acq_rel_sys_generic_add_f32, memory_order_acq_rel, memory_order_acquire, memory_scope_device, , float, atomic_uint, uint);
-
-atomic_add(atom_relaxed_cta_global_add_f32, memory_order_relaxed, memory_order_relaxed, memory_scope_work_group, __global, float, atomic_uint, uint);
-atomic_add(atom_acquire_cta_global_add_f32, memory_order_acquire, memory_order_acquire, memory_scope_work_group, __global, float, atomic_uint, uint);
-atomic_add(atom_release_cta_global_add_f32, memory_order_release, memory_order_acquire, memory_scope_work_group, __global, float, atomic_uint, uint);
-atomic_add(atom_acq_rel_cta_global_add_f32, memory_order_acq_rel, memory_order_acquire, memory_scope_work_group, __global, float, atomic_uint, uint);
-
-atomic_add(atom_relaxed_gpu_global_add_f32, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __global, float, atomic_uint, uint);
-atomic_add(atom_acquire_gpu_global_add_f32, memory_order_acquire, memory_order_acquire, memory_scope_device, __global, float, atomic_uint, uint);
-atomic_add(atom_release_gpu_global_add_f32, memory_order_release, memory_order_acquire, memory_scope_device, __global, float, atomic_uint, uint);
-atomic_add(atom_acq_rel_gpu_global_add_f32, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __global, float, atomic_uint, uint);
-
-atomic_add(atom_relaxed_sys_global_add_f32, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __global, float, atomic_uint, uint);
-atomic_add(atom_acquire_sys_global_add_f32, memory_order_acquire, memory_order_acquire, memory_scope_device, __global, float, atomic_uint, uint);
-atomic_add(atom_release_sys_global_add_f32, memory_order_release, memory_order_acquire, memory_scope_device, __global, float, atomic_uint, uint);
-atomic_add(atom_acq_rel_sys_global_add_f32, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __global, float, atomic_uint, uint);
-
-atomic_add(atom_relaxed_cta_shared_add_f32, memory_order_relaxed, memory_order_relaxed, memory_scope_work_group, __local, float, atomic_uint, uint);
-atomic_add(atom_acquire_cta_shared_add_f32, memory_order_acquire, memory_order_acquire, memory_scope_work_group, __local, float, atomic_uint, uint);
-atomic_add(atom_release_cta_shared_add_f32, memory_order_release, memory_order_acquire, memory_scope_work_group, __local, float, atomic_uint, uint);
-atomic_add(atom_acq_rel_cta_shared_add_f32, memory_order_acq_rel, memory_order_acquire, memory_scope_work_group, __local, float, atomic_uint, uint);
-
-atomic_add(atom_relaxed_gpu_shared_add_f32, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __local, float, atomic_uint, uint);
-atomic_add(atom_acquire_gpu_shared_add_f32, memory_order_acquire, memory_order_acquire, memory_scope_device, __local, float, atomic_uint, uint);
-atomic_add(atom_release_gpu_shared_add_f32, memory_order_release, memory_order_acquire, memory_scope_device, __local, float, atomic_uint, uint);
-atomic_add(atom_acq_rel_gpu_shared_add_f32, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __local, float, atomic_uint, uint);
-
-atomic_add(atom_relaxed_sys_shared_add_f32, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __local, float, atomic_uint, uint);
-atomic_add(atom_acquire_sys_shared_add_f32, memory_order_acquire, memory_order_acquire, memory_scope_device, __local, float, atomic_uint, uint);
-atomic_add(atom_release_sys_shared_add_f32, memory_order_release, memory_order_acquire, memory_scope_device, __local, float, atomic_uint, uint);
-atomic_add(atom_acq_rel_sys_shared_add_f32, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __local, float, atomic_uint, uint);
-
-atomic_add(atom_relaxed_cta_generic_add_f64, memory_order_relaxed, memory_order_relaxed, memory_scope_work_group, , double, atomic_ulong, ulong);
-atomic_add(atom_acquire_cta_generic_add_f64, memory_order_acquire, memory_order_acquire, memory_scope_work_group, , double, atomic_ulong, ulong);
-atomic_add(atom_release_cta_generic_add_f64, memory_order_release, memory_order_acquire, memory_scope_work_group, , double, atomic_ulong, ulong);
-atomic_add(atom_acq_rel_cta_generic_add_f64, memory_order_acq_rel, memory_order_acquire, memory_scope_work_group, , double, atomic_ulong, ulong);
-
-atomic_add(atom_relaxed_gpu_generic_add_f64, memory_order_relaxed, memory_order_relaxed, memory_scope_device, , double, atomic_ulong, ulong);
-atomic_add(atom_acquire_gpu_generic_add_f64, memory_order_acquire, memory_order_acquire, memory_scope_device, , double, atomic_ulong, ulong);
-atomic_add(atom_release_gpu_generic_add_f64, memory_order_release, memory_order_acquire, memory_scope_device, , double, atomic_ulong, ulong);
-atomic_add(atom_acq_rel_gpu_generic_add_f64, memory_order_acq_rel, memory_order_acquire, memory_scope_device, , double, atomic_ulong, ulong);
-
-atomic_add(atom_relaxed_sys_generic_add_f64, memory_order_relaxed, memory_order_relaxed, memory_scope_device, , double, atomic_ulong, ulong);
-atomic_add(atom_acquire_sys_generic_add_f64, memory_order_acquire, memory_order_acquire, memory_scope_device, , double, atomic_ulong, ulong);
-atomic_add(atom_release_sys_generic_add_f64, memory_order_release, memory_order_acquire, memory_scope_device, , double, atomic_ulong, ulong);
-atomic_add(atom_acq_rel_sys_generic_add_f64, memory_order_acq_rel, memory_order_acquire, memory_scope_device, , double, atomic_ulong, ulong);
-// atom.add.f64
-atomic_add(atom_relaxed_cta_global_add_f64, memory_order_relaxed, memory_order_relaxed, memory_scope_work_group, __global, double, atomic_ulong, ulong);
-atomic_add(atom_acquire_cta_global_add_f64, memory_order_acquire, memory_order_acquire, memory_scope_work_group, __global, double, atomic_ulong, ulong);
-atomic_add(atom_release_cta_global_add_f64, memory_order_release, memory_order_acquire, memory_scope_work_group, __global, double, atomic_ulong, ulong);
-atomic_add(atom_acq_rel_cta_global_add_f64, memory_order_acq_rel, memory_order_acquire, memory_scope_work_group, __global, double, atomic_ulong, ulong);
-
-atomic_add(atom_relaxed_gpu_global_add_f64, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __global, double, atomic_ulong, ulong);
-atomic_add(atom_acquire_gpu_global_add_f64, memory_order_acquire, memory_order_acquire, memory_scope_device, __global, double, atomic_ulong, ulong);
-atomic_add(atom_release_gpu_global_add_f64, memory_order_release, memory_order_acquire, memory_scope_device, __global, double, atomic_ulong, ulong);
-atomic_add(atom_acq_rel_gpu_global_add_f64, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __global, double, atomic_ulong, ulong);
-
-atomic_add(atom_relaxed_sys_global_add_f64, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __global, double, atomic_ulong, ulong);
-atomic_add(atom_acquire_sys_global_add_f64, memory_order_acquire, memory_order_acquire, memory_scope_device, __global, double, atomic_ulong, ulong);
-atomic_add(atom_release_sys_global_add_f64, memory_order_release, memory_order_acquire, memory_scope_device, __global, double, atomic_ulong, ulong);
-atomic_add(atom_acq_rel_sys_global_add_f64, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __global, double, atomic_ulong, ulong);
-
-atomic_add(atom_relaxed_cta_shared_add_f64, memory_order_relaxed, memory_order_relaxed, memory_scope_work_group, __local, double, atomic_ulong, ulong);
-atomic_add(atom_acquire_cta_shared_add_f64, memory_order_acquire, memory_order_acquire, memory_scope_work_group, __local, double, atomic_ulong, ulong);
-atomic_add(atom_release_cta_shared_add_f64, memory_order_release, memory_order_acquire, memory_scope_work_group, __local, double, atomic_ulong, ulong);
-atomic_add(atom_acq_rel_cta_shared_add_f64, memory_order_acq_rel, memory_order_acquire, memory_scope_work_group, __local, double, atomic_ulong, ulong);
-
-atomic_add(atom_relaxed_gpu_shared_add_f64, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __local, double, atomic_ulong, ulong);
-atomic_add(atom_acquire_gpu_shared_add_f64, memory_order_acquire, memory_order_acquire, memory_scope_device, __local, double, atomic_ulong, ulong);
-atomic_add(atom_release_gpu_shared_add_f64, memory_order_release, memory_order_acquire, memory_scope_device, __local, double, atomic_ulong, ulong);
-atomic_add(atom_acq_rel_gpu_shared_add_f64, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __local, double, atomic_ulong, ulong);
-
-atomic_add(atom_relaxed_sys_shared_add_f64, memory_order_relaxed, memory_order_relaxed, memory_scope_device, __local, double, atomic_ulong, ulong);
-atomic_add(atom_acquire_sys_shared_add_f64, memory_order_acquire, memory_order_acquire, memory_scope_device, __local, double, atomic_ulong, ulong);
-atomic_add(atom_release_sys_shared_add_f64, memory_order_release, memory_order_acquire, memory_scope_device, __local, double, atomic_ulong, ulong);
-atomic_add(atom_acq_rel_sys_shared_add_f64, memory_order_acq_rel, memory_order_acquire, memory_scope_device, __local, double, atomic_ulong, ulong);
-
-#ifdef INTEL
- uint FUNC(bfe_u32)(uint base, uint pos, uint len) {
- return intel_ubfe(base, pos, len);
- }
-
- ulong FUNC(bfe_u64)(ulong base, uint pos, uint len) {
- return intel_ubfe(base, pos, len);
- }
-
- int FUNC(bfe_s32)(int base, uint pos, uint len) {
- return intel_sbfe(base, pos, len);
- }
-
- long FUNC(bfe_s64)(long base, uint pos, uint len) {
- return intel_sbfe(base, pos, len);
- }
-
- uint FUNC(bfi_b32)(uint insert, uint base, uint offset, uint count) {
- return intel_bfi(base, insert, offset, count);
- }
-
- ulong FUNC(bfi_b64)(ulong insert, ulong base, uint offset, uint count) {
- return intel_bfi(base, insert, offset, count);
- }
-
- uint FUNC(brev_b32)(uint base) {
- return intel_bfrev(base);
- }
-
- ulong FUNC(brev_b64)(ulong base) {
- return intel_bfrev(base);
- }
-#else
- uint FUNC(bfe_u32)(uint base, uint pos, uint len) {
- return amd_bfe(base, pos, len);
- }
-
- ulong FUNC(bfe_u64)(ulong base, uint pos, uint len) {
- return (base >> pos) & len;
- }
-
- int FUNC(bfe_s32)(int base, uint pos, uint len) {
- return amd_bfe(base, pos, len);
- }
-
- long FUNC(bfe_s64)(long base, uint pos, uint len) {
- return (base >> pos) & len;
- }
-
- uint FUNC(bfi_b32)(uint insert, uint base, uint offset, uint count) {
- uint mask = amd_bfm(count, offset);
- return (~mask & base) | (mask & insert);
- }
-
- ulong FUNC(bfi_b64)(ulong insert, ulong base, uint offset, uint count) {
- ulong mask = ((1UL << (count & 0x3f)) - 1UL) << (offset & 0x3f);
- return (~mask & base) | (mask & insert);
- }
-
- extern __attribute__((const)) uint __llvm_bitreverse_i32(uint) __asm("llvm.bitreverse.i32");
- uint FUNC(brev_b32)(uint base) {
- return __llvm_bitreverse_i32(base);
- }
-
- extern __attribute__((const)) ulong __llvm_bitreverse_i64(ulong) __asm("llvm.bitreverse.i64");
- ulong FUNC(brev_b64)(ulong base) {
- return __llvm_bitreverse_i64(base);
- }
-
- // Taken from __ballot definition in hipamd/include/hip/amd_detail/amd_device_functions.h
- uint FUNC(activemask)() {
- return (uint)__builtin_amdgcn_uicmp(1, 0, 33);
- }
-
- uint FUNC(sreg_tid)(uchar dim) {
- return (uint)get_local_id(dim);
- }
-
- uint FUNC(sreg_ntid)(uchar dim) {
- return (uint)get_local_size(dim);
- }
-
- uint FUNC(sreg_ctaid)(uchar dim) {
- return (uint)get_group_id(dim);
- }
-
- uint FUNC(sreg_nctaid)(uchar dim) {
- return (uint)get_num_groups(dim);
- }
-
- uint FUNC(sreg_clock)() {
- return (uint)__builtin_amdgcn_s_memtime();
- }
-
- // Taken from __ballot definition in hipamd/include/hip/amd_detail/amd_device_functions.h
- // They return active threads, which I think is incorrect
- extern __attribute__((const)) uint __ockl_lane_u32();
- uint FUNC(sreg_lanemask_lt)() {
- uint lane_idx = __ockl_lane_u32();
- ulong mask = (1UL << lane_idx) - 1UL;
- return (uint)mask;
- }
-#endif
-
-void FUNC(__assertfail)(
- __attribute__((unused)) __private ulong* message,
- __attribute__((unused)) __private ulong* file,
- __attribute__((unused)) __private uint* line,
- __attribute__((unused)) __private ulong* function,
- __attribute__((unused)) __private ulong* charSize
-) {
-}
-
-uint FUNC(vprintf)(
- __attribute__((unused)) __generic void* format,
- __attribute__((unused)) __generic void* valist
-) {
- return 0;
-}
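Every atomic_inc/atomic_dec/atomic_add instantiation in the deleted file expands to the same compare-and-swap retry loop. For illustration only, here is a minimal sketch of that pattern in standard C++ (the deleted code itself is OpenCL C and uses atomic_compare_exchange_strong_explicit with an explicit memory scope; cas_inc is an illustrative name, not a function from the repository):

    #include <atomic>
    #include <cstdint>

    // Same wrapping behaviour as PTX atom.inc: increment, wrapping back to 0 at the threshold.
    inline uint32_t cas_inc(std::atomic<uint32_t>& v, uint32_t threshold) {
        uint32_t expected = v.load(std::memory_order_relaxed);
        uint32_t desired;
        do {
            desired = (expected >= threshold) ? 0 : expected + 1;
            // On failure compare_exchange_strong reloads `expected` with the value it
            // observed, so the next iteration recomputes `desired` from fresh data,
            // exactly like the OpenCL macros above.
        } while (!v.compare_exchange_strong(expected, desired, std::memory_order_relaxed));
        return expected; // the old value, which is what atom.inc returns
    }
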
diff --git a/ptx/lib/zluda_ptx_impl.cpp b/ptx/lib/zluda_ptx_impl.cpp
new file mode 100644
index 0000000..f1b416d
--- /dev/null
+++ b/ptx/lib/zluda_ptx_impl.cpp
@@ -0,0 +1,151 @@
+// Every time this file changes it must be rebuilt; you need llvm-17:
+// /opt/rocm/llvm/bin/clang -Wall -Wextra -Wsign-compare -Wconversion -x hip zluda_ptx_impl.cpp -nogpulib -O3 -mno-wavefrontsize64 -o zluda_ptx_impl.bc -emit-llvm -c --offload-device-only --offload-arch=gfx1010 && llvm-dis-17 zluda_ptx_impl.bc -o - | sed '/@llvm.used/d' | sed '/wchar_size/d' | sed '/llvm.module.flags/d' | sed 's/define hidden/define linkonce_odr/g' | sed 's/\"target-cpu\"=\"gfx1010\"//g' | sed -E 's/\"target-features\"=\"[^\"]+\"//g' | llvm-as-17 - -o zluda_ptx_impl.bc && llvm-dis-17 zluda_ptx_impl.bc
+
+#include <cstddef>
+#include <cstdint>
+
+#define FUNC(NAME) __device__ __attribute__((retain)) __zluda_ptx_impl_##NAME
+
+extern "C"
+{
+ uint32_t FUNC(activemask)()
+ {
+ return __builtin_amdgcn_read_exec_lo();
+ }
+
+ size_t __ockl_get_local_id(uint32_t) __device__;
+ uint32_t FUNC(sreg_tid)(uint8_t member)
+ {
+ return (uint32_t)__ockl_get_local_id(member);
+ }
+
+ size_t __ockl_get_local_size(uint32_t) __device__;
+ uint32_t FUNC(sreg_ntid)(uint8_t member)
+ {
+ return (uint32_t)__ockl_get_local_size(member);
+ }
+
+ size_t __ockl_get_global_id(uint32_t) __device__;
+ uint32_t FUNC(sreg_ctaid)(uint8_t member)
+ {
+ return (uint32_t)__ockl_get_global_id(member);
+ }
+
+ size_t __ockl_get_global_size(uint32_t) __device__;
+ uint32_t FUNC(sreg_nctaid)(uint8_t member)
+ {
+ return (uint32_t)__ockl_get_global_size(member);
+ }
+
+ uint32_t __ockl_bfe_u32(uint32_t, uint32_t, uint32_t) __attribute__((device));
+ uint32_t FUNC(bfe_u32)(uint32_t base, uint32_t pos_32, uint32_t len_32)
+ {
+ uint32_t pos = pos_32 & 0xFFU;
+ uint32_t len = len_32 & 0xFFU;
+ if (pos >= 32)
+ return 0;
+ // V_BFE_U32 only uses bits [4:0] for len (max value is 31)
+ if (len >= 32)
+ return base >> pos;
+ len = std::min(len, 31U);
+ return __ockl_bfe_u32(base, pos, len);
+ }
+
+ // LLVM contains mentions of llvm.amdgcn.ubfe.i64 and llvm.amdgcn.sbfe.i64,
+ // but using it only leads to LLVM crashes on RDNA2
+ uint64_t FUNC(bfe_u64)(uint64_t base, uint32_t pos, uint32_t len)
+ {
+ // NVIDIA docs are incorrect: in 64-bit `bfe` both the `pos` and `len`
+ // parameters use the whole 32-bit value, not just the bottom 8 bits
+ if (pos >= 64)
+ return 0;
+ if (len >= 64)
+ return base >> pos;
+ len = std::min(len, 63U);
+ return (base >> pos) & ((1UL << len) - 1UL);
+ }
+
+ int32_t __ockl_bfe_i32(int32_t, uint32_t, uint32_t) __attribute__((device));
+ int32_t FUNC(bfe_s32)(int32_t base, uint32_t pos_32, uint32_t len_32)
+ {
+ uint32_t pos = pos_32 & 0xFFU;
+ uint32_t len = len_32 & 0xFFU;
+ if (len == 0)
+ return 0;
+ if (pos >= 32)
+ return (base >> 31);
+ // V_BFE_I32 only uses bits [4:0] for len (max value is 31)
+ if (len >= 32)
+ return base >> pos;
+ len = std::min(len, 31U);
+ return __ockl_bfe_i32(base, pos, len);
+ }
+
+ static __device__ uint32_t add_sat(uint32_t x, uint32_t y)
+ {
+ uint32_t result;
+ if (__builtin_add_overflow(x, y, &result))
+ {
+ return UINT32_MAX;
+ }
+ else
+ {
+ return result;
+ }
+ }
+
+ static __device__ uint32_t sub_sat(uint32_t x, uint32_t y)
+ {
+ uint32_t result;
+ if (__builtin_sub_overflow(x, y, &result))
+ {
+ return 0;
+ }
+ else
+ {
+ return result;
+ }
+ }
+
+ int64_t FUNC(bfe_s64)(int64_t base, uint32_t pos, uint32_t len)
+ {
+ // NVIDIA docs are incorrect: in 64-bit `bfe` both the `pos` and `len`
+ // parameters use the whole 32-bit value, not just the bottom 8 bits
+ if (len == 0)
+ return 0;
+ if (pos >= 64)
+ return (base >> 63U);
+ if (add_sat(pos, len) >= 64)
+ len = sub_sat(64, pos);
+ return (base << (64U - pos - len)) >> (64U - len);
+ }
+
+ uint32_t __ockl_bfm_u32(uint32_t count, uint32_t offset) __attribute__((device));
+ uint32_t FUNC(bfi_b32)(uint32_t insert, uint32_t base, uint32_t pos_32, uint32_t len_32)
+ {
+ uint32_t pos = pos_32 & 0xFFU;
+ uint32_t len = len_32 & 0xFFU;
+ if (pos >= 32)
+ return base;
+ uint32_t mask;
+ if (len >= 32)
+ mask = UINT32_MAX << pos;
+ else
+ mask = __ockl_bfm_u32(len, pos);
+ return (~mask & base) | (mask & (insert << pos));
+ }
+
+ uint64_t FUNC(bfi_b64)(uint64_t insert, uint64_t base, uint32_t pos, uint32_t len)
+ {
+ // NVIDIA docs are incorrect: in 64-bit `bfi` both the `pos` and `len`
+ // parameters use the whole 32-bit value, not just the bottom 8 bits
+ if (pos >= 64)
+ return base;
+ uint64_t mask;
+ if (len >= 64)
+ mask = UINT64_MAX << pos;
+ else
+ mask = ((1UL << len) - 1UL) << (pos);
+ return (~mask & base) | (mask & (insert << pos));
+ }
+}
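Once the PTX edge cases (out-of-range `pos` or `len`) are handled, the bit-field helpers above reduce to the textbook extract/insert formulas. A small host-side C++ check of those formulas on concrete values (a hypothetical standalone snippet, not part of this commit; the inputs are chosen only for illustration):

    #include <cassert>
    #include <cstdint>

    int main() {
        uint64_t base = 0xFF00u;

        // bfe.u64 with pos=8, len=8: (base >> pos) & ((1 << len) - 1) -> 0xFF
        assert(((base >> 8) & ((1ULL << 8) - 1)) == 0xFF);

        // bfe.s64 with pos=8, len=8: shift the field to the top, then arithmetic-shift it
        // back down (two's-complement shift assumed). The 8-bit field 0xFF has its top
        // bit set, so it sign-extends to -1.
        int64_t sfield = (int64_t)((uint64_t)base << (64 - 8 - 8)) >> (64 - 8);
        assert(sfield == -1);

        // bfi.b64 with pos=8, len=8: keep `base` outside the mask, shifted `insert` inside it
        uint64_t mask = ((1ULL << 8) - 1) << 8;
        uint64_t inserted = (~mask & base) | (mask & (0xABull << 8));
        assert(inserted == 0xAB00u);
        return 0;
    }
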
diff --git a/ptx/lib/zluda_ptx_impl.spv b/ptx/lib/zluda_ptx_impl.spv
deleted file mode 100644
index e9fc938..0000000
--- a/ptx/lib/zluda_ptx_impl.spv
+++ /dev/null
Binary files differ