diff options
author | Andrzej Janik <[email protected]> | 2020-09-14 21:45:56 +0200 |
---|---|---|
committer | Andrzej Janik <[email protected]> | 2020-09-14 21:45:56 +0200 |
commit | bb5025c9b17e3fc46e454ca8faab1e85e0361ba8 (patch) | |
tree | 07df096e1ad16e8c9464aac17c99194e7257937e /ptx/src/ptx.lalrpop | |
parent | 48dac435400117935624aed244d1442982c874e2 (diff) | |
download | ZLUDA-bb5025c9b17e3fc46e454ca8faab1e85e0361ba8.tar.gz ZLUDA-bb5025c9b17e3fc46e454ca8faab1e85e0361ba8.zip |
Refactor implicit conversions and start implementing vector extract/insert
Diffstat (limited to 'ptx/src/ptx.lalrpop')
-rw-r--r-- | ptx/src/ptx.lalrpop | 15 |
1 file changed, 9 insertions, 6 deletions
diff --git a/ptx/src/ptx.lalrpop b/ptx/src/ptx.lalrpop index fd419f5..6e5f5e3 100644 --- a/ptx/src/ptx.lalrpop +++ b/ptx/src/ptx.lalrpop @@ -269,10 +269,10 @@ ScalarType: ast::ScalarType = { ".f16" => ast::ScalarType::F16, ".f16x2" => ast::ScalarType::F16x2, ".pred" => ast::ScalarType::Pred, - MemoryType + LdStScalarType }; -MemoryType: ast::ScalarType = { +LdStScalarType: ast::ScalarType = { ".b8" => ast::ScalarType::B8, ".b16" => ast::ScalarType::B16, ".b32" => ast::ScalarType::B32, @@ -446,13 +446,12 @@ Instruction: ast::Instruction<ast::ParsedArgParams<'input>> = { // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-ld InstLd: ast::Instruction<ast::ParsedArgParams<'input>> = { - "ld" <q:LdStQualifier?> <ss:LdStateSpace?> <cop:LdCacheOperator?> <v:VectorPrefix?> <t:MemoryType> <dst:ExtendedID> "," "[" <src:Operand> "]" => { + "ld" <q:LdStQualifier?> <ss:LdStateSpace?> <cop:LdCacheOperator?> <t:LdStType> <dst:ExtendedID> "," "[" <src:Operand> "]" => { ast::Instruction::Ld( ast::LdData { qualifier: q.unwrap_or(ast::LdStQualifier::Weak), state_space: ss.unwrap_or(ast::LdStateSpace::Generic), caching: cop.unwrap_or(ast::LdCacheOperator::Cached), - vector: v, typ: t }, ast::Arg2 { dst:dst, src:src } @@ -460,6 +459,11 @@ InstLd: ast::Instruction<ast::ParsedArgParams<'input>> = { } }; +LdStType: ast::Type = { + <v:VectorPrefix> <t:LdStScalarType> => ast::Type::Vector(t, v), + <t:LdStScalarType> => ast::Type::Scalar(t), +} + LdStQualifier: ast::LdStQualifier = { ".weak" => ast::LdStQualifier::Weak, ".volatile" => ast::LdStQualifier::Volatile, @@ -895,13 +899,12 @@ ShlType: ast::ShlType = { // https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#data-movement-and-conversion-instructions-st // Warning: NVIDIA documentation is incorrect, you can specify scope only once InstSt: ast::Instruction<ast::ParsedArgParams<'input>> = { - "st" <q:LdStQualifier?> <ss:StStateSpace?> <cop:StCacheOperator?> 
<v:VectorPrefix?> <t:MemoryType> "[" <src1:Operand> "]" "," <src2:Operand> => { + "st" <q:LdStQualifier?> <ss:StStateSpace?> <cop:StCacheOperator?> <t:LdStType> "[" <src1:Operand> "]" "," <src2:Operand> => { ast::Instruction::St( ast::StData { qualifier: q.unwrap_or(ast::LdStQualifier::Weak), state_space: ss.unwrap_or(ast::StStateSpace::Generic), caching: cop.unwrap_or(ast::StCacheOperator::Writeback), - vector: v, typ: t }, ast::Arg2St { src1:src1, src2:src2 } |