author     Jason Ekstrand <jason@jlekstrand.net>    2020-09-24 16:28:56 -0500
committer  Eric Engestrom <eric@engestrom.ch>       2020-10-14 19:29:32 +0200
commit     7412517bd74ef2b8ca0a8762da1ebbda3071c7d9 (patch)
tree       729d8e97804d5a07f1cc3b5119ec5f1bb0dc7519
parent     a3de6a5869bc2a187be9ec88f95be3ba97e184fb (diff)
intel/nir: Don't try to emit vector load_scratch instructions
In 53bfcdeecf4c9, we added load/store_scratch instructions which deviate
a little bit from most memory load/store instructions in that we can't
use the normal untyped read/write instructions which can read and write
up to a vec4 at a time.  Instead, we have to use the DWORD scattered
read/write instructions which are scalar.  To handle this, we added code
to brw_nir_lower_mem_access_bit_sizes to cause them to be scalarized.
However, one case was missing: the load-as-larger-vector case.  In this
case, we take small bit-sized constant-offset loads and replace them
with a 32-bit load and shuffle the result around as needed.

For scratch, this case is much trickier to get right because it often
emits a vec2 or wider which we would then have to lower again.  We did
this for other load and store ops because, for lower bit-sizes, we have
to scalarize thanks to the byte scattered read/write instructions being
scalar.  However, for scratch we're not losing as much because we can't
vectorize 32-bit loads and stores either.  It's easier to just disallow
it whenever we have to scalarize.

Fixes: 53bfcdeecf4c9 "intel/fs: Implement the new load/store_scratch..."
Reviewed-by: Kenneth Graunke <kenneth@whitecape.org>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/6872>
(cherry picked from commit fd04f858b0aa9f688f5dfb041ccb706da96f862a)
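To illustrate the load-as-larger-vector idea described above, here is a
minimal sketch in NIR-builder style C.  It assumes the 32-bit dword
covering the target offset has already been loaded; the helper name and
its structure are hypothetical and not taken from the pass itself:

   /* Hypothetical helper: given a 32-bit dword loaded at the offset
    * rounded down to a 4-byte boundary, shift the wanted 16 bits into
    * the low half and truncate.  The real pass must also handle the
    * multi-component case, which is exactly what this patch forbids
    * for scratch, since scratch access has to stay scalar.
    */
   static nir_ssa_def *
   extract_u16_from_dword(nir_builder *b, nir_ssa_def *dword,
                          uint32_t byte_offset)
   {
      unsigned shift = (byte_offset & 3) * 8;  /* bit offset in the dword */
      return nir_u2u16(b, nir_ushr(b, dword, nir_imm_int(b, shift)));
   }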
-rw-r--r--  .pick_status.json                                         2
-rw-r--r--  src/intel/compiler/brw_nir_lower_mem_access_bit_sizes.c  5
2 files changed, 5 insertions, 2 deletions
diff --git a/.pick_status.json b/.pick_status.json
index d2ab2bd05c6..ba3423ace19 100644
--- a/.pick_status.json
+++ b/.pick_status.json
@@ -7222,7 +7222,7 @@
         "description": "intel/nir: Don't try to emit vector load_scratch instructions",
         "nominated": true,
         "nomination_type": 1,
-        "resolution": 0,
+        "resolution": 1,
         "master_sha": null,
         "because_sha": "53bfcdeecf4c9632e09ee641d2ca02dd9ec25e34"
     },
diff --git a/src/intel/compiler/brw_nir_lower_mem_access_bit_sizes.c b/src/intel/compiler/brw_nir_lower_mem_access_bit_sizes.c
index 19abc16a9c5..ea982b0a091 100644
--- a/src/intel/compiler/brw_nir_lower_mem_access_bit_sizes.c
+++ b/src/intel/compiler/brw_nir_lower_mem_access_bit_sizes.c
@@ -53,6 +53,9 @@ dup_mem_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
    }
 
    dup->num_components = num_components;
+   if (intrin->intrinsic == nir_intrinsic_load_scratch ||
+       intrin->intrinsic == nir_intrinsic_store_scratch)
+      assert(num_components == 1);
 
    for (unsigned i = 0; i < info->num_indices; i++)
       dup->const_index[i] = intrin->const_index[i];
@@ -92,7 +95,7 @@ lower_mem_load_bit_size(nir_builder *b, nir_intrinsic_instr *intrin,
    nir_ssa_def *result;
    nir_src *offset_src = nir_get_io_offset_src(intrin);
 
-   if (bit_size < 32 && nir_src_is_const(*offset_src)) {
+   if (bit_size < 32 && !needs_scalar && nir_src_is_const(*offset_src)) {
      /* The offset is constant so we can use a 32-bit load and just shift it
       * around as needed.
       */
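For context on the guard added above: needs_scalar marks access types
that cannot be vectorized.  A sketch of the caller-side check one would
expect, assuming (as the commit message says) that scratch must go
through the scalar DWORD scattered messages; the pass's actual plumbing
may differ:

   /* Hypothetical: scratch loads/stores must stay scalar because the
    * DWORD scattered read/write messages handle one dword per channel.
    */
   const bool needs_scalar =
      intrin->intrinsic == nir_intrinsic_load_scratch ||
      intrin->intrinsic == nir_intrinsic_store_scratch;

With !needs_scalar in the condition, sub-32-bit scratch loads now skip
the load-as-larger-vector path and fall through to the scalarizing
path, which never produces the vec2-or-wider results the commit message
describes.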