summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
author Alyssa Rosenzweig <alyssa@rosenzweig.io> 2023-02-04 12:03:21 -0500
committer Marge Bot <emma+marge@anholt.net> 2023-02-05 08:53:29 +0000
commit bfa7ec0aa0f317011c4573e4d4ce4d4aabe9bf07 (patch)
tree 20ee25c1ce91f35f4d12d9d379dca890bd7a3a4a /src
parent 7edd42cbc09d3030fed8fc073e3138084a774bec (diff)
agx: Don't scalarize preambles in NIR
Scalarizing preambles in NIR isn't really necessary, we can do it more efficiently in the backend. This makes the final NIR a lot less annoying to read; the backend IR was already nice to read thanks to all the scalarized moves being copypropped. Plus, this is a lot simpler. No shader-db changes. Signed-off-by: Alyssa Rosenzweig <alyssa@rosenzweig.io> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21122>
Diffstat (limited to 'src')
-rw-r--r-- src/asahi/compiler/agx_compile.c          26
-rw-r--r-- src/asahi/compiler/agx_nir_opt_preamble.c 59
2 files changed, 21 insertions, 64 deletions
diff --git a/src/asahi/compiler/agx_compile.c b/src/asahi/compiler/agx_compile.c
index f118c4f0042..1f9c5c40964 100644
--- a/src/asahi/compiler/agx_compile.c
+++ b/src/asahi/compiler/agx_compile.c
@@ -561,18 +561,32 @@ static agx_instr *
agx_emit_load_preamble(agx_builder *b, agx_index dst,
nir_intrinsic_instr *instr)
{
- assert(nir_dest_num_components(instr->dest) == 1 && "already scalarized");
- return agx_mov_to(b, dst, agx_uniform(nir_intrinsic_base(instr), dst.size));
+ agx_index srcs[4] = {agx_null()};
+ unsigned dim = nir_dest_num_components(instr->dest);
+ assert(dim <= ARRAY_SIZE(srcs) && "shouldn't see larger vectors");
+
+ unsigned base = nir_intrinsic_base(instr);
+ unsigned stride = agx_size_align_16(dst.size);
+
+ for (unsigned i = 0; i < dim; ++i)
+ srcs[i] = agx_uniform(base + i * stride, dst.size);
+
+ return agx_emit_collect_to(b, dst, dim, srcs);
}
static agx_instr *
agx_emit_store_preamble(agx_builder *b, nir_intrinsic_instr *instr)
{
- assert(nir_src_num_components(instr->src[0]) == 1 && "already scalarized");
+ agx_index vec = agx_src_index(&instr->src[0]);
+ unsigned base = nir_intrinsic_base(instr);
+ unsigned stride = agx_size_align_16(vec.size);
- agx_index value = agx_src_index(&instr->src[0]);
- agx_index offset = agx_immediate(nir_intrinsic_base(instr));
- return agx_uniform_store(b, value, offset);
+ for (unsigned i = 0; i < nir_src_num_components(instr->src[0]); ++i) {
+ agx_uniform_store(b, agx_extract_nir_src(b, instr->src[0], i),
+ agx_immediate(base + i * stride));
+ }
+
+ return NULL;
}
static enum agx_dim
diff --git a/src/asahi/compiler/agx_nir_opt_preamble.c b/src/asahi/compiler/agx_nir_opt_preamble.c
index f9b2040cb58..65a849bf23d 100644
--- a/src/asahi/compiler/agx_nir_opt_preamble.c
+++ b/src/asahi/compiler/agx_nir_opt_preamble.c
@@ -25,51 +25,6 @@
#include "compiler/nir/nir_builder.h"
#include "agx_compiler.h"
-static bool
-nir_scalarize_preamble(struct nir_builder *b, nir_instr *instr,
- UNUSED void *data)
-{
- if (instr->type != nir_instr_type_intrinsic)
- return false;
-
- nir_intrinsic_instr *intr = nir_instr_as_intrinsic(instr);
- if (intr->intrinsic != nir_intrinsic_load_preamble &&
- intr->intrinsic != nir_intrinsic_store_preamble)
- return false;
-
- bool is_load = (intr->intrinsic == nir_intrinsic_load_preamble);
-
- nir_ssa_def *v = is_load
- ? &intr->dest.ssa
- : nir_ssa_for_src(b, intr->src[0],
- nir_src_num_components(intr->src[0]));
-
- if (v->num_components == 1)
- return false;
-
- /* Scalarize */
- b->cursor = nir_before_instr(&intr->instr);
- unsigned stride = MAX2(v->bit_size / 16, 1);
- unsigned base = nir_intrinsic_base(intr);
-
- if (is_load) {
- nir_ssa_def *comps[NIR_MAX_VEC_COMPONENTS];
- for (unsigned i = 0; i < v->num_components; ++i)
- comps[i] =
- nir_load_preamble(b, 1, v->bit_size, .base = base + (i * stride));
-
- nir_ssa_def_rewrite_uses(v, nir_vec(b, comps, v->num_components));
- } else {
- for (unsigned i = 0; i < v->num_components; ++i)
- nir_store_preamble(b, nir_channel(b, v, i),
- .base = base + (i * stride));
-
- nir_instr_remove(instr);
- }
-
- return true;
-}
-
static void
def_size(nir_ssa_def *def, unsigned *size, unsigned *align)
{
@@ -155,17 +110,5 @@ static const nir_opt_preamble_options preamble_options = {
bool
agx_nir_opt_preamble(nir_shader *nir, unsigned *preamble_size)
{
- bool progress = nir_opt_preamble(nir, &preamble_options, preamble_size);
-
- /* If nir_opt_preamble made progress, the shader now has
- * load_preamble/store_preamble intrinsics in it. These need to be
- * scalarized for the backend to process them appropriately.
- */
- if (progress) {
- nir_shader_instructions_pass(
- nir, nir_scalarize_preamble,
- nir_metadata_block_index | nir_metadata_dominance, NULL);
- }
-
- return progress;
+ return nir_opt_preamble(nir, &preamble_options, preamble_size);
}