summary | refs | log | tree | commit | diff
path: root/src/panfrost/midgard
diff options
context:
space:
mode:
author: Icecream95 <ixn@disroot.org> 2021-01-01 01:29:04 +1300
committer: Marge Bot <eric+marge@anholt.net> 2021-01-01 02:58:49 +0000
commit: 3665855c2e6c8c7022495ce2ff323faf51e360e3 (patch)
tree: 8863337157b9f9e585cb5e5eb3b943c751193cae /src/panfrost/midgard
parent: b504602370293c08e9dda4b262f03b7a3c5b537e (diff)
pan/mdg: Pass the memory type to mir_set_offset directly
We want to add support for more memory types, so replace the is_shared bool with an integer that is directly stored to load_store.arg_1. The new memory type values are off by 0x40, as that bit now comes from the index type.

Reviewed-by: Alyssa Rosenzweig <alyssa.rosenzweig@collabora.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/8264>
Diffstat (limited to 'src/panfrost/midgard')
-rw-r--r--src/panfrost/midgard/compiler.h6
-rw-r--r--src/panfrost/midgard/midgard_address.c10
-rw-r--r--src/panfrost/midgard/midgard_compile.c17
3 files changed, 23 insertions, 10 deletions
diff --git a/src/panfrost/midgard/compiler.h b/src/panfrost/midgard/compiler.h
index 9b985c2474f..458d50de6a2 100644
--- a/src/panfrost/midgard/compiler.h
+++ b/src/panfrost/midgard/compiler.h
@@ -533,7 +533,11 @@ void mir_insert_instruction_after_scheduled(compiler_context *ctx, midgard_block
void mir_flip(midgard_instruction *ins);
void mir_compute_temp_count(compiler_context *ctx);
-void mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset, bool is_shared);
+#define LDST_GLOBAL 0x3E
+#define LDST_SHARED 0x2E
+#define LDST_SCRATCH 0x2A
+
+void mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset, unsigned seg);
/* 'Intrinsic' move for aliasing */
diff --git a/src/panfrost/midgard/midgard_address.c b/src/panfrost/midgard/midgard_address.c
index 88726b8c626..3dbbd72eaca 100644
--- a/src/panfrost/midgard/midgard_address.c
+++ b/src/panfrost/midgard/midgard_address.c
@@ -220,7 +220,7 @@ mir_match_offset(nir_ssa_def *offset, bool first_free)
}
void
-mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset, bool is_shared)
+mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset, unsigned seg)
{
for(unsigned i = 0; i < 16; ++i) {
ins->swizzle[1][i] = 0;
@@ -232,7 +232,7 @@ mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset,
bool force_sext = (nir_src_bit_size(*offset) < 64);
if (!offset->is_ssa) {
- ins->load_store.arg_1 |= is_shared ? 0x6E : 0x7E;
+ ins->load_store.arg_1 |= seg;
ins->src[2] = nir_src_index(ctx, offset);
ins->src_types[2] = nir_type_uint | nir_src_bit_size(*offset);
@@ -244,14 +244,16 @@ mir_set_offset(compiler_context *ctx, midgard_instruction *ins, nir_src *offset,
return;
}
- struct mir_address match = mir_match_offset(offset->ssa, !is_shared);
+ bool first_free = (seg == LDST_GLOBAL);
+
+ struct mir_address match = mir_match_offset(offset->ssa, first_free);
if (match.A.def) {
ins->src[1] = nir_ssa_index(match.A.def);
ins->swizzle[1][0] = match.A.comp;
ins->src_types[1] = nir_type_uint | match.A.def->bit_size;
} else
- ins->load_store.arg_1 |= is_shared ? 0x6E : 0x7E;
+ ins->load_store.arg_1 |= seg;
if (match.B.def) {
ins->src[2] = nir_ssa_index(match.B.def);
diff --git a/src/panfrost/midgard/midgard_compile.c b/src/panfrost/midgard/midgard_compile.c
index 64e89a1681d..2e838c14fcb 100644
--- a/src/panfrost/midgard/midgard_compile.c
+++ b/src/panfrost/midgard/midgard_compile.c
@@ -1106,7 +1106,7 @@ emit_global(
bool is_read,
unsigned srcdest,
nir_src *offset,
- bool is_shared)
+ unsigned seg)
{
/* TODO: types */
@@ -1117,7 +1117,7 @@ emit_global(
else
ins = m_st_int4(srcdest, 0);
- mir_set_offset(ctx, &ins, offset, is_shared);
+ mir_set_offset(ctx, &ins, offset, seg);
mir_set_intr_mask(instr, &ins, is_read);
/* Set a valid swizzle for masked out components */
@@ -1178,7 +1178,7 @@ emit_atomic(
if (is_shared)
ins.load_store.arg_1 |= 0x6E;
} else {
- mir_set_offset(ctx, &ins, src_offset, is_shared);
+ mir_set_offset(ctx, &ins, src_offset, is_shared ? LDST_SHARED : LDST_GLOBAL);
}
mir_set_intr_mask(&instr->instr, &ins, true);
@@ -1555,7 +1555,8 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
uint32_t uindex = nir_src_as_uint(index) + 1;
emit_ubo_read(ctx, &instr->instr, reg, offset, indirect_offset, 0, uindex);
} else if (is_global || is_shared) {
- emit_global(ctx, &instr->instr, true, reg, src_offset, is_shared);
+ unsigned seg = is_global ? LDST_GLOBAL : (is_shared ? LDST_SHARED : LDST_SCRATCH);
+ emit_global(ctx, &instr->instr, true, reg, src_offset, seg);
} else if (ctx->stage == MESA_SHADER_FRAGMENT && !ctx->is_blend) {
emit_varying_read(ctx, reg, offset, nr_comp, component, indirect_offset, t | nir_dest_bit_size(instr->dest), is_flat);
} else if (ctx->is_blend) {
@@ -1782,7 +1783,13 @@ emit_intrinsic(compiler_context *ctx, nir_intrinsic_instr *instr)
reg = nir_src_index(ctx, &instr->src[0]);
emit_explicit_constant(ctx, reg, reg);
- emit_global(ctx, &instr->instr, false, reg, &instr->src[1], instr->intrinsic == nir_intrinsic_store_shared);
+ unsigned seg;
+ if (instr->intrinsic == nir_intrinsic_store_global)
+ seg = LDST_GLOBAL;
+ else if (instr->intrinsic == nir_intrinsic_store_shared)
+ seg = LDST_SHARED;
+
+ emit_global(ctx, &instr->instr, false, reg, &instr->src[1], seg);
break;
case nir_intrinsic_load_ssbo_address: