author     Faith Ekstrand <faith.ekstrand@collabora.com>   2023-02-27 08:50:50 -0600
committer  Marge Bot <emma+marge@anholt.net>               2023-03-03 02:00:39 +0000
commit     7e8a10be67d5f10b3d4828f2e282226ed03f7ce2 (patch)
tree       cb98df248e7cd695c0dc199e59df03df7d41ab2d /src/compiler/nir
parent     eb9a56b6caffdab0569bc52aaa4b8f2fc163c6d0 (diff)
nir: Make chunk_align_offset const in lower_mem_load()
This should make things clearer than mutating a value computed earlier in
the loop.  Also, rename chunk_offset to load_offset so the two names match.

Reviewed-by: M Henning <drawoc@darkrefraction.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/21524>
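For context, the pattern the change moves to can be shown with a minimal standalone C sketch; the names and numbers below are made up for illustration and are not taken from the Mesa sources. Each per-chunk quantity is computed once as a const local, so nothing reassigns a value derived earlier in the same iteration:

#include <stdint.h>
#include <stdio.h>

/* Toy stand-in for the lowering loop: walk a byte range in chunks and
 * compute each chunk's alignment offset as a const local instead of
 * mutating a variable declared earlier in the iteration. */
int
main(void)
{
   const uint32_t align_mul = 16;          /* assumed alignment multiple */
   const uint32_t whole_align_offset = 4;  /* assumed offset of the whole access */
   const uint32_t bytes_read = 40;         /* assumed total size in bytes */

   uint32_t chunk_start = 0;
   while (chunk_start < bytes_read) {
      const uint32_t bytes_left = bytes_read - chunk_start;
      const uint32_t chunk_align_offset =
         (whole_align_offset + chunk_start) % align_mul;

      /* Pretend each chunk covers at most 8 bytes. */
      const uint32_t chunk_bytes = bytes_left < 8 ? bytes_left : 8;
      printf("chunk at %u: %u bytes, align offset %u\n",
             chunk_start, chunk_bytes, chunk_align_offset);

      chunk_start += chunk_bytes;
   }
   return 0;
}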
Diffstat (limited to 'src/compiler/nir')
-rw-r--r--  src/compiler/nir/nir_lower_mem_access_bit_sizes.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/src/compiler/nir/nir_lower_mem_access_bit_sizes.c b/src/compiler/nir/nir_lower_mem_access_bit_sizes.c
index 4d687612b0d..a44f77430f7 100644
--- a/src/compiler/nir/nir_lower_mem_access_bit_sizes.c
+++ b/src/compiler/nir/nir_lower_mem_access_bit_sizes.c
@@ -108,7 +108,7 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
unsigned chunk_start = 0;
while (chunk_start < bytes_read) {
const unsigned bytes_left = bytes_read - chunk_start;
- uint32_t chunk_align_offset =
+ const uint32_t chunk_align_offset =
(whole_align_offset + chunk_start) % align_mul;
requested = mem_access_size_align_cb(intrin->intrinsic, bytes_left,
align_mul, chunk_align_offset,
@@ -142,14 +142,15 @@ lower_mem_load(nir_builder *b, nir_intrinsic_instr *intrin,
} else if (chunk_align_offset % requested.align) {
/* In this case, we know how much to adjust the offset */
uint32_t delta = chunk_align_offset % requested.align;
- nir_ssa_def *chunk_offset =
+ nir_ssa_def *load_offset =
nir_iadd_imm(b, offset, chunk_start - (int)delta);
- chunk_align_offset = (chunk_align_offset - delta) % align_mul;
+ const uint32_t load_align_offset =
+ (chunk_align_offset - delta) % align_mul;
nir_intrinsic_instr *load =
- dup_mem_intrinsic(b, intrin, chunk_offset,
- align_mul, chunk_align_offset, NULL,
+ dup_mem_intrinsic(b, intrin, load_offset,
+ align_mul, load_align_offset, NULL,
requested.num_components, requested.bit_size);
assert(requested.bit_size >= 8);
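As a worked example of the arithmetic the renamed variables carry, here is a small standalone sketch of the offset adjustment from the hunk above; the concrete inputs are assumed values for illustration, not data from a real NIR run:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
   /* Assumed example inputs, mirroring the variables in the diff. */
   const uint32_t align_mul = 16;
   const uint32_t chunk_align_offset = 6; /* this chunk's offset within align_mul */
   const uint32_t requested_align = 4;    /* alignment required by the callback */

   if (chunk_align_offset % requested_align) {
      /* Pull the load back so it starts on the required alignment. */
      const uint32_t delta = chunk_align_offset % requested_align;
      const uint32_t load_align_offset =
         (chunk_align_offset - delta) % align_mul;

      /* Prints: delta = 2, load_align_offset = 4 */
      printf("delta = %u, load_align_offset = %u\n",
             delta, load_align_offset);
   }
   return 0;
}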