author     Jason Ekstrand <jason@jlekstrand.net>    2020-05-28 18:32:01 -0500
committer  Jason Ekstrand <jason@jlekstrand.net>    2020-07-23 22:41:54 -0500
commit     af81486a8cde4dec2a695884b93b282c1710d8bd (patch)
tree       2d880f716c5d32104611dc3159be81e4574cfcff
parent     0d5cd1a5f419a4f793a47cb10f39ac44d2d4bb14 (diff)
spirv: Simplify our handling of NonUniform
The original implementation of SPV_EXT_descriptor_indexing was extremely
paranoid about the NonUniform qualifier, trying to fetch it from every
possible location and propagate it through access chains etc. However,
the Vulkan spec is quite nice to us on this and has very strict rules
for where the NonUniform decoration has to be placed. For image and
texture operations, we can search for the decoration on the spot when
we process the image or texture op. For pointers, we continue putting
it on the pointer but we don't bother trying to do anything silly like
propagate it through casts.

Reviewed-by: Caio Marcelo de Oliveira Filho <caio.oliveira@intel.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/5278>
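As a concrete illustration of the rule the commit relies on (an editorial
sketch, not part of the commit; the binding layout and identifiers are made
up), a shader written against GL_EXT_nonuniform_qualifier wraps the divergent
index in nonuniformEXT(). A front-end such as glslang then emits the
NonUniform decoration on the sampled-image operand itself, which is exactly
where vtn_handle_texture now looks for it:

    #version 450
    #extension GL_EXT_nonuniform_qualifier : require

    /* Illustrative shader, not from the Mesa tree. */
    layout(set = 0, binding = 0) uniform sampler2D u_textures[];

    layout(location = 0) flat in int v_index;   /* not dynamically uniform */
    layout(location = 1) in vec2 v_uv;
    layout(location = 0) out vec4 f_color;

    void main()
    {
       /* nonuniformEXT() is what gets the exact descriptor operand
        * decorated with NonUniform; without it, indexing with a
        * divergent v_index is undefined behavior under the Vulkan rule
        * quoted in the code comments below.
        */
       f_color = texture(u_textures[nonuniformEXT(v_index)], v_uv);
    }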
-rw-r--r--  src/compiler/spirv/spirv_to_nir.c   70
-rw-r--r--  src/compiler/spirv/vtn_private.h    31
-rw-r--r--  src/compiler/spirv/vtn_variables.c  29
3 files changed, 64 insertions(+), 66 deletions(-)
diff --git a/src/compiler/spirv/spirv_to_nir.c b/src/compiler/spirv/spirv_to_nir.c
index e3469d6b48f..32a37dc3462 100644
--- a/src/compiler/spirv/spirv_to_nir.c
+++ b/src/compiler/spirv/spirv_to_nir.c
@@ -2253,6 +2253,23 @@ image_operand_arg(struct vtn_builder *b, const uint32_t *w, uint32_t count,
}
static void
+non_uniform_decoration_cb(struct vtn_builder *b,
+                          struct vtn_value *val, int member,
+                          const struct vtn_decoration *dec, void *void_ctx)
+{
+   enum gl_access_qualifier *access = void_ctx;
+   switch (dec->decoration) {
+   case SpvDecorationNonUniformEXT:
+      *access |= ACCESS_NON_UNIFORM;
+      break;
+
+   default:
+      break;
+   }
+}
+
+
+static void
vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
const uint32_t *w, unsigned count)
{
@@ -2590,10 +2607,26 @@ vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
is_shadow && glsl_get_components(ret_type->type) == 1;
instr->component = gather_component;
-   if (image && (image->access & ACCESS_NON_UNIFORM))
+   /* The Vulkan spec says:
+    *
+    *    "If an instruction loads from or stores to a resource (including
+    *    atomics and image instructions) and the resource descriptor being
+    *    accessed is not dynamically uniform, then the operand corresponding
+    *    to that resource (e.g. the pointer or sampled image operand) must be
+    *    decorated with NonUniform."
+    *
+    * It's very careful to specify that the exact operand must be decorated
+    * NonUniform. The SPIR-V parser is not expected to chase through long
+    * chains to find the NonUniform decoration. It's either right there or we
+    * can assume it doesn't exist.
+    */
+   enum gl_access_qualifier access = 0;
+   vtn_foreach_decoration(b, sampled_val, non_uniform_decoration_cb, &access);
+
+   if (image && (access & ACCESS_NON_UNIFORM))
      instr->texture_non_uniform = true;

-   if (sampler && (sampler->access & ACCESS_NON_UNIFORM))
+   if (sampler && (access & ACCESS_NON_UNIFORM))
      instr->sampler_non_uniform = true;
/* for non-query ops, get dest_type from sampler type */
@@ -2742,6 +2775,7 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
SpvScope scope = SpvScopeInvocation;
SpvMemorySemanticsMask semantics = 0;
+ struct vtn_value *res_val;
switch (opcode) {
case SpvOpAtomicExchange:
case SpvOpAtomicCompareExchange:
@@ -2759,26 +2793,30 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
case SpvOpAtomicOr:
case SpvOpAtomicXor:
case SpvOpAtomicFAddEXT:
- image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
+ res_val = vtn_value(b, w[3], vtn_value_type_image_pointer);
+ image = *res_val->image;
scope = vtn_constant_uint(b, w[4]);
semantics = vtn_constant_uint(b, w[5]);
break;
case SpvOpAtomicStore:
- image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
+ res_val = vtn_value(b, w[1], vtn_value_type_image_pointer);
+ image = *res_val->image;
scope = vtn_constant_uint(b, w[2]);
semantics = vtn_constant_uint(b, w[3]);
break;
case SpvOpImageQuerySize:
- image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
+ res_val = vtn_value(b, w[3], vtn_value_type_pointer);
+ image.image = res_val->pointer;
image.coord = NULL;
image.sample = NULL;
image.lod = NULL;
break;
case SpvOpImageRead: {
- image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
+ res_val = vtn_value(b, w[3], vtn_value_type_pointer);
+ image.image = res_val->pointer;
image.coord = get_image_coord(b, w[4]);
const SpvImageOperandsMask operands =
@@ -2815,7 +2853,8 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
}
case SpvOpImageWrite: {
- image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
+ res_val = vtn_value(b, w[1], vtn_value_type_pointer);
+ image.image = res_val->pointer;
image.coord = get_image_coord(b, w[2]);
/* texel = w[3] */
@@ -2899,7 +2938,22 @@ vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
intrin->src[2] = nir_src_for_ssa(image.sample);
}
-   nir_intrinsic_set_access(intrin, image.image->access);
+   /* The Vulkan spec says:
+    *
+    *    "If an instruction loads from or stores to a resource (including
+    *    atomics and image instructions) and the resource descriptor being
+    *    accessed is not dynamically uniform, then the operand corresponding
+    *    to that resource (e.g. the pointer or sampled image operand) must be
+    *    decorated with NonUniform."
+    *
+    * It's very careful to specify that the exact operand must be decorated
+    * NonUniform. The SPIR-V parser is not expected to chase through long
+    * chains to find the NonUniform decoration. It's either right there or we
+    * can assume it doesn't exist.
+    */
+   enum gl_access_qualifier access = 0;
+   vtn_foreach_decoration(b, res_val, non_uniform_decoration_cb, &access);
+   nir_intrinsic_set_access(intrin, access);
switch (opcode) {
case SpvOpAtomicLoad:
diff --git a/src/compiler/spirv/vtn_private.h b/src/compiler/spirv/vtn_private.h
index 059c94494a1..b3a915d4c84 100644
--- a/src/compiler/spirv/vtn_private.h
+++ b/src/compiler/spirv/vtn_private.h
@@ -306,9 +306,6 @@ struct vtn_ssa_value {
struct vtn_ssa_value *transposed;
const struct glsl_type *type;
-
- /* Access qualifiers */
- enum gl_access_qualifier access;
};
enum vtn_base_type {
@@ -773,34 +770,6 @@ vtn_constant_int(struct vtn_builder *b, uint32_t value_id)
}
}
-static inline enum gl_access_qualifier vtn_value_access(struct vtn_value *value)
-{
-   switch (value->value_type) {
-   case vtn_value_type_invalid:
-   case vtn_value_type_undef:
-   case vtn_value_type_string:
-   case vtn_value_type_decoration_group:
-   case vtn_value_type_constant:
-   case vtn_value_type_function:
-   case vtn_value_type_block:
-   case vtn_value_type_extension:
-      return 0;
-   case vtn_value_type_type:
-      return value->type->access;
-   case vtn_value_type_pointer:
-      return value->pointer->access;
-   case vtn_value_type_ssa:
-      return value->ssa->access;
-   case vtn_value_type_image_pointer:
-      return value->image->image->access;
-   case vtn_value_type_sampled_image:
-      return value->sampled_image->image->access |
-             value->sampled_image->sampler->access;
-   }
-
-   unreachable("invalid type");
-}
-
struct vtn_ssa_value *vtn_ssa_value(struct vtn_builder *b, uint32_t value_id);
struct vtn_value *vtn_push_value_pointer(struct vtn_builder *b,
diff --git a/src/compiler/spirv/vtn_variables.c b/src/compiler/spirv/vtn_variables.c
index 504f5c742d6..58ec0510e9f 100644
--- a/src/compiler/spirv/vtn_variables.c
+++ b/src/compiler/spirv/vtn_variables.c
@@ -76,28 +76,6 @@ vtn_push_value_pointer(struct vtn_builder *b, uint32_t value_id,
return val;
}
-static void
-ssa_decoration_cb(struct vtn_builder *b, struct vtn_value *val, int member,
-                  const struct vtn_decoration *dec, void *void_ctx)
-{
-   switch (dec->decoration) {
-   case SpvDecorationNonUniformEXT:
-      if (val->value_type == vtn_value_type_ssa) {
-         val->ssa->access |= ACCESS_NON_UNIFORM;
-      } else if (val->value_type == vtn_value_type_pointer) {
-         val->pointer->access |= ACCESS_NON_UNIFORM;
-      } else if (val->value_type == vtn_value_type_sampled_image) {
-         val->sampled_image->image->access |= ACCESS_NON_UNIFORM;
-      } else if (val->value_type == vtn_value_type_image_pointer) {
-         val->image->image->access |= ACCESS_NON_UNIFORM;
-      }
-      break;
-
-   default:
-      break;
-   }
-}
-
struct vtn_value *
vtn_push_ssa(struct vtn_builder *b, uint32_t value_id,
struct vtn_type *type, struct vtn_ssa_value *ssa)
@@ -108,7 +86,6 @@ vtn_push_ssa(struct vtn_builder *b, uint32_t value_id,
} else {
val = vtn_push_value(b, value_id, vtn_value_type_ssa);
val->ssa = ssa;
- vtn_foreach_decoration(b, val, ssa_decoration_cb, NULL);
}
return val;
}
@@ -129,7 +106,8 @@ vtn_copy_value(struct vtn_builder *b, uint32_t src_value_id,
src_copy.type = dst->type;
*dst = src_copy;
- vtn_foreach_decoration(b, dst, ssa_decoration_cb, NULL);
+ if (dst->value_type == vtn_value_type_pointer)
+ dst->pointer = vtn_decorate_pointer(b, dst, dst->pointer);
}
static struct vtn_access_chain *
@@ -2563,7 +2541,6 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
chain->link[idx].mode = vtn_access_mode_id;
chain->link[idx].id = w[i];
}
- access |= vtn_value_access(link_val);
idx++;
}
@@ -2778,7 +2755,6 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
u_val->ssa = vtn_create_ssa_value(b, u_val->type->type);
u_val->ssa->def = nir_sloppy_bitcast(&b->nb, ptr_ssa->def, u_val->type->type);
- u_val->ssa->access |= ptr_ssa->access;
break;
}
@@ -2800,7 +2776,6 @@ vtn_handle_variables(struct vtn_builder *b, SpvOp opcode,
ptr_val->type->type);
ptr_val->pointer = vtn_pointer_from_ssa(b, ptr_ssa, ptr_val->type);
vtn_foreach_decoration(b, ptr_val, ptr_decoration_cb, ptr_val->pointer);
- ptr_val->pointer->access |= u_val->ssa->access;
break;
}
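
The pointer half of the change can be pictured the same way (again an
illustrative sketch with made-up names, not part of the commit): for buffer
access, nonuniformEXT() ends up decorating the access chain, i.e. the pointer
operand of the load itself, so the parser only has to keep the flag on the
vtn_pointer and never needs to propagate it through casts:

    #version 450
    #extension GL_EXT_nonuniform_qualifier : require

    /* Illustrative shader, not from the Mesa tree. */
    layout(set = 0, binding = 1, std430) buffer Block { float data[]; } u_blocks[];

    layout(location = 0) flat in int v_index;
    layout(location = 0) out vec4 f_color;

    void main()
    {
       /* The NonUniform decoration lands on the pointer produced for
        * u_blocks[...].data[0], right where vtn_variables.c reads it;
        * nothing derived from the loaded value needs to carry it.
        */
       float v = u_blocks[nonuniformEXT(v_index)].data[0];
       f_color = vec4(v, 0.0, 0.0, 1.0);
    }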