summary refs log tree commit diff
diff options
context:
space:
mode:
authorNicolai Hähnle <nicolai.haehnle@amd.com>2017-07-03 15:26:15 +0200
committerNicolai Hähnle <nicolai.haehnle@amd.com>2017-07-13 13:27:35 +0200
commit00d25fc9be4e06beb2722156f8d5a870648ad0e7 (patch)
tree87d59d23b73abc14755827c5f51bb6c83193f122
parent8e2060ba9536b060ed8faaf249f9a78f515d37af (diff)
TODO ac/nir: implement load/store_input intrinsics for VS/PS
-rw-r--r--src/amd/common/ac_nir_to_llvm.c66
1 files changed, 66 insertions, 0 deletions
diff --git a/src/amd/common/ac_nir_to_llvm.c b/src/amd/common/ac_nir_to_llvm.c
index e427c6d8a7..8b370b8cc5 100644
--- a/src/amd/common/ac_nir_to_llvm.c
+++ b/src/amd/common/ac_nir_to_llvm.c
@@ -2919,6 +2919,69 @@ load_gs_input(struct nir_to_llvm_context *ctx,
return result;
}
+/* Handle nir_intrinsic_load_input for stages whose inputs are not
+ * per-vertex (i.e. VS and FS; see the stage asserts below).
+ *
+ * WORK IN PROGRESS: this is a placeholder skeleton from a TODO commit.
+ * The real lowering (gathering from ctx->abi->inputs, constant and
+ * indirect offsets) is sketched in the commented-out code below and is
+ * not implemented yet.
+ */
+static LLVMValueRef visit_load_input(struct ac_nir_context *ctx,
+ nir_intrinsic_instr *intr)
+{
+ /* These stages only have per-vertex inputs, or no inputs at all. */
+ assert(ctx->stage != MESA_SHADER_TESS_CTRL &&
+ ctx->stage != MESA_SHADER_GEOMETRY &&
+ ctx->stage != MESA_SHADER_COMPUTE);
+ assert(ctx->stage != MESA_SHADER_TESS_EVAL && "not implemented");
+
+ /* Only 32-bit destinations are supported in this skeleton. */
+ assert(intr->dest.ssa.bit_size == 32);
+// int ve = intr->dest.ssa.num_components;
+// unsigned base = nir_intrinsic_base(intr);
+ nir_const_value *const_offset;
+
+ /* NOTE(review): non-zero component selection is not handled yet;
+  * confirm whether callers can produce component != 0 here. */
+ assert(nir_intrinsic_component(intr) == 0); /* TODO??? */
+
+ /* Indirect (non-constant) source offsets are recognized but the
+  * branch is intentionally left empty in this WIP commit. */
+ const_offset = nir_src_as_const_value(intr->src[0]);
+ if (!const_offset) {
+ }
+
+ /* FIXME: placeholder result — undef of the *void* type, even though
+  * the destination is a 32-bit SSA value; presumably this should
+  * become an undef (or gathered vector) of the destination's type
+  * once the lowering below is implemented. */
+ return LLVMGetUndef(ctx->ac.voidt);
+// for (unsigned chan = 0; chan < ve; chan++) {
+// if (indir_index) {
+// unsigned count = glsl_count_attribute_slots(
+// instr->variables[0]->var->type,
+// ctx->stage == MESA_SHADER_VERTEX);
+// count -= chan / 4;
+// LLVMValueRef tmp_vec = ac_build_gather_values_extended(
+// &ctx->ac, ctx->abi->inputs + idx + chan, count,
+// 4, false, true);
+//
+// values[chan] = LLVMBuildExtractElement(ctx->ac.builder,
+// tmp_vec,
+// indir_index, "");
+// } else
+// values[chan] = ctx->abi->inputs[idx + chan + const_index * 4];
+// }
+//
+// idx = nir_intrinsic_base(intr);
+// const_offset = nir_src_as_const_value(intr->src[0]);
+// if (const_offset) {
+// idx += const_offset->u32[0];
+// for (int i = 0; i < intr->num_components; i++) {
+// unsigned n = idx * 4 + i;
+// dst[i] = create_uniform(ctx, n);
+// }
+// } else {
+// src = get_src(ctx, &intr->src[0]);
+// for (int i = 0; i < intr->num_components; i++) {
+// int n = idx * 4 + i;
+// dst[i] = create_uniform_indirect(ctx, n,
+// get_addr(ctx, src[0], 4));
+// }
+// /* NOTE: if relative addressing is used, we set
+// * constlen in the compiler (to worst-case value)
+// * since we don't know in the assembler what the max
+// * addr reg value can be:
+// */
+// ctx->so->constlen = ctx->s->num_uniforms;
+// }
+
+}
+
static LLVMValueRef visit_load_var(struct ac_nir_context *ctx,
nir_intrinsic_instr *instr)
{
@@ -4038,6 +4101,9 @@ static void visit_intrinsic(struct ac_nir_context *ctx,
case nir_intrinsic_get_buffer_size:
result = visit_get_buffer_size(ctx, instr);
break;
+ case nir_intrinsic_load_input:
+ result = visit_load_input(ctx, instr);
+ break;
case nir_intrinsic_load_var:
result = visit_load_var(ctx, instr);
break;