author     Marek Olšák <marek.olsak@amd.com>    2022-05-12 02:50:17 -0400
committer  Marek Olšák <marek.olsak@amd.com>    2022-05-13 14:56:22 -0400
commit     39800f0fa3104c56736d5beb70a7920a33be48de (patch)
tree       8c658b5a75bc52b5fe0a66c09d4f326438cf4508 /src/gallium/drivers/radeonsi
parent     6dcf7f651f421c0f46ebf9c4c5904f452ea14eb1 (diff)
amd: change chip_class naming to "enum amd_gfx_level gfx_level"
This aligns the naming with PAL.

Acked-by: Samuel Pitoiset <samuel.pitoiset@gmail.com>
Acked-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/16469>
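For reference, a minimal C sketch of what the rename amounts to. The enum name, the value names, and the GFX6 = 8 base are taken from or implied by the diff below; the struct and function names here are made up for illustration only:

/* Before the change the field was "enum chip_class chip_class"; after it is: */
enum amd_gfx_level { GFX6 = 8, GFX7, GFX8, GFX9, GFX10, GFX10_3, GFX11 };

struct si_info_example {            /* hypothetical stand-in for the real info struct */
   enum amd_gfx_level gfx_level;    /* previously: enum chip_class chip_class */
};

static int uses_gfx10_path(const struct si_info_example *info)
{
   /* previously written as: info->chip_class >= GFX10 */
   return info->gfx_level >= GFX10;
}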
Diffstat (limited to 'src/gallium/drivers/radeonsi')
-rwxr-xr-x  src/gallium/drivers/radeonsi/ci/radeonsi-run-tests.py     28
-rw-r--r--  src/gallium/drivers/radeonsi/gfx10_shader_ngg.c            6
-rw-r--r--  src/gallium/drivers/radeonsi/radeon_uvd_enc.c              2
-rw-r--r--  src/gallium/drivers/radeonsi/radeon_uvd_enc_1_1.c          6
-rw-r--r--  src/gallium/drivers/radeonsi/radeon_vce.c                  4
-rw-r--r--  src/gallium/drivers/radeonsi/radeon_vce_52.c               4
-rwxr-xr-x  src/gallium/drivers/radeonsi/radeon_vcn_dec.c              2
-rw-r--r--  src/gallium/drivers/radeonsi/radeon_vcn_enc.c             10
-rw-r--r--  src/gallium/drivers/radeonsi/si_blit.c                    20
-rw-r--r--  src/gallium/drivers/radeonsi/si_buffer.c                   2
-rw-r--r--  src/gallium/drivers/radeonsi/si_build_pm4.h               22
-rw-r--r--  src/gallium/drivers/radeonsi/si_clear.c                   46
-rw-r--r--  src/gallium/drivers/radeonsi/si_compute.c                 44
-rw-r--r--  src/gallium/drivers/radeonsi/si_compute_blit.c            16
-rw-r--r--  src/gallium/drivers/radeonsi/si_cp_dma.c                  18
-rw-r--r--  src/gallium/drivers/radeonsi/si_cp_reg_shadowing.c         6
-rw-r--r--  src/gallium/drivers/radeonsi/si_debug.c                   46
-rw-r--r--  src/gallium/drivers/radeonsi/si_descriptors.c             66
-rw-r--r--  src/gallium/drivers/radeonsi/si_fence.c                   12
-rw-r--r--  src/gallium/drivers/radeonsi/si_get.c                     28
-rw-r--r--  src/gallium/drivers/radeonsi/si_gfx_cs.c                  30
-rw-r--r--  src/gallium/drivers/radeonsi/si_gpu_load.c                 4
-rw-r--r--  src/gallium/drivers/radeonsi/si_perfcounter.c             10
-rw-r--r--  src/gallium/drivers/radeonsi/si_pipe.c                    60
-rw-r--r--  src/gallium/drivers/radeonsi/si_pipe.h                    20
-rw-r--r--  src/gallium/drivers/radeonsi/si_query.c                   18
-rw-r--r--  src/gallium/drivers/radeonsi/si_sdma_copy_image.c         16
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader.c                  46
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader_info.c              4
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader_llvm.c             18
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader_llvm_gs.c          28
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader_llvm_ps.c          10
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader_llvm_resources.c   10
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader_llvm_tess.c        22
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader_llvm_vs.c          12
-rw-r--r--  src/gallium/drivers/radeonsi/si_shader_nir.c               2
-rw-r--r--  src/gallium/drivers/radeonsi/si_sqtt.c                    32
-rw-r--r--  src/gallium/drivers/radeonsi/si_state.c                  316
-rw-r--r--  src/gallium/drivers/radeonsi/si_state.h                    2
-rw-r--r--  src/gallium/drivers/radeonsi/si_state_binning.c            6
-rw-r--r--  src/gallium/drivers/radeonsi/si_state_draw.cpp            82
-rw-r--r--  src/gallium/drivers/radeonsi/si_state_shaders.cpp        180
-rw-r--r--  src/gallium/drivers/radeonsi/si_state_streamout.c          4
-rw-r--r--  src/gallium/drivers/radeonsi/si_state_viewport.c           6
-rw-r--r--  src/gallium/drivers/radeonsi/si_test_dma_perf.c            6
-rw-r--r--  src/gallium/drivers/radeonsi/si_test_image_copy_region.c   2
-rw-r--r--  src/gallium/drivers/radeonsi/si_texture.c                 74
-rw-r--r--  src/gallium/drivers/radeonsi/si_uvd.c                      2
48 files changed, 705 insertions, 705 deletions
diff --git a/src/gallium/drivers/radeonsi/ci/radeonsi-run-tests.py b/src/gallium/drivers/radeonsi/ci/radeonsi-run-tests.py
index 9f545235e5f..086bbd02f5d 100755
--- a/src/gallium/drivers/radeonsi/ci/radeonsi-run-tests.py
+++ b/src/gallium/drivers/radeonsi/ci/radeonsi-run-tests.py
@@ -213,7 +213,7 @@ env["WAFFLE_GBM_DEVICE"] = available_gpus[args.gpu][0]
# Use piglit's glinfo to determine the GPU name
gpu_name = "unknown"
gpu_name_full = ""
-chip_class = -1
+gfx_level = -1
env["AMD_DEBUG"] = "info"
p = subprocess.run(
@@ -230,8 +230,8 @@ for line in p.stdout.decode().split("\n"):
gpu_name_full = "(".join(line.split("(")[:-1]).strip()
gpu_name = line.replace("(TM)", "").split("(")[1].split(",")[0].lower()
break
- elif "chip_class" in line:
- chip_class = int(line.split("=")[1])
+ elif "gfx_level" in line:
+ gfx_level = int(line.split("=")[1])
output_folder = args.output_folder
print_green("Tested GPU: '{}' ({}) {}".format(gpu_name_full, gpu_name, gpu_device))
@@ -251,7 +251,7 @@ logfile = open(os.path.join(output_folder, "{}-run-tests.log".format(gpu_name)),
spin = itertools.cycle("-\\|/")
-def chip_class_to_str(cl):
+def gfx_level_to_str(cl):
supported = ["gfx6", "gfx7", "gfx8", "gfx9", "gfx10", "gfx10_3", "gfx11"]
if 8 <= cl and cl < 8 + len(supported):
return supported[cl - 8]
@@ -320,31 +320,31 @@ def parse_test_filters(include_tests):
return cmd
-def select_baseline(basepath, chip_class, gpu_name):
- chip_class_str = chip_class_to_str(chip_class)
+def select_baseline(basepath, gfx_level, gpu_name):
+ gfx_level_str = gfx_level_to_str(gfx_level)
# select the best baseline we can find
# 1. exact match
- exact = os.path.join(base, "{}-{}-fail.csv".format(chip_class_str, gpu_name))
+ exact = os.path.join(base, "{}-{}-fail.csv".format(gfx_level_str, gpu_name))
if os.path.exists(exact):
return exact
- # 2. any baseline with the same chip_class
- while chip_class >= 8:
+ # 2. any baseline with the same gfx_level
+ while gfx_level >= 8:
for subdir, dirs, files in os.walk(basepath):
for file in files:
- if file.find(chip_class_str) == 0 and file.endswith("-fail.csv"):
+ if file.find(gfx_level_str) == 0 and file.endswith("-fail.csv"):
return os.path.join(base, file)
# No match. Try an earlier class
- chip_class = chip_class - 1
- chip_class_str = chip_class_to_str(chip_class)
+ gfx_level = gfx_level - 1
+ gfx_level_str = gfx_level_to_str(gfx_level)
return exact
filters_args = parse_test_filters(args.include_tests)
-baseline = select_baseline(base, chip_class, gpu_name)
+baseline = select_baseline(base, gfx_level, gpu_name)
flakes = os.path.join(
- base, "{}-{}-flakes.csv".format(chip_class_to_str(chip_class), gpu_name)
+ base, "{}-{}-flakes.csv".format(gfx_level_to_str(gfx_level), gpu_name)
)
if os.path.exists(baseline):
diff --git a/src/gallium/drivers/radeonsi/gfx10_shader_ngg.c b/src/gallium/drivers/radeonsi/gfx10_shader_ngg.c
index 1f0bfce52f8..05034684ee0 100644
--- a/src/gallium/drivers/radeonsi/gfx10_shader_ngg.c
+++ b/src/gallium/drivers/radeonsi/gfx10_shader_ngg.c
@@ -1339,7 +1339,7 @@ void gfx10_ngg_culling_build_end(struct si_shader_context *ctx)
ret = LLVMBuildInsertValue(ctx->ac.builder, ret, new_merged_wave_info, 3, "");
if (ctx->stage == MESA_SHADER_TESS_EVAL)
ret = si_insert_input_ret(ctx, ret, ctx->args.tess_offchip_offset, 4);
- if (ctx->ac.chip_class >= GFX11)
+ if (ctx->ac.gfx_level >= GFX11)
ret = si_insert_input_ret(ctx, ret, ctx->args.gs_attr_offset, 5);
ret = si_insert_input_ptr(ctx, ret, ctx->internal_bindings, 8 + SI_SGPR_INTERNAL_BINDINGS);
@@ -1349,7 +1349,7 @@ void gfx10_ngg_culling_build_end(struct si_shader_context *ctx)
8 + SI_SGPR_CONST_AND_SHADER_BUFFERS);
ret = si_insert_input_ptr(ctx, ret, ctx->samplers_and_images, 8 + SI_SGPR_SAMPLERS_AND_IMAGES);
ret = si_insert_input_ptr(ctx, ret, ctx->vs_state_bits, 8 + SI_SGPR_VS_STATE_BITS);
- if (ctx->ac.chip_class >= GFX11)
+ if (ctx->ac.gfx_level >= GFX11)
ret = si_insert_input_ptr(ctx, ret, ctx->gs_attr_address, 8 + GFX9_SGPR_ATTRIBUTE_RING_ADDR);
if (ctx->stage == MESA_SHADER_VERTEX) {
@@ -2282,7 +2282,7 @@ bool gfx10_ngg_calculate_subgroup_info(struct si_shader *shader)
/* All these are per subgroup: */
const unsigned min_esverts =
- gs_sel->screen->info.chip_class >= GFX10_3 ? 29 : (24 - 1 + max_verts_per_prim);
+ gs_sel->screen->info.gfx_level >= GFX10_3 ? 29 : (24 - 1 + max_verts_per_prim);
bool max_vert_out_per_gs_instance = false;
unsigned max_gsprims_base = gs_sel->screen->ngg_subgroup_size; /* default prim group size clamp */
unsigned max_esverts_base = gs_sel->screen->ngg_subgroup_size;
diff --git a/src/gallium/drivers/radeonsi/radeon_uvd_enc.c b/src/gallium/drivers/radeonsi/radeon_uvd_enc.c
index bd08bc9e8af..07b75fa7743 100644
--- a/src/gallium/drivers/radeonsi/radeon_uvd_enc.c
+++ b/src/gallium/drivers/radeonsi/radeon_uvd_enc.c
@@ -321,7 +321,7 @@ struct pipe_video_codec *radeon_uvd_create_encoder(struct pipe_context *context,
get_buffer(((struct vl_video_buffer *)tmp_buf)->resources[0], NULL, &tmp_surf);
- cpb_size = (sscreen->info.chip_class < GFX9)
+ cpb_size = (sscreen->info.gfx_level < GFX9)
? align(tmp_surf->u.legacy.level[0].nblk_x * tmp_surf->bpe, 128) *
align(tmp_surf->u.legacy.level[0].nblk_y, 32)
: align(tmp_surf->u.gfx9.surf_pitch * tmp_surf->bpe, 256) *
diff --git a/src/gallium/drivers/radeonsi/radeon_uvd_enc_1_1.c b/src/gallium/drivers/radeonsi/radeon_uvd_enc_1_1.c
index 5d420d04a11..7678e954b8f 100644
--- a/src/gallium/drivers/radeonsi/radeon_uvd_enc_1_1.c
+++ b/src/gallium/drivers/radeonsi/radeon_uvd_enc_1_1.c
@@ -756,7 +756,7 @@ static void radeon_uvd_enc_ctx(struct radeon_uvd_encoder *enc)
struct si_screen *sscreen = (struct si_screen *)enc->screen;
enc->enc_pic.ctx_buf.swizzle_mode = 0;
- if (sscreen->info.chip_class < GFX9) {
+ if (sscreen->info.gfx_level < GFX9) {
enc->enc_pic.ctx_buf.rec_luma_pitch = (enc->luma->u.legacy.level[0].nblk_x * enc->luma->bpe);
enc->enc_pic.ctx_buf.rec_chroma_pitch =
(enc->chroma->u.legacy.level[0].nblk_x * enc->chroma->bpe);
@@ -874,7 +874,7 @@ static void radeon_uvd_enc_encode_params_hevc(struct radeon_uvd_encoder *enc)
}
enc->enc_pic.enc_params.allowed_max_bitstream_size = enc->bs_size;
- if (sscreen->info.chip_class < GFX9) {
+ if (sscreen->info.gfx_level < GFX9) {
enc->enc_pic.enc_params.input_pic_luma_pitch =
(enc->luma->u.legacy.level[0].nblk_x * enc->luma->bpe);
enc->enc_pic.enc_params.input_pic_chroma_pitch =
@@ -897,7 +897,7 @@ static void radeon_uvd_enc_encode_params_hevc(struct radeon_uvd_encoder *enc)
RADEON_ENC_CS(enc->enc_pic.enc_params.pic_type);
RADEON_ENC_CS(enc->enc_pic.enc_params.allowed_max_bitstream_size);
- if (sscreen->info.chip_class < GFX9) {
+ if (sscreen->info.gfx_level < GFX9) {
RADEON_ENC_READ(enc->handle, RADEON_DOMAIN_VRAM, (uint64_t)enc->luma->u.legacy.level[0].offset_256B * 256);
RADEON_ENC_READ(enc->handle, RADEON_DOMAIN_VRAM, (uint64_t)enc->chroma->u.legacy.level[0].offset_256B * 256);
} else {
diff --git a/src/gallium/drivers/radeonsi/radeon_vce.c b/src/gallium/drivers/radeonsi/radeon_vce.c
index d8e853d0a09..74fce82d09e 100644
--- a/src/gallium/drivers/radeonsi/radeon_vce.c
+++ b/src/gallium/drivers/radeonsi/radeon_vce.c
@@ -219,7 +219,7 @@ void si_vce_frame_offset(struct rvce_encoder *enc, struct rvce_cpb_slot *slot, s
struct si_screen *sscreen = (struct si_screen *)enc->screen;
unsigned pitch, vpitch, fsize;
- if (sscreen->info.chip_class < GFX9) {
+ if (sscreen->info.gfx_level < GFX9) {
pitch = align(enc->luma->u.legacy.level[0].nblk_x * enc->luma->bpe, 128);
vpitch = align(enc->luma->u.legacy.level[0].nblk_y, 16);
} else {
@@ -449,7 +449,7 @@ struct pipe_video_codec *si_vce_create_encoder(struct pipe_context *context,
get_buffer(((struct vl_video_buffer *)tmp_buf)->resources[0], NULL, &tmp_surf);
- cpb_size = (sscreen->info.chip_class < GFX9)
+ cpb_size = (sscreen->info.gfx_level < GFX9)
? align(tmp_surf->u.legacy.level[0].nblk_x * tmp_surf->bpe, 128) *
align(tmp_surf->u.legacy.level[0].nblk_y, 32)
:
diff --git a/src/gallium/drivers/radeonsi/radeon_vce_52.c b/src/gallium/drivers/radeonsi/radeon_vce_52.c
index 5dc6f733a38..b52e3ee6264 100644
--- a/src/gallium/drivers/radeonsi/radeon_vce_52.c
+++ b/src/gallium/drivers/radeonsi/radeon_vce_52.c
@@ -190,7 +190,7 @@ static void create(struct rvce_encoder *enc)
RVCE_CS(enc->base.width); // encImageWidth
RVCE_CS(enc->base.height); // encImageHeight
- if (sscreen->info.chip_class < GFX9) {
+ if (sscreen->info.gfx_level < GFX9) {
RVCE_CS(enc->luma->u.legacy.level[0].nblk_x * enc->luma->bpe); // encRefPicLumaPitch
RVCE_CS(enc->chroma->u.legacy.level[0].nblk_x * enc->chroma->bpe); // encRefPicChromaPitch
RVCE_CS(align(enc->luma->u.legacy.level[0].nblk_y, 16) / 8); // encRefYHeightInQw
@@ -261,7 +261,7 @@ static void encode(struct rvce_encoder *enc)
RVCE_CS(enc->enc_pic.eo.end_of_sequence);
RVCE_CS(enc->enc_pic.eo.end_of_stream);
- if (sscreen->info.chip_class < GFX9) {
+ if (sscreen->info.gfx_level < GFX9) {
RVCE_READ(enc->handle, RADEON_DOMAIN_VRAM,
(uint64_t)enc->luma->u.legacy.level[0].offset_256B * 256); // inputPictureLumaAddressHi/Lo
RVCE_READ(enc->handle, RADEON_DOMAIN_VRAM,
diff --git a/src/gallium/drivers/radeonsi/radeon_vcn_dec.c b/src/gallium/drivers/radeonsi/radeon_vcn_dec.c
index 8df5b2989ab..cd4158f4d7b 100755
--- a/src/gallium/drivers/radeonsi/radeon_vcn_dec.c
+++ b/src/gallium/drivers/radeonsi/radeon_vcn_dec.c
@@ -2770,7 +2770,7 @@ struct pipe_video_codec *radeon_create_decoder(struct pipe_context *context,
dec->ws = ws;
if (u_reduce_video_profile(templ->profile) != PIPE_VIDEO_FORMAT_JPEG &&
- sctx->chip_class >= GFX11)
+ sctx->gfx_level >= GFX11)
dec->vcn_dec_sw_ring = true;
if (!ws->cs_create(&dec->cs, sctx->ctx, ring, NULL, NULL, false)) {
diff --git a/src/gallium/drivers/radeonsi/radeon_vcn_enc.c b/src/gallium/drivers/radeonsi/radeon_vcn_enc.c
index 242a4b8c5f1..b02bda50395 100644
--- a/src/gallium/drivers/radeonsi/radeon_vcn_enc.c
+++ b/src/gallium/drivers/radeonsi/radeon_vcn_enc.c
@@ -477,7 +477,7 @@ static void radeon_enc_get_feedback(struct pipe_video_codec *encoder, void *feed
}
static int setup_dpb(struct radeon_encoder *enc, enum pipe_format buffer_format,
- enum chip_class chip_class)
+ enum amd_gfx_level gfx_level)
{
uint32_t aligned_width = align(enc->base.width, 16);
uint32_t aligned_height = align(enc->base.height, 16);
@@ -494,7 +494,7 @@ static int setup_dpb(struct radeon_encoder *enc, enum pipe_format buffer_format,
int i;
for (i = 0; i < num_reconstructed_pictures; i++) {
- if (chip_class >= GFX11) {
+ if (gfx_level >= GFX11) {
enc->enc_pic.ctx_buf.reconstructed_pictures_v4_0[i].luma_offset = offset;
offset += luma_size;
enc->enc_pic.ctx_buf.reconstructed_pictures_v4_0[i].chroma_offset = offset;
@@ -572,7 +572,7 @@ struct pipe_video_codec *radeon_create_encoder(struct pipe_context *context,
get_buffer(((struct vl_video_buffer *)tmp_buf)->resources[0], NULL, &tmp_surf);
- cpb_size = (sscreen->info.chip_class < GFX9)
+ cpb_size = (sscreen->info.gfx_level < GFX9)
? align(tmp_surf->u.legacy.level[0].nblk_x * tmp_surf->bpe, 128) *
align(tmp_surf->u.legacy.level[0].nblk_y, 32)
: align(tmp_surf->u.gfx9.surf_pitch * tmp_surf->bpe, 256) *
@@ -582,14 +582,14 @@ struct pipe_video_codec *radeon_create_encoder(struct pipe_context *context,
cpb_size = cpb_size * enc->cpb_num;
tmp_buf->destroy(tmp_buf);
- cpb_size += setup_dpb(enc, templat.buffer_format, sscreen->info.chip_class);
+ cpb_size += setup_dpb(enc, templat.buffer_format, sscreen->info.gfx_level);
if (!si_vid_create_buffer(enc->screen, &enc->cpb, cpb_size, PIPE_USAGE_DEFAULT)) {
RVID_ERR("Can't create CPB buffer.\n");
goto error;
}
- if (sscreen->info.chip_class >= GFX11)
+ if (sscreen->info.gfx_level >= GFX11)
radeon_enc_4_0_init(enc);
else if (sscreen->info.family >= CHIP_SIENNA_CICHLID)
radeon_enc_3_0_init(enc);
diff --git a/src/gallium/drivers/radeonsi/si_blit.c b/src/gallium/drivers/radeonsi/si_blit.c
index 25a897985ff..cbf4e63a890 100644
--- a/src/gallium/drivers/radeonsi/si_blit.c
+++ b/src/gallium/drivers/radeonsi/si_blit.c
@@ -99,7 +99,7 @@ void si_blitter_end(struct si_context *sctx)
* non-global VS user SGPRs. */
sctx->shader_pointers_dirty |= SI_DESCS_SHADER_MASK(VERTEX);
- if (sctx->chip_class >= GFX11)
+ if (sctx->gfx_level >= GFX11)
sctx->gs_attribute_ring_pointer_dirty = true;
/* Reset SI_SGPR_SMALL_PRIM_CULL_INFO: */
@@ -451,7 +451,7 @@ static void si_blit_decompress_color(struct si_context *sctx, struct si_texture
goto expand_fmask;
/* No color decompression is needed on GFX11. */
- assert(sctx->chip_class < GFX11 || need_dcc_decompress);
+ assert(sctx->gfx_level < GFX11 || need_dcc_decompress);
if (unlikely(sctx->log))
u_log_printf(sctx->log,
@@ -460,7 +460,7 @@ static void si_blit_decompress_color(struct si_context *sctx, struct si_texture
first_level, last_level, level_mask);
if (need_dcc_decompress) {
- assert(sctx->chip_class == GFX8 || tex->buffer.b.b.nr_storage_samples >= 2);
+ assert(sctx->gfx_level == GFX8 || tex->buffer.b.b.nr_storage_samples >= 2);
custom_blend = sctx->custom_blend_dcc_decompress;
assert(vi_dcc_enabled(tex, first_level));
@@ -540,7 +540,7 @@ static void si_blit_decompress_color(struct si_context *sctx, struct si_texture
expand_fmask:
if (need_fmask_expand && tex->surface.fmask_offset && !tex->fmask_is_identity) {
- assert(sctx->chip_class < GFX11); /* no FMASK on gfx11 */
+ assert(sctx->gfx_level < GFX11); /* no FMASK on gfx11 */
si_compute_expand_fmask(&sctx->b, &tex->buffer.b.b);
tex->fmask_is_identity = true;
}
@@ -804,7 +804,7 @@ void si_decompress_textures(struct si_context *sctx, unsigned shader_mask)
}
}
- if (sctx->chip_class == GFX10_3 && need_flush) {
+ if (sctx->gfx_level == GFX10_3 && need_flush) {
/* This fixes a corruption with the following sequence:
* - fast clear depth
* - decompress depth
@@ -903,7 +903,7 @@ static bool si_can_use_compute_blit(struct si_context *sctx, enum pipe_format fo
return false;
/* Image stores support DCC since GFX10. */
- if (has_dcc && is_store && sctx->chip_class < GFX10)
+ if (has_dcc && is_store && sctx->gfx_level < GFX10)
return false;
return true;
@@ -1168,7 +1168,7 @@ resolve_to_temp:
SI_RESOURCE_FLAG_DISABLE_DCC | SI_RESOURCE_FLAG_DRIVER_INTERNAL;
/* The src and dst microtile modes must be the same. */
- if (sctx->chip_class <= GFX8 && src->surface.micro_tile_mode == RADEON_MICRO_MODE_DISPLAY)
+ if (sctx->gfx_level <= GFX8 && src->surface.micro_tile_mode == RADEON_MICRO_MODE_DISPLAY)
templ.bind = PIPE_BIND_SCANOUT;
else
templ.bind = 0;
@@ -1206,11 +1206,11 @@ static void si_blit(struct pipe_context *ctx, const struct pipe_blit_info *info)
/* Gfx11 doesn't have CB_RESOLVE. */
/* TODO: Use compute-based resolving instead. */
- if (sctx->chip_class < GFX11 && do_hardware_msaa_resolve(ctx, info))
+ if (sctx->gfx_level < GFX11 && do_hardware_msaa_resolve(ctx, info))
return;
if ((info->dst.resource->bind & PIPE_BIND_PRIME_BLIT_DST) && sdst->surface.is_linear &&
- sctx->chip_class >= GFX7) {
+ sctx->gfx_level >= GFX7) {
struct si_texture *ssrc = (struct si_texture *)info->src.resource;
/* Use SDMA or async compute when copying to a DRI_PRIME imported linear surface. */
bool async_copy = info->dst.box.x == 0 && info->dst.box.y == 0 && info->dst.box.z == 0 &&
@@ -1345,7 +1345,7 @@ void si_decompress_dcc(struct si_context *sctx, struct si_texture *tex)
if (!tex->surface.meta_offset || !sctx->has_graphics)
return;
- if (sctx->chip_class == GFX8 || tex->buffer.b.b.nr_storage_samples >= 2) {
+ if (sctx->gfx_level == GFX8 || tex->buffer.b.b.nr_storage_samples >= 2) {
si_blit_decompress_color(sctx, tex, 0, tex->buffer.b.b.last_level, 0,
util_max_layer(&tex->buffer.b.b, 0), true, false);
} else {
diff --git a/src/gallium/drivers/radeonsi/si_buffer.c b/src/gallium/drivers/radeonsi/si_buffer.c
index d382916741e..682967aa345 100644
--- a/src/gallium/drivers/radeonsi/si_buffer.c
+++ b/src/gallium/drivers/radeonsi/si_buffer.c
@@ -143,7 +143,7 @@ void si_init_resource_fields(struct si_screen *sscreen, struct si_resource *res,
* Only CP DMA and optimized compute benefit from this.
* GFX8 and older don't support RADEON_FLAG_UNCACHED.
*/
- if (sscreen->info.chip_class >= GFX9 &&
+ if (sscreen->info.gfx_level >= GFX9 &&
res->b.b.flags & SI_RESOURCE_FLAG_UNCACHED)
res->flags |= RADEON_FLAG_UNCACHED;
diff --git a/src/gallium/drivers/radeonsi/si_build_pm4.h b/src/gallium/drivers/radeonsi/si_build_pm4.h
index a37ab1ba6ef..f0ba9317ed8 100644
--- a/src/gallium/drivers/radeonsi/si_build_pm4.h
+++ b/src/gallium/drivers/radeonsi/si_build_pm4.h
@@ -151,13 +151,13 @@
radeon_emit(value); \
} while (0)
-#define radeon_set_uconfig_reg_idx(screen, chip_class, reg, idx, value) do { \
+#define radeon_set_uconfig_reg_idx(screen, gfx_level, reg, idx, value) do { \
SI_CHECK_SHADOWED_REGS(reg, 1); \
assert((reg) >= CIK_UCONFIG_REG_OFFSET && (reg) < CIK_UCONFIG_REG_END); \
assert((idx) != 0); \
unsigned __opcode = PKT3_SET_UCONFIG_REG_INDEX; \
- if ((chip_class) < GFX9 || \
- ((chip_class) == GFX9 && (screen)->info.me_fw_version < 26)) \
+ if ((gfx_level) < GFX9 || \
+ ((gfx_level) == GFX9 && (screen)->info.me_fw_version < 26)) \
__opcode = PKT3_SET_UCONFIG_REG; \
radeon_emit(PKT3(__opcode, 1, 0)); \
radeon_emit(((reg) - CIK_UCONFIG_REG_OFFSET) >> 2 | ((idx) << 28)); \
@@ -263,7 +263,7 @@
unsigned __value = val; \
if (((sctx->tracked_regs.reg_saved >> (reg)) & 0x1) != 0x1 || \
sctx->tracked_regs.reg_value[reg] != __value) { \
- if (sctx->chip_class >= GFX10) \
+ if (sctx->gfx_level >= GFX10) \
radeon_set_sh_reg_idx3(offset, __value); \
else \
radeon_set_sh_reg(offset, __value); \
@@ -323,7 +323,7 @@ static inline void radeon_set_sh_reg_idx3_func(struct radeon_cmdbuf *cs, unsigne
/* This should be evaluated at compile time if all parameters are constants. */
static ALWAYS_INLINE unsigned
-si_get_user_data_base(enum chip_class chip_class, enum si_has_tess has_tess,
+si_get_user_data_base(enum amd_gfx_level gfx_level, enum si_has_tess has_tess,
enum si_has_gs has_gs, enum si_has_ngg ngg,
enum pipe_shader_type shader)
{
@@ -331,14 +331,14 @@ si_get_user_data_base(enum chip_class chip_class, enum si_has_tess has_tess,
case PIPE_SHADER_VERTEX:
/* VS can be bound as VS, ES, or LS. */
if (has_tess) {
- if (chip_class >= GFX10) {
+ if (gfx_level >= GFX10) {
return R_00B430_SPI_SHADER_USER_DATA_HS_0;
- } else if (chip_class == GFX9) {
+ } else if (gfx_level == GFX9) {
return R_00B430_SPI_SHADER_USER_DATA_LS_0;
} else {
return R_00B530_SPI_SHADER_USER_DATA_LS_0;
}
- } else if (chip_class >= GFX10) {
+ } else if (gfx_level >= GFX10) {
if (ngg || has_gs) {
return R_00B230_SPI_SHADER_USER_DATA_GS_0;
} else {
@@ -351,7 +351,7 @@ si_get_user_data_base(enum chip_class chip_class, enum si_has_tess has_tess,
}
case PIPE_SHADER_TESS_CTRL:
- if (chip_class == GFX9) {
+ if (gfx_level == GFX9) {
return R_00B430_SPI_SHADER_USER_DATA_LS_0;
} else {
return R_00B430_SPI_SHADER_USER_DATA_HS_0;
@@ -360,7 +360,7 @@ si_get_user_data_base(enum chip_class chip_class, enum si_has_tess has_tess,
case PIPE_SHADER_TESS_EVAL:
/* TES can be bound as ES, VS, or not bound. */
if (has_tess) {
- if (chip_class >= GFX10) {
+ if (gfx_level >= GFX10) {
if (ngg || has_gs) {
return R_00B230_SPI_SHADER_USER_DATA_GS_0;
} else {
@@ -376,7 +376,7 @@ si_get_user_data_base(enum chip_class chip_class, enum si_has_tess has_tess,
}
case PIPE_SHADER_GEOMETRY:
- if (chip_class == GFX9) {
+ if (gfx_level == GFX9) {
return R_00B330_SPI_SHADER_USER_DATA_ES_0;
} else {
return R_00B230_SPI_SHADER_USER_DATA_GS_0;
diff --git a/src/gallium/drivers/radeonsi/si_clear.c b/src/gallium/drivers/radeonsi/si_clear.c
index 93e15450714..fcf9fc78917 100644
--- a/src/gallium/drivers/radeonsi/si_clear.c
+++ b/src/gallium/drivers/radeonsi/si_clear.c
@@ -71,7 +71,7 @@ void si_execute_clears(struct si_context *sctx, struct si_clear_info *info,
sctx->flags |= SI_CONTEXT_INV_VCACHE;
/* GFX6-8: CB and DB don't use L2. */
- if (sctx->chip_class <= GFX8)
+ if (sctx->gfx_level <= GFX8)
sctx->flags |= SI_CONTEXT_INV_L2;
/* Execute clears. */
@@ -100,13 +100,13 @@ void si_execute_clears(struct si_context *sctx, struct si_clear_info *info,
sctx->flags |= SI_CONTEXT_CS_PARTIAL_FLUSH;
/* GFX6-8: CB and DB don't use L2. */
- if (sctx->chip_class <= GFX8)
+ if (sctx->gfx_level <= GFX8)
sctx->flags |= SI_CONTEXT_WB_L2;
}
static bool si_alloc_separate_cmask(struct si_screen *sscreen, struct si_texture *tex)
{
- assert(sscreen->info.chip_class < GFX11);
+ assert(sscreen->info.gfx_level < GFX11);
/* CMASK for MSAA is allocated in advance or always disabled
* by "nofmask" option.
@@ -171,7 +171,7 @@ bool vi_alpha_is_on_msb(struct si_screen *sscreen, enum pipe_format format)
{
format = si_simplify_cb_format(format);
const struct util_format_description *desc = util_format_description(format);
- unsigned comp_swap = si_translate_colorswap(sscreen->info.chip_class, format, false);
+ unsigned comp_swap = si_translate_colorswap(sscreen->info.gfx_level, format, false);
/* The following code matches the hw behavior. */
if (desc->nr_channels == 1) {
@@ -426,11 +426,11 @@ bool vi_dcc_get_clear_info(struct si_context *sctx, struct si_texture *tex, unsi
assert(vi_dcc_enabled(tex, level));
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
/* 4x and 8x MSAA needs a sophisticated compute shader for
* the clear. GFX11 doesn't need that.
*/
- if (sctx->chip_class < GFX11 && tex->buffer.b.b.nr_storage_samples >= 4)
+ if (sctx->gfx_level < GFX11 && tex->buffer.b.b.nr_storage_samples >= 4)
return false;
unsigned num_layers = util_num_layers(&tex->buffer.b.b, level);
@@ -448,7 +448,7 @@ bool vi_dcc_get_clear_info(struct si_context *sctx, struct si_texture *tex, unsi
*/
return false;
}
- } else if (sctx->chip_class == GFX9) {
+ } else if (sctx->gfx_level == GFX9) {
/* TODO: Implement DCC fast clear for level 0 of mipmapped textures. Mipmapped
* DCC has to clear a rectangular area of DCC for level 0 (because the whole miptree
* is organized in a 2D plane).
@@ -493,16 +493,16 @@ bool vi_dcc_get_clear_info(struct si_context *sctx, struct si_texture *tex, unsi
*/
static void si_set_optimal_micro_tile_mode(struct si_screen *sscreen, struct si_texture *tex)
{
- if (sscreen->info.chip_class >= GFX10 || tex->buffer.b.is_shared ||
+ if (sscreen->info.gfx_level >= GFX10 || tex->buffer.b.is_shared ||
tex->buffer.b.b.nr_samples <= 1 ||
tex->surface.micro_tile_mode == tex->last_msaa_resolve_target_micro_mode)
return;
- assert(sscreen->info.chip_class >= GFX9 ||
+ assert(sscreen->info.gfx_level >= GFX9 ||
tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_2D);
assert(tex->buffer.b.b.last_level == 0);
- if (sscreen->info.chip_class >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX9) {
/* 4K or larger tiles only. 0 is linear. 1-3 are 256B tiles. */
assert(tex->surface.u.gfx9.swizzle_mode >= 4);
@@ -533,7 +533,7 @@ static void si_set_optimal_micro_tile_mode(struct si_screen *sscreen, struct si_
assert(!"unexpected micro mode");
return;
}
- } else if (sscreen->info.chip_class >= GFX7) {
+ } else if (sscreen->info.gfx_level >= GFX7) {
/* These magic numbers were copied from addrlib. It doesn't use
* any definitions for them either. They are all 2D_TILED_THIN1
* modes with different bpp and micro tile mode.
@@ -713,7 +713,7 @@ static void si_fast_clear(struct si_context *sctx, unsigned *buffers,
continue;
}
- if (sctx->chip_class <= GFX8 && tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
+ if (sctx->gfx_level <= GFX8 && tex->surface.u.legacy.level[0].mode == RADEON_SURF_MODE_1D &&
!sctx->screen->info.htile_cmask_support_1d_tiling)
continue;
@@ -735,7 +735,7 @@ static void si_fast_clear(struct si_context *sctx, unsigned *buffers,
if (sctx->screen->debug_flags & DBG(NO_DCC_CLEAR))
continue;
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
if (!gfx11_get_dcc_clear_parameters(sctx->screen, fb->cbufs[i]->format, color,
&reset_value))
continue;
@@ -783,7 +783,7 @@ static void si_fast_clear(struct si_context *sctx, unsigned *buffers,
/* DCC fast clear with MSAA should clear CMASK to 0xC. */
if (tex->buffer.b.b.nr_samples >= 2 && tex->cmask_buffer) {
- assert(sctx->chip_class < GFX11); /* no FMASK/CMASK on GFX11 */
+ assert(sctx->gfx_level < GFX11); /* no FMASK/CMASK on GFX11 */
assert(num_clears < ARRAY_SIZE(info));
si_init_buffer_clear(&info[num_clears++], &tex->cmask_buffer->b.b,
tex->surface.cmask_offset, tex->surface.cmask_size, 0xCCCCCCCC);
@@ -792,7 +792,7 @@ static void si_fast_clear(struct si_context *sctx, unsigned *buffers,
}
} else {
/* No CMASK on GFX11. */
- if (sctx->chip_class >= GFX11)
+ if (sctx->gfx_level >= GFX11)
continue;
if (level > 0)
@@ -824,7 +824,7 @@ static void si_fast_clear(struct si_context *sctx, unsigned *buffers,
uint64_t cmask_offset = 0;
unsigned clear_size = 0;
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
assert(level == 0);
/* Clearing CMASK with both multiple levels and multiple layers is not
@@ -847,7 +847,7 @@ static void si_fast_clear(struct si_context *sctx, unsigned *buffers,
} else {
assert(0); /* this is prevented above */
}
- } else if (sctx->chip_class == GFX9) {
+ } else if (sctx->gfx_level == GFX9) {
/* TODO: Implement CMASK fast clear for level 0 of mipmapped textures. Mipmapped
* CMASK has to clear a rectangular area of CMASK for level 0 (because the whole
* miptree is organized in a 2D plane).
@@ -879,7 +879,7 @@ static void si_fast_clear(struct si_context *sctx, unsigned *buffers,
if ((eliminate_needed || fmask_decompress_needed) &&
!(tex->dirty_level_mask & (1 << level))) {
- assert(sctx->chip_class < GFX11); /* no decompression needed on GFX11 */
+ assert(sctx->gfx_level < GFX11); /* no decompression needed on GFX11 */
tex->dirty_level_mask |= 1 << level;
si_set_sampler_depth_decompress_mask(sctx, tex);
p_atomic_inc(&sctx->screen->compressed_colortex_counter);
@@ -894,7 +894,7 @@ static void si_fast_clear(struct si_context *sctx, unsigned *buffers,
continue;
/* There are no clear color registers on GFX11. */
- assert(sctx->chip_class < GFX11);
+ assert(sctx->gfx_level < GFX11);
if (si_set_clear_color(tex, fb->cbufs[i]->format, color)) {
sctx->framebuffer.dirty_cbufs |= 1 << i;
@@ -973,7 +973,7 @@ static void si_fast_clear(struct si_context *sctx, unsigned *buffers,
clear_value = !zstex->htile_stencil_disabled ? 0xfffff30f : 0xfffc000f;
}
- zstex->need_flush_after_depth_decompression = sctx->chip_class == GFX10_3;
+ zstex->need_flush_after_depth_decompression = sctx->gfx_level == GFX10_3;
assert(num_clears < ARRAY_SIZE(info));
si_init_buffer_clear(&info[num_clears++], &zstex->buffer.b.b,
@@ -992,7 +992,7 @@ static void si_fast_clear(struct si_context *sctx, unsigned *buffers,
unsigned htile_size = 0;
/* Determine the HTILE subset to clear. */
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
/* This can only clear a layered texture with 1 level or a mipmap texture
* with 1 layer. Other cases are unimplemented.
*/
@@ -1080,7 +1080,7 @@ static void si_fast_clear(struct si_context *sctx, unsigned *buffers,
}
}
- zstex->need_flush_after_depth_decompression = update_db_depth_clear && sctx->chip_class == GFX10_3;
+ zstex->need_flush_after_depth_decompression = update_db_depth_clear && sctx->gfx_level == GFX10_3;
/* Update DB_DEPTH_CLEAR. */
if (update_db_depth_clear &&
@@ -1273,7 +1273,7 @@ static void si_clear_render_target(struct pipe_context *ctx, struct pipe_surface
return;
if (dst->texture->nr_samples <= 1 &&
- (sctx->chip_class >= GFX10 || !vi_dcc_enabled(sdst, dst->u.tex.level))) {
+ (sctx->gfx_level >= GFX10 || !vi_dcc_enabled(sdst, dst->u.tex.level))) {
si_compute_clear_render_target(ctx, dst, color, dstx, dsty, width, height,
render_condition_enabled);
return;
diff --git a/src/gallium/drivers/radeonsi/si_compute.c b/src/gallium/drivers/radeonsi/si_compute.c
index c19e0d4746d..7c17341dd76 100644
--- a/src/gallium/drivers/radeonsi/si_compute.c
+++ b/src/gallium/drivers/radeonsi/si_compute.c
@@ -155,7 +155,7 @@ static void si_create_compute_state_async(void *job, void *gdata, int thread_ind
/* Remove images with FMASK from the bitmask. We only care about the first
* 3 anyway, so we can take msaa_images[0] and ignore the rest.
*/
- if (sscreen->info.chip_class < GFX11)
+ if (sscreen->info.gfx_level < GFX11)
non_fmask_images &= ~sel->info.base.msaa_images[0];
for (unsigned i = 0; i < 3 && non_fmask_images & (1 << i); i++) {
@@ -200,10 +200,10 @@ static void si_create_compute_state_async(void *job, void *gdata, int thread_ind
sscreen->info.wave64_vgpr_alloc_granularity == 8) ? 8 : 4)) |
S_00B848_DX10_CLAMP(1) |
S_00B848_MEM_ORDERED(si_shader_mem_ordered(shader)) |
- S_00B848_WGP_MODE(sscreen->info.chip_class >= GFX10) |
+ S_00B848_WGP_MODE(sscreen->info.gfx_level >= GFX10) |
S_00B848_FLOAT_MODE(shader->config.float_mode);
- if (sscreen->info.chip_class < GFX10) {
+ if (sscreen->info.gfx_level < GFX10) {
shader->config.rsrc1 |= S_00B848_SGPRS((shader->config.num_sgprs - 1) / 8);
}
@@ -385,7 +385,7 @@ void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf
radeon_emit(S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
radeon_emit(S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
- if (sctx->chip_class == GFX6) {
+ if (sctx->gfx_level == GFX6) {
/* This register has been moved to R_00CD20_COMPUTE_MAX_WAVE_ID
* and is now per pipe, so it should be handled in the
* kernel if we want to use something other than the default value.
@@ -402,7 +402,7 @@ void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf
}
}
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
/* Also set R_00B858_COMPUTE_STATIC_THREAD_MGMT_SE2 / SE3 */
radeon_set_sh_reg_seq(R_00B864_COMPUTE_STATIC_THREAD_MGMT_SE2, 2);
radeon_emit(S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
@@ -428,10 +428,10 @@ void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf
/* cs_preamble_state initializes this for the gfx queue, so only do this
* if we are on a compute queue.
*/
- if (sctx->chip_class >= GFX9 && sctx->chip_class < GFX11 &&
+ if (sctx->gfx_level >= GFX9 && sctx->gfx_level < GFX11 &&
(cs != &sctx->gfx_cs || !sctx->screen->info.has_graphics)) {
radeon_set_uconfig_reg(R_0301EC_CP_COHER_START_DELAY,
- sctx->chip_class >= GFX10 ? 0x20 : 0);
+ sctx->gfx_level >= GFX10 ? 0x20 : 0);
}
if (!info->has_graphics && info->family >= CHIP_ARCTURUS) {
@@ -442,7 +442,7 @@ void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf
radeon_emit(S_00B858_SH0_CU_EN(info->spi_cu_en) | S_00B858_SH1_CU_EN(info->spi_cu_en));
}
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
radeon_set_sh_reg_seq(R_00B890_COMPUTE_USER_ACCUM_0, 4);
radeon_emit(0); /* R_00B890_COMPUTE_USER_ACCUM_0 */
radeon_emit(0); /* R_00B894_COMPUTE_USER_ACCUM_1 */
@@ -451,11 +451,11 @@ void si_emit_initial_compute_regs(struct si_context *sctx, struct radeon_cmdbuf
radeon_set_sh_reg(R_00B9F4_COMPUTE_DISPATCH_TUNNEL, 0);
- if (sctx->chip_class < GFX11)
+ if (sctx->gfx_level < GFX11)
radeon_set_sh_reg(R_00B8A0_COMPUTE_PGM_RSRC3, 0);
}
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
radeon_set_sh_reg_seq(R_00B8AC_COMPUTE_STATIC_THREAD_MGMT_SE4, 4);
radeon_emit(S_00B8AC_SA0_CU_EN(info->spi_cu_en) | S_00B8AC_SA1_CU_EN(info->spi_cu_en)); /* SE4 */
radeon_emit(S_00B8AC_SA0_CU_EN(info->spi_cu_en) | S_00B8AC_SA1_CU_EN(info->spi_cu_en)); /* SE5 */
@@ -490,7 +490,7 @@ static bool si_setup_compute_scratch_buffer(struct si_context *sctx, struct si_s
}
if (sctx->compute_scratch_buffer != shader->scratch_bo && scratch_needed) {
- if (sctx->chip_class < GFX11) {
+ if (sctx->gfx_level < GFX11) {
uint64_t scratch_va = sctx->compute_scratch_buffer->gpu_address;
if (!si_shader_binary_upload(sctx->screen, shader, scratch_va))
@@ -530,7 +530,7 @@ static bool si_switch_compute_shader(struct si_context *sctx, struct si_compute
* allocated in the shader and 4 bytes allocated by the state
* tracker, then we will set LDS_SIZE to 512 bytes rather than 256.
*/
- if (sctx->chip_class <= GFX6) {
+ if (sctx->gfx_level <= GFX6) {
lds_blocks += align(program->sel.info.base.shared_size, 256) >> 8;
} else {
lds_blocks += align(program->sel.info.base.shared_size, 512) >> 9;
@@ -569,12 +569,12 @@ static bool si_switch_compute_shader(struct si_context *sctx, struct si_compute
radeon_begin(cs);
radeon_set_sh_reg(R_00B830_COMPUTE_PGM_LO, shader_va >> 8);
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
radeon_set_sh_reg(R_00B8A0_COMPUTE_PGM_RSRC3,
S_00B8A0_INST_PREF_SIZE(si_calc_inst_pref_size(shader)));
}
- if (sctx->chip_class >= GFX11 && shader->scratch_bo) {
+ if (sctx->gfx_level >= GFX11 && shader->scratch_bo) {
radeon_set_sh_reg_seq(R_00B840_COMPUTE_DISPATCH_SCRATCH_BASE_LO, 4);
radeon_emit(sctx->compute_scratch_buffer->gpu_address >> 8);
radeon_emit(sctx->compute_scratch_buffer->gpu_address >> 40);
@@ -612,7 +612,7 @@ static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
uint32_t scratch_dword0 = scratch_va & 0xffffffff;
uint32_t scratch_dword1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32);
- if (sctx->chip_class >= GFX11)
+ if (sctx->gfx_level >= GFX11)
scratch_dword1 |= S_008F04_SWIZZLE_ENABLE_GFX11(1);
else
scratch_dword1 |= S_008F04_SWIZZLE_ENABLE_GFX6(1);
@@ -621,12 +621,12 @@ static void setup_scratch_rsrc_user_sgprs(struct si_context *sctx,
uint32_t scratch_dword2 = 0xffffffff;
uint32_t scratch_dword3 = S_008F0C_INDEX_STRIDE(3) | S_008F0C_ADD_TID_ENABLE(1);
- if (sctx->chip_class >= GFX9) {
+ if (sctx->gfx_level >= GFX9) {
assert(max_private_element_size == 1); /* only 4 bytes on GFX9 */
} else {
scratch_dword3 |= S_008F0C_ELEMENT_SIZE(max_private_element_size);
- if (sctx->chip_class < GFX8) {
+ if (sctx->gfx_level < GFX8) {
/* BUF_DATA_FORMAT is ignored, but it cannot be
* BUF_DATA_FORMAT_INVALID. */
scratch_dword3 |= S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_8);
@@ -811,7 +811,7 @@ static void si_emit_dispatch_packets(struct si_context *sctx, const struct pipe_
DIV_ROUND_UP(threads_per_threadgroup, sctx->cs_shader_state.program->shader.wave_size);
unsigned threadgroups_per_cu = 1;
- if (sctx->chip_class >= GFX10 && waves_per_threadgroup == 1)
+ if (sctx->gfx_level >= GFX10 && waves_per_threadgroup == 1)
threadgroups_per_cu = 2;
if (unlikely(sctx->thread_trace_enabled)) {
@@ -829,7 +829,7 @@ static void si_emit_dispatch_packets(struct si_context *sctx, const struct pipe_
unsigned dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1) | S_00B800_FORCE_START_AT_000(1) |
/* If the KMD allows it (there is a KMD hw register for it),
* allow launching waves out-of-order. (same as Vulkan) */
- S_00B800_ORDER_MODE(sctx->chip_class >= GFX7) |
+ S_00B800_ORDER_MODE(sctx->gfx_level >= GFX7) |
S_00B800_CS_W32_EN(sctx->cs_shader_state.program->shader.wave_size == 32);
const uint *last_block = info->last_block;
@@ -881,7 +881,7 @@ static void si_emit_dispatch_packets(struct si_context *sctx, const struct pipe_
radeon_emit(dispatch_initiator);
}
- if (unlikely(sctx->thread_trace_enabled && sctx->chip_class >= GFX9)) {
+ if (unlikely(sctx->thread_trace_enabled && sctx->gfx_level >= GFX9)) {
radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(EVENT_TYPE(V_028A90_THREAD_TRACE_MARKER) | EVENT_INDEX(0));
}
@@ -969,7 +969,7 @@ static void si_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info
si_context_add_resource_size(sctx, info->indirect);
/* Indirect buffers use TC L2 on GFX9, but not older hw. */
- if (sctx->chip_class <= GFX8 && si_resource(info->indirect)->TC_L2_dirty) {
+ if (sctx->gfx_level <= GFX8 && si_resource(info->indirect)->TC_L2_dirty) {
sctx->flags |= SI_CONTEXT_WB_L2;
si_resource(info->indirect)->TC_L2_dirty = false;
}
@@ -1029,7 +1029,7 @@ static void si_launch_grid(struct pipe_context *ctx, const struct pipe_grid_info
}
/* Prefetch the compute shader to L2. */
- if (sctx->chip_class >= GFX7 && prefetch)
+ if (sctx->gfx_level >= GFX7 && prefetch)
si_cp_dma_prefetch(sctx, &program->shader.bo->b.b, 0, program->shader.bo->b.b.width0);
if (program->ir_type != PIPE_SHADER_IR_NATIVE)
diff --git a/src/gallium/drivers/radeonsi/si_compute_blit.c b/src/gallium/drivers/radeonsi/si_compute_blit.c
index 9387dc458c2..9a800adb3b5 100644
--- a/src/gallium/drivers/radeonsi/si_compute_blit.c
+++ b/src/gallium/drivers/radeonsi/si_compute_blit.c
@@ -32,10 +32,10 @@
static enum si_cache_policy get_cache_policy(struct si_context *sctx, enum si_coherency coher,
uint64_t size)
{
- if ((sctx->chip_class >= GFX9 && (coher == SI_COHERENCY_CB_META ||
+ if ((sctx->gfx_level >= GFX9 && (coher == SI_COHERENCY_CB_META ||
coher == SI_COHERENCY_DB_META ||
coher == SI_COHERENCY_CP)) ||
- (sctx->chip_class >= GFX7 && coher == SI_COHERENCY_SHADER))
+ (sctx->gfx_level >= GFX7 && coher == SI_COHERENCY_SHADER))
return L2_LRU; /* it's faster if L2 doesn't evict anything */
return L2_BYPASS;
@@ -152,7 +152,7 @@ void si_launch_grid_internal(struct si_context *sctx, struct pipe_grid_info *inf
if (flags & SI_OP_CS_IMAGE) {
/* Make sure image stores are visible to CB, which doesn't use L2 on GFX6-8. */
- sctx->flags |= sctx->chip_class <= GFX8 ? SI_CONTEXT_WB_L2 : 0;
+ sctx->flags |= sctx->gfx_level <= GFX8 ? SI_CONTEXT_WB_L2 : 0;
/* Make sure image stores are visible to all CUs. */
sctx->flags |= SI_CONTEXT_INV_VCACHE;
} else {
@@ -386,7 +386,7 @@ void si_clear_buffer(struct si_context *sctx, struct pipe_resource *dst,
if (aligned_size >= 4) {
uint64_t compute_min_size;
- if (sctx->chip_class <= GFX8) {
+ if (sctx->gfx_level <= GFX8) {
/* CP DMA clears are terribly slow with GTT on GFX6-8, which can always
* happen due to BO evictions.
*/
@@ -604,7 +604,7 @@ void si_compute_copy_image(struct si_context *sctx, struct pipe_resource *dst, u
/* src and dst have the same number of samples. */
si_make_CB_shader_coherent(sctx, src->nr_samples, true,
ssrc->surface.u.gfx9.color.dcc.pipe_aligned);
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
/* GFX10+ uses DCC stores so si_make_CB_shader_coherent is required for dst too */
si_make_CB_shader_coherent(sctx, dst->nr_samples, true,
sdst->surface.u.gfx9.color.dcc.pipe_aligned);
@@ -631,7 +631,7 @@ void si_compute_copy_image(struct si_context *sctx, struct pipe_resource *dst, u
if (is_dcc_decompress)
image[1].access |= SI_IMAGE_ACCESS_DCC_OFF;
- else if (sctx->chip_class >= GFX10)
+ else if (sctx->gfx_level >= GFX10)
image[1].access |= SI_IMAGE_ACCESS_ALLOW_DCC_STORE;
ctx->set_shader_images(ctx, PIPE_SHADER_COMPUTE, 0, 2, 0, image);
@@ -759,7 +759,7 @@ void gfx9_clear_dcc_msaa(struct si_context *sctx, struct pipe_resource *res, uin
{
struct si_texture *tex = (struct si_texture*)res;
- assert(sctx->chip_class < GFX11);
+ assert(sctx->gfx_level < GFX11);
/* Set the DCC buffer. */
assert(tex->surface.meta_offset && tex->surface.meta_offset <= UINT_MAX);
@@ -813,7 +813,7 @@ void si_compute_expand_fmask(struct pipe_context *ctx, struct pipe_resource *tex
unsigned log_samples = util_logbase2(tex->nr_samples);
assert(tex->nr_samples >= 2);
- assert(sctx->chip_class < GFX11);
+ assert(sctx->gfx_level < GFX11);
/* EQAA FMASK expansion is unimplemented. */
if (tex->nr_samples != tex->nr_storage_samples)
diff --git a/src/gallium/drivers/radeonsi/si_cp_dma.c b/src/gallium/drivers/radeonsi/si_cp_dma.c
index 88a495c3f0f..04f5a663397 100644
--- a/src/gallium/drivers/radeonsi/si_cp_dma.c
+++ b/src/gallium/drivers/radeonsi/si_cp_dma.c
@@ -43,8 +43,8 @@
static inline unsigned cp_dma_max_byte_count(struct si_context *sctx)
{
unsigned max =
- sctx->chip_class >= GFX11 ? 32767 :
- sctx->chip_class >= GFX9 ? S_415_BYTE_COUNT_GFX9(~0u) : S_415_BYTE_COUNT_GFX6(~0u);
+ sctx->gfx_level >= GFX11 ? 32767 :
+ sctx->gfx_level >= GFX9 ? S_415_BYTE_COUNT_GFX9(~0u) : S_415_BYTE_COUNT_GFX6(~0u);
/* make it aligned for optimal performance */
return max & ~(SI_CPDMA_ALIGNMENT - 1);
@@ -61,9 +61,9 @@ static void si_emit_cp_dma(struct si_context *sctx, struct radeon_cmdbuf *cs, ui
uint32_t header = 0, command = 0;
assert(size <= cp_dma_max_byte_count(sctx));
- assert(sctx->chip_class != GFX6 || cache_policy == L2_BYPASS);
+ assert(sctx->gfx_level != GFX6 || cache_policy == L2_BYPASS);
- if (sctx->chip_class >= GFX9)
+ if (sctx->gfx_level >= GFX9)
command |= S_415_BYTE_COUNT_GFX9(size);
else
command |= S_415_BYTE_COUNT_GFX6(size);
@@ -76,13 +76,13 @@ static void si_emit_cp_dma(struct si_context *sctx, struct radeon_cmdbuf *cs, ui
command |= S_415_RAW_WAIT(1);
/* Src and dst flags. */
- if (sctx->chip_class >= GFX9 && !(flags & CP_DMA_CLEAR) && src_va == dst_va) {
+ if (sctx->gfx_level >= GFX9 && !(flags & CP_DMA_CLEAR) && src_va == dst_va) {
header |= S_411_DST_SEL(V_411_NOWHERE); /* prefetch only */
} else if (flags & CP_DMA_DST_IS_GDS) {
header |= S_411_DST_SEL(V_411_GDS);
/* GDS increments the address, not CP. */
command |= S_415_DAS(V_415_REGISTER) | S_415_DAIC(V_415_NO_INCREMENT);
- } else if (sctx->chip_class >= GFX7 && cache_policy != L2_BYPASS) {
+ } else if (sctx->gfx_level >= GFX7 && cache_policy != L2_BYPASS) {
header |=
S_411_DST_SEL(V_411_DST_ADDR_TC_L2) | S_500_DST_CACHE_POLICY(cache_policy == L2_STREAM);
}
@@ -93,14 +93,14 @@ static void si_emit_cp_dma(struct si_context *sctx, struct radeon_cmdbuf *cs, ui
header |= S_411_SRC_SEL(V_411_GDS);
/* Both of these are required for GDS. It does increment the address. */
command |= S_415_SAS(V_415_REGISTER) | S_415_SAIC(V_415_NO_INCREMENT);
- } else if (sctx->chip_class >= GFX7 && cache_policy != L2_BYPASS) {
+ } else if (sctx->gfx_level >= GFX7 && cache_policy != L2_BYPASS) {
header |=
S_411_SRC_SEL(V_411_SRC_ADDR_TC_L2) | S_500_SRC_CACHE_POLICY(cache_policy == L2_STREAM);
}
radeon_begin(cs);
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
radeon_emit(PKT3(PKT3_DMA_DATA, 5, 0));
radeon_emit(header);
radeon_emit(src_va); /* SRC_ADDR_LO [31:0] */
@@ -451,7 +451,7 @@ void si_cp_write_data(struct si_context *sctx, struct si_resource *buf, unsigned
assert(offset % 4 == 0);
assert(size % 4 == 0);
- if (sctx->chip_class == GFX6 && dst_sel == V_370_MEM)
+ if (sctx->gfx_level == GFX6 && dst_sel == V_370_MEM)
dst_sel = V_370_MEM_GRBM;
radeon_add_to_buffer_list(sctx, cs, buf, RADEON_USAGE_WRITE | RADEON_PRIO_CP_DMA);
diff --git a/src/gallium/drivers/radeonsi/si_cp_reg_shadowing.c b/src/gallium/drivers/radeonsi/si_cp_reg_shadowing.c
index ca6b41d9c0b..71b83d3223c 100644
--- a/src/gallium/drivers/radeonsi/si_cp_reg_shadowing.c
+++ b/src/gallium/drivers/radeonsi/si_cp_reg_shadowing.c
@@ -35,7 +35,7 @@ static void si_build_load_reg(struct si_screen *sscreen, struct si_pm4_state *pm
unsigned packet, num_ranges, offset;
const struct ac_reg_range *ranges;
- ac_get_reg_ranges(sscreen->info.chip_class, sscreen->info.family,
+ ac_get_reg_ranges(sscreen->info.gfx_level, sscreen->info.family,
type, &num_ranges, &ranges);
switch (type) {
@@ -90,7 +90,7 @@ si_create_shadowing_ib_preamble(struct si_context *sctx)
si_pm4_cmd_add(pm4, PKT3(PKT3_EVENT_WRITE, 0, 0));
si_pm4_cmd_add(pm4, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
unsigned gcr_cntl = S_586_GL2_INV(1) | S_586_GL2_WB(1) |
S_586_GLM_INV(1) | S_586_GLM_WB(1) |
S_586_GL1_INV(1) | S_586_GLV_INV(1) |
@@ -104,7 +104,7 @@ si_create_shadowing_ib_preamble(struct si_context *sctx)
si_pm4_cmd_add(pm4, 0); /* CP_COHER_BASE_HI */
si_pm4_cmd_add(pm4, 0x0000000A); /* POLL_INTERVAL */
si_pm4_cmd_add(pm4, gcr_cntl); /* GCR_CNTL */
- } else if (sctx->chip_class == GFX9) {
+ } else if (sctx->gfx_level == GFX9) {
unsigned cp_coher_cntl = S_0301F0_SH_ICACHE_ACTION_ENA(1) |
S_0301F0_SH_KCACHE_ACTION_ENA(1) |
S_0301F0_TC_ACTION_ENA(1) |
diff --git a/src/gallium/drivers/radeonsi/si_debug.c b/src/gallium/drivers/radeonsi/si_debug.c
index 5e0e84b6a64..c53d2f589b9 100644
--- a/src/gallium/drivers/radeonsi/si_debug.c
+++ b/src/gallium/drivers/radeonsi/si_debug.c
@@ -297,7 +297,7 @@ static void si_dump_mmapped_reg(struct si_context *sctx, FILE *f, unsigned offse
uint32_t value;
if (ws->read_registers(ws, offset, 1, &value))
- ac_dump_reg(f, sctx->chip_class, offset, value, ~0);
+ ac_dump_reg(f, sctx->gfx_level, offset, value, ~0);
}
static void si_dump_debug_registers(struct si_context *sctx, FILE *f)
@@ -321,7 +321,7 @@ static void si_dump_debug_registers(struct si_context *sctx, FILE *f)
si_dump_mmapped_reg(sctx, f, R_00803C_GRBM_STATUS_SE3);
si_dump_mmapped_reg(sctx, f, R_00D034_SDMA0_STATUS_REG);
si_dump_mmapped_reg(sctx, f, R_00D834_SDMA1_STATUS_REG);
- if (sctx->chip_class <= GFX8) {
+ if (sctx->gfx_level <= GFX8) {
si_dump_mmapped_reg(sctx, f, R_000E50_SRBM_STATUS);
si_dump_mmapped_reg(sctx, f, R_000E4C_SRBM_STATUS2);
si_dump_mmapped_reg(sctx, f, R_000E54_SRBM_STATUS3);
@@ -355,7 +355,7 @@ static void si_log_chunk_type_cs_destroy(void *data)
static void si_parse_current_ib(FILE *f, struct radeon_cmdbuf *cs, unsigned begin, unsigned end,
int *last_trace_id, unsigned trace_id_count, const char *name,
- enum chip_class chip_class)
+ enum amd_gfx_level gfx_level)
{
unsigned orig_end = end;
@@ -368,7 +368,7 @@ static void si_parse_current_ib(FILE *f, struct radeon_cmdbuf *cs, unsigned begi
if (begin < chunk->cdw) {
ac_parse_ib_chunk(f, chunk->buf + begin, MIN2(end, chunk->cdw) - begin, last_trace_id,
- trace_id_count, chip_class, NULL, NULL);
+ trace_id_count, gfx_level, NULL, NULL);
}
if (end <= chunk->cdw)
@@ -384,7 +384,7 @@ static void si_parse_current_ib(FILE *f, struct radeon_cmdbuf *cs, unsigned begi
assert(end <= cs->current.cdw);
ac_parse_ib_chunk(f, cs->current.buf + begin, end - begin, last_trace_id, trace_id_count,
- chip_class, NULL, NULL);
+ gfx_level, NULL, NULL);
fprintf(f, "------------------- %s end (dw = %u) -------------------\n\n", name, orig_end);
}
@@ -392,7 +392,7 @@ static void si_parse_current_ib(FILE *f, struct radeon_cmdbuf *cs, unsigned begi
void si_print_current_ib(struct si_context *sctx, FILE *f)
{
si_parse_current_ib(f, &sctx->gfx_cs, 0, sctx->gfx_cs.prev_dw + sctx->gfx_cs.current.cdw,
- NULL, 0, "GFX", sctx->chip_class);
+ NULL, 0, "GFX", sctx->gfx_level);
}
static void si_log_chunk_type_cs_print(void *data, FILE *f)
@@ -415,19 +415,19 @@ static void si_log_chunk_type_cs_print(void *data, FILE *f)
if (chunk->gfx_begin == 0) {
if (ctx->cs_preamble_state)
ac_parse_ib(f, ctx->cs_preamble_state->pm4, ctx->cs_preamble_state->ndw, NULL, 0,
- "IB2: Init config", ctx->chip_class, NULL, NULL);
+ "IB2: Init config", ctx->gfx_level, NULL, NULL);
if (ctx->cs_preamble_gs_rings)
ac_parse_ib(f, ctx->cs_preamble_gs_rings->pm4, ctx->cs_preamble_gs_rings->ndw, NULL, 0,
- "IB2: Init GS rings", ctx->chip_class, NULL, NULL);
+ "IB2: Init GS rings", ctx->gfx_level, NULL, NULL);
}
if (scs->flushed) {
ac_parse_ib(f, scs->gfx.ib + chunk->gfx_begin, chunk->gfx_end - chunk->gfx_begin,
- &last_trace_id, map ? 1 : 0, "IB", ctx->chip_class, NULL, NULL);
+ &last_trace_id, map ? 1 : 0, "IB", ctx->gfx_level, NULL, NULL);
} else {
si_parse_current_ib(f, &ctx->gfx_cs, chunk->gfx_begin, chunk->gfx_end, &last_trace_id,
- map ? 1 : 0, "IB", ctx->chip_class);
+ map ? 1 : 0, "IB", ctx->gfx_level);
}
}
@@ -621,7 +621,7 @@ struct si_log_chunk_desc_list {
const char *shader_name;
const char *elem_name;
slot_remap_func slot_remap;
- enum chip_class chip_class;
+ enum amd_gfx_level gfx_level;
unsigned element_dw_size;
unsigned num_elements;
@@ -639,7 +639,7 @@ static void si_log_chunk_desc_list_print(void *data, FILE *f)
{
struct si_log_chunk_desc_list *chunk = data;
unsigned sq_img_rsrc_word0 =
- chunk->chip_class >= GFX10 ? R_00A000_SQ_IMG_RSRC_WORD0 : R_008F10_SQ_IMG_RSRC_WORD0;
+ chunk->gfx_level >= GFX10 ? R_00A000_SQ_IMG_RSRC_WORD0 : R_008F10_SQ_IMG_RSRC_WORD0;
for (unsigned i = 0; i < chunk->num_elements; i++) {
unsigned cpu_dw_offset = i * chunk->element_dw_size;
@@ -654,35 +654,35 @@ static void si_log_chunk_desc_list_print(void *data, FILE *f)
switch (chunk->element_dw_size) {
case 4:
for (unsigned j = 0; j < 4; j++)
- ac_dump_reg(f, chunk->chip_class, R_008F00_SQ_BUF_RSRC_WORD0 + j * 4, gpu_list[j],
+ ac_dump_reg(f, chunk->gfx_level, R_008F00_SQ_BUF_RSRC_WORD0 + j * 4, gpu_list[j],
0xffffffff);
break;
case 8:
for (unsigned j = 0; j < 8; j++)
- ac_dump_reg(f, chunk->chip_class, sq_img_rsrc_word0 + j * 4, gpu_list[j], 0xffffffff);
+ ac_dump_reg(f, chunk->gfx_level, sq_img_rsrc_word0 + j * 4, gpu_list[j], 0xffffffff);
fprintf(f, COLOR_CYAN " Buffer:" COLOR_RESET "\n");
for (unsigned j = 0; j < 4; j++)
- ac_dump_reg(f, chunk->chip_class, R_008F00_SQ_BUF_RSRC_WORD0 + j * 4, gpu_list[4 + j],
+ ac_dump_reg(f, chunk->gfx_level, R_008F00_SQ_BUF_RSRC_WORD0 + j * 4, gpu_list[4 + j],
0xffffffff);
break;
case 16:
for (unsigned j = 0; j < 8; j++)
- ac_dump_reg(f, chunk->chip_class, sq_img_rsrc_word0 + j * 4, gpu_list[j], 0xffffffff);
+ ac_dump_reg(f, chunk->gfx_level, sq_img_rsrc_word0 + j * 4, gpu_list[j], 0xffffffff);
fprintf(f, COLOR_CYAN " Buffer:" COLOR_RESET "\n");
for (unsigned j = 0; j < 4; j++)
- ac_dump_reg(f, chunk->chip_class, R_008F00_SQ_BUF_RSRC_WORD0 + j * 4, gpu_list[4 + j],
+ ac_dump_reg(f, chunk->gfx_level, R_008F00_SQ_BUF_RSRC_WORD0 + j * 4, gpu_list[4 + j],
0xffffffff);
fprintf(f, COLOR_CYAN " FMASK:" COLOR_RESET "\n");
for (unsigned j = 0; j < 8; j++)
- ac_dump_reg(f, chunk->chip_class, sq_img_rsrc_word0 + j * 4, gpu_list[8 + j],
+ ac_dump_reg(f, chunk->gfx_level, sq_img_rsrc_word0 + j * 4, gpu_list[8 + j],
0xffffffff);
fprintf(f, COLOR_CYAN " Sampler state:" COLOR_RESET "\n");
for (unsigned j = 0; j < 4; j++)
- ac_dump_reg(f, chunk->chip_class, R_008F30_SQ_IMG_SAMP_WORD0 + j * 4, gpu_list[12 + j],
+ ac_dump_reg(f, chunk->gfx_level, R_008F30_SQ_IMG_SAMP_WORD0 + j * 4, gpu_list[12 + j],
0xffffffff);
break;
}
@@ -732,7 +732,7 @@ static void si_dump_descriptor_list(struct si_screen *screen, struct si_descript
chunk->element_dw_size = element_dw_size;
chunk->num_elements = num_elements;
chunk->slot_remap = slot_remap;
- chunk->chip_class = screen->info.chip_class;
+ chunk->gfx_level = screen->info.gfx_level;
si_resource_reference(&chunk->buf, desc->buffer);
chunk->gpu_list = desc->gpu_list;
@@ -976,7 +976,7 @@ static void si_print_annotated_shader(struct si_shader *shader, struct ac_wave_i
static void si_dump_annotated_shaders(struct si_context *sctx, FILE *f)
{
struct ac_wave_info waves[AC_MAX_WAVES_PER_CHIP];
- unsigned num_waves = ac_get_wave_info(sctx->chip_class, waves);
+ unsigned num_waves = ac_get_wave_info(sctx->gfx_level, waves);
fprintf(f, COLOR_CYAN "The number of active waves = %u" COLOR_RESET "\n\n", num_waves);
@@ -1083,7 +1083,7 @@ void si_check_vm_faults(struct si_context *sctx, struct radeon_saved_cs *saved,
uint64_t addr;
char cmd_line[4096];
- if (!ac_vm_fault_occured(sctx->chip_class, &sctx->dmesg_timestamp, &addr))
+ if (!ac_vm_fault_occured(sctx->gfx_level, &sctx->dmesg_timestamp, &addr))
return;
f = dd_get_debug_file(false);
@@ -1133,5 +1133,5 @@ void si_init_debug_functions(struct si_context *sctx)
* only new messages will be checked for VM faults.
*/
if (sctx->screen->debug_flags & DBG(CHECK_VM))
- ac_vm_fault_occured(sctx->chip_class, &sctx->dmesg_timestamp, NULL);
+ ac_vm_fault_occured(sctx->gfx_level, &sctx->dmesg_timestamp, NULL);
}
diff --git a/src/gallium/drivers/radeonsi/si_descriptors.c b/src/gallium/drivers/radeonsi/si_descriptors.c
index 1985fa37743..698ac5f4691 100644
--- a/src/gallium/drivers/radeonsi/si_descriptors.c
+++ b/src/gallium/drivers/radeonsi/si_descriptors.c
@@ -299,7 +299,7 @@ void si_set_mutable_tex_desc_fields(struct si_screen *sscreen, struct si_texture
va = tex->buffer.gpu_address;
- if (sscreen->info.chip_class >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX9) {
/* Only stencil_offset needs to be added here. */
if (is_stencil)
va += tex->surface.u.gfx9.zs.stencil_offset;
@@ -315,14 +315,14 @@ void si_set_mutable_tex_desc_fields(struct si_screen *sscreen, struct si_texture
/* Only macrotiled modes can set tile swizzle.
* GFX9 doesn't use (legacy) base_level_info.
*/
- if (sscreen->info.chip_class >= GFX9 || base_level_info->mode == RADEON_SURF_MODE_2D)
+ if (sscreen->info.gfx_level >= GFX9 || base_level_info->mode == RADEON_SURF_MODE_2D)
state[0] |= tex->surface.tile_swizzle;
- if (sscreen->info.chip_class >= GFX8) {
+ if (sscreen->info.gfx_level >= GFX8) {
if (!(access & SI_IMAGE_ACCESS_DCC_OFF) && vi_dcc_enabled(tex, first_level)) {
meta_va = tex->buffer.gpu_address + tex->surface.meta_offset;
- if (sscreen->info.chip_class == GFX8) {
+ if (sscreen->info.gfx_level == GFX8) {
meta_va += tex->surface.u.legacy.color.dcc_level[base_level].dcc_offset;
assert(base_level_info->mode == RADEON_SURF_MODE_2D);
}
@@ -339,10 +339,10 @@ void si_set_mutable_tex_desc_fields(struct si_screen *sscreen, struct si_texture
state[6] |= S_008F28_COMPRESSION_EN(1);
}
- if (sscreen->info.chip_class >= GFX8 && sscreen->info.chip_class <= GFX9)
+ if (sscreen->info.gfx_level >= GFX8 && sscreen->info.gfx_level <= GFX9)
state[7] = meta_va >> 8;
- if (sscreen->info.chip_class >= GFX10) {
+ if (sscreen->info.gfx_level >= GFX10) {
if (is_stencil) {
state[3] |= S_00A00C_SW_MODE(tex->surface.u.gfx9.zs.stencil_swizzle_mode);
} else {
@@ -369,7 +369,7 @@ void si_set_mutable_tex_desc_fields(struct si_screen *sscreen, struct si_texture
* The same limitations apply to SDMA compressed stores because
* SDMA uses the same DCC codec.
*/
- S_00A018_WRITE_COMPRESS_ENABLE(ac_surface_supports_dcc_image_stores(sscreen->info.chip_class, &tex->surface) &&
+ S_00A018_WRITE_COMPRESS_ENABLE(ac_surface_supports_dcc_image_stores(sscreen->info.gfx_level, &tex->surface) &&
(access & SI_IMAGE_ACCESS_ALLOW_DCC_STORE));
/* TC-compatible MSAA HTILE requires ITERATE_256. */
@@ -378,7 +378,7 @@ void si_set_mutable_tex_desc_fields(struct si_screen *sscreen, struct si_texture
}
state[7] = meta_va >> 16;
- } else if (sscreen->info.chip_class == GFX9) {
+ } else if (sscreen->info.gfx_level == GFX9) {
if (is_stencil) {
state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.zs.stencil_swizzle_mode);
state[4] |= S_008F20_PITCH(tex->surface.u.gfx9.zs.stencil_epitch);
@@ -789,7 +789,7 @@ static void si_set_shader_image_desc(struct si_context *ctx, const struct pipe_i
unsigned depth = res->b.b.depth0;
unsigned hw_level = level;
- if (ctx->chip_class <= GFX8) {
+ if (ctx->gfx_level <= GFX8) {
/* Always force the base level to the selected level.
*
* This is required for 3D textures, where otherwise
@@ -803,7 +803,7 @@ static void si_set_shader_image_desc(struct si_context *ctx, const struct pipe_i
}
if (access & SI_IMAGE_ACCESS_BLOCK_FORMAT_AS_UINT) {
- if (ctx->chip_class >= GFX9) {
+ if (ctx->gfx_level >= GFX9) {
/* Since the aligned width and height are derived from the width and height
* by the hw, set them directly as the width and height, so that UINT formats
* get exactly the same layout as BCn formats.
@@ -1080,10 +1080,10 @@ static void si_init_buffer_resources(struct si_context *sctx,
desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_FLOAT) |
S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW);
- } else if (sctx->chip_class >= GFX10) {
+ } else if (sctx->gfx_level >= GFX10) {
desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1);
} else {
@@ -1213,7 +1213,7 @@ static void si_set_constant_buffer(struct si_context *sctx, struct si_buffer_res
/* GFX7 cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
* with a NULL buffer). We need to use a dummy buffer instead. */
- if (sctx->chip_class == GFX7 && (!input || (!input->buffer && !input->user_buffer)))
+ if (sctx->gfx_level == GFX7 && (!input || (!input->buffer && !input->user_buffer)))
input = &sctx->null_const_buf;
if (input && (input->buffer || input->user_buffer)) {
@@ -1529,7 +1529,7 @@ void si_set_ring_buffer(struct si_context *sctx, uint slot, struct pipe_resource
break;
}
- if (sctx->chip_class >= GFX8 && stride)
+ if (sctx->gfx_level >= GFX8 && stride)
num_records *= stride;
/* Set the descriptor. */
@@ -1541,10 +1541,10 @@ void si_set_ring_buffer(struct si_context *sctx, uint slot, struct pipe_resource
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
S_008F0C_INDEX_STRIDE(index_stride) | S_008F0C_ADD_TID_ENABLE(add_tid);
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
assert(!swizzle || element_size == 1 || element_size == 3); /* 4 or 16 bytes */
desc[1] |= S_008F04_SWIZZLE_ENABLE_GFX11(swizzle ? element_size : 0);
- } else if (sctx->chip_class >= GFX9) {
+ } else if (sctx->gfx_level >= GFX9) {
assert(!swizzle || element_size == 1); /* only 4 bytes on GFX9 */
desc[1] |= S_008F04_SWIZZLE_ENABLE_GFX6(swizzle);
} else {
@@ -1552,10 +1552,10 @@ void si_set_ring_buffer(struct si_context *sctx, uint slot, struct pipe_resource
desc[3] |= S_008F0C_ELEMENT_SIZE(element_size);
}
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_FLOAT) |
S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED);
- } else if (sctx->chip_class >= GFX10) {
+ } else if (sctx->gfx_level >= GFX10) {
desc[3] |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) | S_008F0C_RESOURCE_LEVEL(1);
} else {
@@ -2072,7 +2072,7 @@ void si_shader_pointers_mark_dirty(struct si_context *sctx)
sctx->compute_bindless_pointer_dirty = sctx->bindless_descriptors.buffer != NULL;
sctx->compute_shaderbuf_sgprs_dirty = true;
sctx->compute_image_sgprs_dirty = true;
- if (sctx->chip_class >= GFX11)
+ if (sctx->gfx_level >= GFX11)
sctx->gs_attribute_ring_pointer_dirty = true;
}
@@ -2105,14 +2105,14 @@ static void si_set_user_data_base(struct si_context *sctx, unsigned shader, uint
void si_shader_change_notify(struct si_context *sctx)
{
si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
- si_get_user_data_base(sctx->chip_class,
+ si_get_user_data_base(sctx->gfx_level,
sctx->shader.tes.cso ? TESS_ON : TESS_OFF,
sctx->shader.gs.cso ? GS_ON : GS_OFF,
sctx->ngg ? NGG_ON : NGG_OFF,
PIPE_SHADER_VERTEX));
si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
- si_get_user_data_base(sctx->chip_class,
+ si_get_user_data_base(sctx->gfx_level,
sctx->shader.tes.cso ? TESS_ON : TESS_OFF,
sctx->shader.gs.cso ? GS_ON : GS_OFF,
sctx->ngg ? NGG_ON : NGG_OFF,
@@ -2172,13 +2172,13 @@ static void si_emit_global_shader_pointers(struct si_context *sctx, struct si_de
{
radeon_begin(&sctx->gfx_cs);
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
radeon_emit_one_32bit_pointer(sctx, descs, R_00B030_SPI_SHADER_USER_DATA_PS_0);
radeon_emit_one_32bit_pointer(sctx, descs, R_00B230_SPI_SHADER_USER_DATA_GS_0);
radeon_emit_one_32bit_pointer(sctx, descs, R_00B430_SPI_SHADER_USER_DATA_HS_0);
radeon_end();
return;
- } else if (sctx->chip_class >= GFX10) {
+ } else if (sctx->gfx_level >= GFX10) {
radeon_emit_one_32bit_pointer(sctx, descs, R_00B030_SPI_SHADER_USER_DATA_PS_0);
/* HW VS stage only used in non-NGG mode. */
radeon_emit_one_32bit_pointer(sctx, descs, R_00B130_SPI_SHADER_USER_DATA_VS_0);
@@ -2186,7 +2186,7 @@ static void si_emit_global_shader_pointers(struct si_context *sctx, struct si_de
radeon_emit_one_32bit_pointer(sctx, descs, R_00B430_SPI_SHADER_USER_DATA_HS_0);
radeon_end();
return;
- } else if (sctx->chip_class == GFX9 && sctx->shadowed_regs) {
+ } else if (sctx->gfx_level == GFX9 && sctx->shadowed_regs) {
/* We can't use the COMMON registers with register shadowing. */
radeon_emit_one_32bit_pointer(sctx, descs, R_00B030_SPI_SHADER_USER_DATA_PS_0);
radeon_emit_one_32bit_pointer(sctx, descs, R_00B130_SPI_SHADER_USER_DATA_VS_0);
@@ -2194,7 +2194,7 @@ static void si_emit_global_shader_pointers(struct si_context *sctx, struct si_de
radeon_emit_one_32bit_pointer(sctx, descs, R_00B430_SPI_SHADER_USER_DATA_LS_0);
radeon_end();
return;
- } else if (sctx->chip_class == GFX9) {
+ } else if (sctx->gfx_level == GFX9) {
/* Broadcast it to all shader stages. */
radeon_emit_one_32bit_pointer(sctx, descs, R_00B530_SPI_SHADER_USER_DATA_COMMON_0);
radeon_end();
@@ -2231,7 +2231,7 @@ void si_emit_graphics_shader_pointers(struct si_context *sctx)
sh_base[PIPE_SHADER_GEOMETRY]);
if (sctx->gs_attribute_ring_pointer_dirty) {
- assert(sctx->chip_class >= GFX11);
+ assert(sctx->gfx_level >= GFX11);
radeon_set_sh_reg(R_00B230_SPI_SHADER_USER_DATA_GS_0 + GFX9_SGPR_ATTRIBUTE_RING_ADDR * 4,
sctx->screen->attribute_ring->gpu_address);
sctx->gs_attribute_ring_pointer_dirty = false;
@@ -2700,7 +2700,7 @@ void si_init_all_descriptors(struct si_context *sctx)
unsigned first_shader = sctx->has_graphics ? 0 : PIPE_SHADER_COMPUTE;
unsigned hs_sgpr0, gs_sgpr0;
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
hs_sgpr0 = R_00B420_SPI_SHADER_PGM_LO_HS;
gs_sgpr0 = R_00B220_SPI_SHADER_PGM_LO_GS;
} else {
@@ -2710,7 +2710,7 @@ void si_init_all_descriptors(struct si_context *sctx)
for (i = first_shader; i < SI_NUM_SHADERS; i++) {
bool is_2nd =
- sctx->chip_class >= GFX9 && (i == PIPE_SHADER_TESS_CTRL || i == PIPE_SHADER_GEOMETRY);
+ sctx->gfx_level >= GFX9 && (i == PIPE_SHADER_TESS_CTRL || i == PIPE_SHADER_GEOMETRY);
unsigned num_sampler_slots = SI_NUM_IMAGE_SLOTS / 2 + SI_NUM_SAMPLERS;
unsigned num_buffer_slots = SI_NUM_SHADER_BUFFERS + SI_NUM_CONST_BUFFERS;
int rel_dw_offset;
@@ -2720,7 +2720,7 @@ void si_init_all_descriptors(struct si_context *sctx)
if (i == PIPE_SHADER_TESS_CTRL) {
rel_dw_offset =
(hs_sgpr0 - R_00B430_SPI_SHADER_USER_DATA_LS_0) / 4;
- } else if (sctx->chip_class >= GFX10) { /* PIPE_SHADER_GEOMETRY */
+ } else if (sctx->gfx_level >= GFX10) { /* PIPE_SHADER_GEOMETRY */
rel_dw_offset =
(gs_sgpr0 - R_00B230_SPI_SHADER_USER_DATA_GS_0) / 4;
} else {
@@ -2740,7 +2740,7 @@ void si_init_all_descriptors(struct si_context *sctx)
if (i == PIPE_SHADER_TESS_CTRL) {
rel_dw_offset =
(hs_sgpr0 + 4 - R_00B430_SPI_SHADER_USER_DATA_LS_0) / 4;
- } else if (sctx->chip_class >= GFX10) { /* PIPE_SHADER_GEOMETRY */
+ } else if (sctx->gfx_level >= GFX10) { /* PIPE_SHADER_GEOMETRY */
rel_dw_offset =
(gs_sgpr0 + 4 - R_00B230_SPI_SHADER_USER_DATA_GS_0) / 4;
} else {
@@ -2800,13 +2800,13 @@ void si_init_all_descriptors(struct si_context *sctx)
/* Set default and immutable mappings. */
si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
- si_get_user_data_base(sctx->chip_class, TESS_OFF, GS_OFF,
+ si_get_user_data_base(sctx->gfx_level, TESS_OFF, GS_OFF,
sctx->ngg, PIPE_SHADER_VERTEX));
si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL,
- si_get_user_data_base(sctx->chip_class, TESS_OFF, GS_OFF,
+ si_get_user_data_base(sctx->gfx_level, TESS_OFF, GS_OFF,
NGG_OFF, PIPE_SHADER_TESS_CTRL));
si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY,
- si_get_user_data_base(sctx->chip_class, TESS_OFF, GS_OFF,
+ si_get_user_data_base(sctx->gfx_level, TESS_OFF, GS_OFF,
NGG_OFF, PIPE_SHADER_GEOMETRY));
si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
diff --git a/src/gallium/drivers/radeonsi/si_fence.c b/src/gallium/drivers/radeonsi/si_fence.c
index e6e4dca082c..87d0cdcd571 100644
--- a/src/gallium/drivers/radeonsi/si_fence.c
+++ b/src/gallium/drivers/radeonsi/si_fence.c
@@ -77,7 +77,7 @@ void si_cp_release_mem(struct si_context *ctx, struct radeon_cmdbuf *cs, unsigne
radeon_begin(cs);
- if (ctx->chip_class >= GFX9 || (compute_ib && ctx->chip_class >= GFX7)) {
+ if (ctx->gfx_level >= GFX9 || (compute_ib && ctx->gfx_level >= GFX7)) {
/* A ZPASS_DONE or PIXEL_STAT_DUMP_EVENT (of the DB occlusion
* counters) must immediately precede every timestamp event to
* prevent a GPU hang on GFX9.
@@ -85,7 +85,7 @@ void si_cp_release_mem(struct si_context *ctx, struct radeon_cmdbuf *cs, unsigne
* Occlusion queries don't need to do it here, because they
* always do ZPASS_DONE before the timestamp.
*/
- if (ctx->chip_class == GFX9 && !compute_ib && query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
+ if (ctx->gfx_level == GFX9 && !compute_ib && query_type != PIPE_QUERY_OCCLUSION_COUNTER &&
query_type != PIPE_QUERY_OCCLUSION_PREDICATE &&
query_type != PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE) {
struct si_screen *sscreen = ctx->screen;
@@ -116,17 +116,17 @@ void si_cp_release_mem(struct si_context *ctx, struct radeon_cmdbuf *cs, unsigne
RADEON_USAGE_WRITE | RADEON_PRIO_QUERY);
}
- radeon_emit(PKT3(PKT3_RELEASE_MEM, ctx->chip_class >= GFX9 ? 6 : 5, 0));
+ radeon_emit(PKT3(PKT3_RELEASE_MEM, ctx->gfx_level >= GFX9 ? 6 : 5, 0));
radeon_emit(op);
radeon_emit(sel);
radeon_emit(va); /* address lo */
radeon_emit(va >> 32); /* address hi */
radeon_emit(new_fence); /* immediate data lo */
radeon_emit(0); /* immediate data hi */
- if (ctx->chip_class >= GFX9)
+ if (ctx->gfx_level >= GFX9)
radeon_emit(0); /* unused */
} else {
- if (ctx->chip_class == GFX7 || ctx->chip_class == GFX8) {
+ if (ctx->gfx_level == GFX7 || ctx->gfx_level == GFX8) {
struct si_resource *scratch = ctx->eop_bug_scratch;
uint64_t va = scratch->gpu_address;
@@ -164,7 +164,7 @@ unsigned si_cp_write_fence_dwords(struct si_screen *screen)
{
unsigned dwords = 6;
- if (screen->info.chip_class == GFX7 || screen->info.chip_class == GFX8)
+ if (screen->info.gfx_level == GFX7 || screen->info.gfx_level == GFX8)
dwords *= 2;
return dwords;
diff --git a/src/gallium/drivers/radeonsi/si_get.c b/src/gallium/drivers/radeonsi/si_get.c
index 07d0a2457d6..f183b64529a 100644
--- a/src/gallium/drivers/radeonsi/si_get.c
+++ b/src/gallium/drivers/radeonsi/si_get.c
@@ -49,7 +49,7 @@ static int si_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
struct si_screen *sscreen = (struct si_screen *)pscreen;
/* Gfx8 (Polaris11) hangs, so don't enable this on Gfx8 and older chips. */
- bool enable_sparse = sscreen->info.chip_class >= GFX9 &&
+ bool enable_sparse = sscreen->info.gfx_level >= GFX9 &&
sscreen->info.has_sparse_vm_mappings;
switch (param) {
@@ -174,7 +174,7 @@ static int si_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
return !(sscreen->debug_flags & DBG(NO_FAST_DISPLAY_LIST));
case PIPE_CAP_SHADER_SAMPLES_IDENTICAL:
- return sscreen->info.chip_class < GFX11;
+ return sscreen->info.gfx_level < GFX11;
case PIPE_CAP_GLSL_ZERO_INIT:
return 2;
@@ -189,7 +189,7 @@ static int si_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
return !sscreen->use_ngg_streamout;
case PIPE_CAP_POST_DEPTH_COVERAGE:
- return sscreen->info.chip_class >= GFX10;
+ return sscreen->info.gfx_level >= GFX10;
case PIPE_CAP_GRAPHICS:
return sscreen->info.has_graphics;
@@ -275,7 +275,7 @@ static int si_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
return 32;
case PIPE_CAP_TEXTURE_BORDER_COLOR_QUIRK:
- return sscreen->info.chip_class <= GFX8 ? PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600 : 0;
+ return sscreen->info.gfx_level <= GFX8 ? PIPE_QUIRK_TEXTURE_BORDER_COLOR_SWIZZLE_R600 : 0;
/* Stream output. */
case PIPE_CAP_MAX_STREAM_OUTPUT_SEPARATE_COMPONENTS:
@@ -307,12 +307,12 @@ static int si_get_param(struct pipe_screen *pscreen, enum pipe_cap param)
case PIPE_CAP_MAX_TEXTURE_3D_LEVELS:
if (!sscreen->info.has_3d_cube_border_color_mipmap)
return 0;
- if (sscreen->info.chip_class >= GFX10)
+ if (sscreen->info.gfx_level >= GFX10)
return 14;
/* textures support 8192, but layered rendering supports 2048 */
return 12;
case PIPE_CAP_MAX_TEXTURE_ARRAY_LAYERS:
- if (sscreen->info.chip_class >= GFX10)
+ if (sscreen->info.gfx_level >= GFX10)
return 8192;
/* textures support 8192, but layered rendering supports 2048 */
return 2048;
@@ -612,12 +612,12 @@ static int si_get_video_param(struct pipe_screen *screen, enum pipe_video_profil
switch (codec) {
case PIPE_VIDEO_FORMAT_MPEG12:
- if (sscreen->info.chip_class >= GFX11)
+ if (sscreen->info.gfx_level >= GFX11)
return false;
else
return profile != PIPE_VIDEO_PROFILE_MPEG1;
case PIPE_VIDEO_FORMAT_MPEG4:
- if (sscreen->info.chip_class >= GFX11)
+ if (sscreen->info.gfx_level >= GFX11)
return false;
else
return true;
@@ -629,7 +629,7 @@ static int si_get_video_param(struct pipe_screen *screen, enum pipe_video_profil
}
return true;
case PIPE_VIDEO_FORMAT_VC1:
- if (sscreen->info.chip_class >= GFX11)
+ if (sscreen->info.gfx_level >= GFX11)
return false;
else
return true;
@@ -1026,7 +1026,7 @@ void si_init_screen_get_functions(struct si_screen *sscreen)
/* fma32 is too slow for gpu < gfx9, so force it only when gpu >= gfx9 */
bool force_fma32 =
- sscreen->info.chip_class >= GFX9 && sscreen->options.force_use_fma32;
+ sscreen->info.gfx_level >= GFX9 && sscreen->options.force_use_fma32;
const struct nir_shader_compiler_options nir_options = {
.lower_scmp = true,
@@ -1055,11 +1055,11 @@ void si_init_screen_get_functions(struct si_screen *sscreen)
* gfx9 and newer prefer FMA for F16 because of the packed instruction.
* gfx10 and older prefer MAD for F32 because of the legacy instruction.
*/
- .lower_ffma16 = sscreen->info.chip_class < GFX9,
- .lower_ffma32 = sscreen->info.chip_class < GFX10_3 && !force_fma32,
+ .lower_ffma16 = sscreen->info.gfx_level < GFX9,
+ .lower_ffma32 = sscreen->info.gfx_level < GFX10_3 && !force_fma32,
.lower_ffma64 = false,
- .fuse_ffma16 = sscreen->info.chip_class >= GFX9,
- .fuse_ffma32 = sscreen->info.chip_class >= GFX10_3 || force_fma32,
+ .fuse_ffma16 = sscreen->info.gfx_level >= GFX9,
+ .fuse_ffma32 = sscreen->info.gfx_level >= GFX10_3 || force_fma32,
.fuse_ffma64 = true,
.lower_fmod = true,
.lower_pack_snorm_4x8 = true,
diff --git a/src/gallium/drivers/radeonsi/si_gfx_cs.c b/src/gallium/drivers/radeonsi/si_gfx_cs.c
index 233280130eb..5fb7eef195e 100644
--- a/src/gallium/drivers/radeonsi/si_gfx_cs.c
+++ b/src/gallium/drivers/radeonsi/si_gfx_cs.c
@@ -57,7 +57,7 @@ void si_flush_gfx_cs(struct si_context *ctx, unsigned flags, struct pipe_fence_h
if (!sscreen->info.kernel_flushes_tc_l2_after_ib) {
wait_flags |= wait_ps_cs | SI_CONTEXT_INV_L2;
- } else if (ctx->chip_class == GFX6) {
+ } else if (ctx->gfx_level == GFX6) {
/* The kernel flushes L2 before shaders are finished. */
wait_flags |= wait_ps_cs;
} else if (!(flags & RADEON_FLUSH_START_NEXT_GFX_IB_NOW) ||
@@ -112,13 +112,13 @@ void si_flush_gfx_cs(struct si_context *ctx, unsigned flags, struct pipe_fence_h
/* Make sure CP DMA is idle at the end of IBs after L2 prefetches
* because the kernel doesn't wait for it. */
- if (ctx->chip_class >= GFX7)
+ if (ctx->gfx_level >= GFX7)
si_cp_dma_wait_for_idle(ctx, &ctx->gfx_cs);
/* If we use s_sendmsg to set tess factors to all 0 or all 1 instead of writing to the tess
* factor buffer, we need this at the end of command buffers:
*/
- if (ctx->chip_class == GFX11 && ctx->tess_rings) {
+ if (ctx->gfx_level == GFX11 && ctx->tess_rings) {
radeon_begin(cs);
radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(EVENT_TYPE(V_028A90_SQ_NON_EVENT) | EVENT_INDEX(0));
@@ -490,7 +490,7 @@ void si_begin_new_gfx_cs(struct si_context *ctx, bool first_cs)
if (!has_clear_state || ctx->blend_color_any_nonzeros)
si_mark_atom_dirty(ctx, &ctx->atoms.s.blend_color);
si_mark_atom_dirty(ctx, &ctx->atoms.s.db_render_state);
- if (ctx->chip_class >= GFX9)
+ if (ctx->gfx_level >= GFX9)
si_mark_atom_dirty(ctx, &ctx->atoms.s.dpbb_state);
si_mark_atom_dirty(ctx, &ctx->atoms.s.stencil_ref);
si_mark_atom_dirty(ctx, &ctx->atoms.s.spi_map);
@@ -574,15 +574,15 @@ void si_emit_surface_sync(struct si_context *sctx, struct radeon_cmdbuf *cs, uns
{
bool compute_ib = !sctx->has_graphics;
- assert(sctx->chip_class <= GFX9);
+ assert(sctx->gfx_level <= GFX9);
/* This seems problematic with GFX7 (see #4764) */
- if (sctx->chip_class != GFX7)
+ if (sctx->gfx_level != GFX7)
cp_coher_cntl |= 1u << 31; /* don't sync PFP, i.e. execute the sync in ME */
radeon_begin(cs);
- if (sctx->chip_class == GFX9 || compute_ib) {
+ if (sctx->gfx_level == GFX9 || compute_ib) {
/* Flush caches and wait for the caches to assert idle. */
radeon_emit(PKT3(PKT3_ACQUIRE_MEM, 5, 0));
radeon_emit(cp_coher_cntl); /* CP_COHER_CNTL */
@@ -695,7 +695,7 @@ void gfx10_emit_cache_flush(struct si_context *ctx, struct radeon_cmdbuf *cs)
}
/* Gfx11 can't flush DB_META and should use a TS event instead. */
- if (ctx->chip_class != GFX11 && flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
+ if (ctx->gfx_level != GFX11 && flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
/* Flush HTILE. Will wait for idle later. */
radeon_emit(PKT3(PKT3_EVENT_WRITE, 0, 0));
radeon_emit(EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
@@ -710,7 +710,7 @@ void gfx10_emit_cache_flush(struct si_context *ctx, struct radeon_cmdbuf *cs)
} else if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
} else if (flags & SI_CONTEXT_FLUSH_AND_INV_DB) {
- if (ctx->chip_class == GFX11)
+ if (ctx->gfx_level == GFX11)
cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
else
cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
@@ -843,7 +843,7 @@ void si_emit_cache_flush(struct si_context *sctx, struct radeon_cmdbuf *cs)
uint32_t cp_coher_cntl = 0;
const uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_FLUSH_AND_INV_DB);
- assert(sctx->chip_class <= GFX9);
+ assert(sctx->gfx_level <= GFX9);
if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
sctx->num_cb_cache_flushes++;
@@ -863,7 +863,7 @@ void si_emit_cache_flush(struct si_context *sctx, struct radeon_cmdbuf *cs)
if (flags & SI_CONTEXT_INV_SCACHE)
cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
- if (sctx->chip_class <= GFX8) {
+ if (sctx->gfx_level <= GFX8) {
if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) | S_0085F0_CB0_DEST_BASE_ENA(1) |
S_0085F0_CB1_DEST_BASE_ENA(1) | S_0085F0_CB2_DEST_BASE_ENA(1) |
@@ -872,7 +872,7 @@ void si_emit_cache_flush(struct si_context *sctx, struct radeon_cmdbuf *cs)
S_0085F0_CB7_DEST_BASE_ENA(1);
/* Necessary for DCC */
- if (sctx->chip_class == GFX8)
+ if (sctx->gfx_level == GFX8)
si_cp_release_mem(sctx, cs, V_028A90_FLUSH_AND_INV_CB_DATA_TS, 0, EOP_DST_SEL_MEM,
EOP_INT_SEL_NONE, EOP_DATA_SEL_DISCARD, NULL, 0, 0, SI_NOT_QUERY);
}
@@ -935,7 +935,7 @@ void si_emit_cache_flush(struct si_context *sctx, struct radeon_cmdbuf *cs)
/* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
* wait for idle on GFX9. We have to use a TS event.
*/
- if (sctx->chip_class == GFX9 && flush_cb_db) {
+ if (sctx->gfx_level == GFX9 && flush_cb_db) {
uint64_t va;
unsigned tc_flags, cb_db_event;
@@ -1011,13 +1011,13 @@ void si_emit_cache_flush(struct si_context *sctx, struct radeon_cmdbuf *cs)
*
* GFX6-GFX7 don't support L2 write-back.
*/
- if (flags & SI_CONTEXT_INV_L2 || (sctx->chip_class <= GFX7 && (flags & SI_CONTEXT_WB_L2))) {
+ if (flags & SI_CONTEXT_INV_L2 || (sctx->gfx_level <= GFX7 && (flags & SI_CONTEXT_WB_L2))) {
/* Invalidate L1 & L2. (L1 is always invalidated on GFX6)
* WB must be set on GFX8+ when TC_ACTION is set.
*/
si_emit_surface_sync(sctx, cs,
cp_coher_cntl | S_0085F0_TC_ACTION_ENA(1) | S_0085F0_TCL1_ACTION_ENA(1) |
- S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= GFX8));
+ S_0301F0_TC_WB_ACTION_ENA(sctx->gfx_level >= GFX8));
cp_coher_cntl = 0;
sctx->num_L2_invalidates++;
} else {
diff --git a/src/gallium/drivers/radeonsi/si_gpu_load.c b/src/gallium/drivers/radeonsi/si_gpu_load.c
index 0cc347c86f2..b6cd976a404 100644
--- a/src/gallium/drivers/radeonsi/si_gpu_load.c
+++ b/src/gallium/drivers/radeonsi/si_gpu_load.c
@@ -101,7 +101,7 @@ static void si_update_mmio_counters(struct si_screen *sscreen, union si_mmio_cou
UPDATE_COUNTER(gui, GUI_ACTIVE);
gui_busy = GUI_ACTIVE(value);
- if (sscreen->info.chip_class == GFX7 || sscreen->info.chip_class == GFX8) {
+ if (sscreen->info.gfx_level == GFX7 || sscreen->info.gfx_level == GFX8) {
/* SRBM_STATUS2 */
sscreen->ws->read_registers(sscreen->ws, SRBM_STATUS2, 1, &value);
@@ -109,7 +109,7 @@ static void si_update_mmio_counters(struct si_screen *sscreen, union si_mmio_cou
sdma_busy = SDMA_BUSY(value);
}
- if (sscreen->info.chip_class >= GFX8) {
+ if (sscreen->info.gfx_level >= GFX8) {
/* CP_STAT */
sscreen->ws->read_registers(sscreen->ws, CP_STAT, 1, &value);
diff --git a/src/gallium/drivers/radeonsi/si_perfcounter.c b/src/gallium/drivers/radeonsi/si_perfcounter.c
index a0e62ae7534..4097f7904e2 100644
--- a/src/gallium/drivers/radeonsi/si_perfcounter.c
+++ b/src/gallium/drivers/radeonsi/si_perfcounter.c
@@ -69,7 +69,7 @@ static void si_pc_emit_instance(struct si_context *sctx, int se, int instance)
value |= S_030800_SE_BROADCAST_WRITES(1);
}
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
/* TODO: Expose counters from each shader array separately if needed. */
value |= S_030800_SA_BROADCAST_WRITES(1);
}
@@ -276,15 +276,15 @@ static void si_pc_query_destroy(struct si_context *sctx, struct si_query *squery
void si_inhibit_clockgating(struct si_context *sctx, struct radeon_cmdbuf *cs, bool inhibit)
{
- if (sctx->chip_class >= GFX11)
+ if (sctx->gfx_level >= GFX11)
return;
radeon_begin(&sctx->gfx_cs);
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
radeon_set_uconfig_reg(R_037390_RLC_PERFMON_CLK_CNTL,
S_037390_PERFMON_CLOCK_STATE(inhibit));
- } else if (sctx->chip_class >= GFX8) {
+ } else if (sctx->gfx_level >= GFX8) {
radeon_set_uconfig_reg(R_0372FC_RLC_PERFMON_CLK_CNTL,
S_0372FC_PERFMON_CLOCK_STATE(inhibit));
}
@@ -908,7 +908,7 @@ si_spm_init(struct si_context *sctx)
/* L2 cache hit */
{GL2C, 0, 0x3}, /* Number of GL2C requests. */
- {GL2C, 0, info->chip_class >= GFX10_3 ? 0x2b : 0x23}, /* Number of GL2C misses. */
+ {GL2C, 0, info->gfx_level >= GFX10_3 ? 0x2b : 0x23}, /* Number of GL2C misses. */
};
if (!ac_init_perfcounters(info, false, false, pc))
diff --git a/src/gallium/drivers/radeonsi/si_pipe.c b/src/gallium/drivers/radeonsi/si_pipe.c
index 48d7844732c..8b8836ae57c 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.c
+++ b/src/gallium/drivers/radeonsi/si_pipe.c
@@ -139,7 +139,7 @@ bool si_init_compiler(struct si_screen *sscreen, struct ac_llvm_compiler *compil
/* Only create the less-optimizing version of the compiler on APUs
* predating Ryzen (Raven). */
bool create_low_opt_compiler =
- !sscreen->info.has_dedicated_vram && sscreen->info.chip_class <= GFX8;
+ !sscreen->info.has_dedicated_vram && sscreen->info.gfx_level <= GFX8;
enum ac_target_machine_options tm_options =
(sscreen->debug_flags & DBG(CHECK_IR) ? AC_TM_CHECK_IR : 0) |
@@ -199,7 +199,7 @@ static void si_destroy_context(struct pipe_context *context)
si_release_all_descriptors(sctx);
- if (sctx->chip_class >= GFX10 && sctx->has_graphics)
+ if (sctx->gfx_level >= GFX10 && sctx->has_graphics)
gfx10_destroy_query(sctx);
if (sctx->thread_trace)
@@ -470,7 +470,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
if (!sctx)
return NULL;
- sctx->has_graphics = sscreen->info.chip_class == GFX6 || !(flags & PIPE_CONTEXT_COMPUTE_ONLY);
+ sctx->has_graphics = sscreen->info.gfx_level == GFX6 || !(flags & PIPE_CONTEXT_COMPUTE_ONLY);
if (flags & PIPE_CONTEXT_DEBUG)
sscreen->record_llvm_ir = true; /* racy but not critical */
@@ -487,9 +487,9 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
sctx->ws = sscreen->ws;
sctx->family = sscreen->info.family;
- sctx->chip_class = sscreen->info.chip_class;
+ sctx->gfx_level = sscreen->info.gfx_level;
- if (sctx->chip_class == GFX7 || sctx->chip_class == GFX8 || sctx->chip_class == GFX9) {
+ if (sctx->gfx_level == GFX7 || sctx->gfx_level == GFX8 || sctx->gfx_level == GFX9) {
sctx->eop_bug_scratch = si_aligned_buffer_create(
&sscreen->b, SI_RESOURCE_FLAG_DRIVER_INTERNAL,
PIPE_USAGE_DEFAULT, 16 * sscreen->info.max_render_backends, 256);
@@ -560,7 +560,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
si_shader_change_notify(sctx);
/* Initialize context functions used by graphics and compute. */
- if (sctx->chip_class >= GFX10)
+ if (sctx->gfx_level >= GFX10)
sctx->emit_cache_flush = gfx10_emit_cache_flush;
else
sctx->emit_cache_flush = si_emit_cache_flush;
@@ -587,7 +587,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
/* Initialize graphics-only context functions. */
if (sctx->has_graphics) {
- if (sctx->chip_class >= GFX10)
+ if (sctx->gfx_level >= GFX10)
gfx10_init_query(sctx);
si_init_msaa_functions(sctx);
si_init_shader_functions(sctx);
@@ -614,7 +614,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
sctx->discard_rasterizer_state = util_blitter_get_discard_rasterizer_state(sctx->blitter);
sctx->queued.named.rasterizer = sctx->discard_rasterizer_state;
- switch (sctx->chip_class) {
+ switch (sctx->gfx_level) {
case GFX6:
si_init_draw_functions_GFX6(sctx);
break;
@@ -637,7 +637,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
si_init_draw_functions_GFX11(sctx);
break;
default:
- unreachable("unhandled chip class");
+ unreachable("unhandled gfx level");
}
}
@@ -656,7 +656,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
sctx->b.create_video_buffer = vl_video_buffer_create;
}
- if (sctx->chip_class >= GFX9) {
+ if (sctx->gfx_level >= GFX9) {
sctx->wait_mem_scratch =
si_aligned_buffer_create(screen,
PIPE_RESOURCE_FLAG_UNMAPPABLE | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
@@ -668,7 +668,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
/* GFX7 cannot unbind a constant buffer (S_BUFFER_LOAD doesn't skip loads
* if NUM_RECORDS == 0). We need to use a dummy buffer instead. */
- if (sctx->chip_class == GFX7) {
+ if (sctx->gfx_level == GFX7) {
sctx->null_const_buf.buffer =
pipe_aligned_buffer_create(screen,
SI_RESOURCE_FLAG_32BIT | SI_RESOURCE_FLAG_DRIVER_INTERNAL,
@@ -714,7 +714,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
}
/* Set immutable fields of shader keys. */
- if (sctx->chip_class >= GFX9) {
+ if (sctx->gfx_level >= GFX9) {
/* The LS output / HS input layout can be communicated
* directly instead of via user SGPRs for merged LS-HS.
* This also enables jumping over the VS prolog for HS-only waves.
@@ -741,7 +741,7 @@ static struct pipe_context *si_create_context(struct pipe_screen *screen, unsign
si_cp_write_data(sctx, sctx->wait_mem_scratch_tmz, 0, 4, V_370_MEM, V_370_ME,
&sctx->wait_mem_number);
- if (sctx->chip_class == GFX7) {
+ if (sctx->gfx_level == GFX7) {
/* Clear the NULL constant buffer, because loads should return zeros.
* Note that this forces CP DMA to be used, because clover deadlocks
* for some reason when the compute codepath is used.
@@ -814,7 +814,7 @@ static struct pipe_context *si_pipe_create_context(struct pipe_screen *screen, v
ctx = si_create_context(screen, flags);
- if (ctx && sscreen->info.chip_class >= GFX9 && sscreen->debug_flags & DBG(SQTT)) {
+ if (ctx && sscreen->info.gfx_level >= GFX9 && sscreen->debug_flags & DBG(SQTT)) {
if (ac_check_profile_state(&sscreen->info)) {
fprintf(stderr, "radeonsi: Canceling RGP trace request as a hang condition has been "
"detected. Force the GPU into a profiling mode with e.g. "
@@ -942,7 +942,7 @@ static void si_destroy_screen(struct pipe_screen *pscreen)
static void si_init_gs_info(struct si_screen *sscreen)
{
- sscreen->gs_table_depth = ac_get_gs_table_depth(sscreen->info.chip_class, sscreen->info.family);
+ sscreen->gs_table_depth = ac_get_gs_table_depth(sscreen->info.gfx_level, sscreen->info.family);
}
static void si_test_vmfault(struct si_screen *sscreen, uint64_t test_flags)
@@ -1068,7 +1068,7 @@ static struct pipe_screen *radeonsi_screen_create_impl(struct radeon_winsys *ws,
sscreen->options.enable_sam,
sscreen->options.disable_sam);
- if (sscreen->info.chip_class >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX9) {
sscreen->se_tile_repeat = 32 * sscreen->info.max_se;
} else {
ac_get_raster_config(&sscreen->info, &sscreen->pa_sc_raster_config,
@@ -1151,7 +1151,7 @@ static struct pipe_screen *radeonsi_screen_create_impl(struct radeon_winsys *ws,
return NULL;
}
- if (sscreen->info.chip_class < GFX10_3)
+ if (sscreen->info.gfx_level < GFX10_3)
sscreen->options.vrs2x2 = false;
si_disk_cache_create(sscreen);
@@ -1229,17 +1229,17 @@ static struct pipe_screen *radeonsi_screen_create_impl(struct radeon_winsys *ws,
sscreen->has_draw_indirect_multi =
(sscreen->info.family >= CHIP_POLARIS10) ||
- (sscreen->info.chip_class == GFX8 && sscreen->info.pfp_fw_version >= 121 &&
+ (sscreen->info.gfx_level == GFX8 && sscreen->info.pfp_fw_version >= 121 &&
sscreen->info.me_fw_version >= 87) ||
- (sscreen->info.chip_class == GFX7 && sscreen->info.pfp_fw_version >= 211 &&
+ (sscreen->info.gfx_level == GFX7 && sscreen->info.pfp_fw_version >= 211 &&
sscreen->info.me_fw_version >= 173) ||
- (sscreen->info.chip_class == GFX6 && sscreen->info.pfp_fw_version >= 79 &&
+ (sscreen->info.gfx_level == GFX6 && sscreen->info.pfp_fw_version >= 79 &&
sscreen->info.me_fw_version >= 142);
sscreen->has_out_of_order_rast =
sscreen->info.has_out_of_order_rast && !(sscreen->debug_flags & DBG(NO_OUT_OF_ORDER));
- if (sscreen->info.chip_class >= GFX11) {
+ if (sscreen->info.gfx_level >= GFX11) {
sscreen->use_ngg = true;
sscreen->use_ngg_streamout = true;
/* TODO: Disable for now. Investigate if it helps. */
@@ -1247,7 +1247,7 @@ static struct pipe_screen *radeonsi_screen_create_impl(struct radeon_winsys *ws,
!(sscreen->debug_flags & DBG(NO_NGG_CULLING));
} else {
sscreen->use_ngg = !(sscreen->debug_flags & DBG(NO_NGG)) &&
- sscreen->info.chip_class >= GFX10 &&
+ sscreen->info.gfx_level >= GFX10 &&
(sscreen->info.family != CHIP_NAVI14 ||
sscreen->info.is_pro_graphics);
sscreen->use_ngg_streamout = false;
@@ -1260,10 +1260,10 @@ static struct pipe_screen *radeonsi_screen_create_impl(struct radeon_winsys *ws,
/* Only set this for the cases that are known to work, which are:
* - GFX9 if bpp >= 4 (in bytes)
*/
- if (sscreen->info.chip_class >= GFX10) {
+ if (sscreen->info.gfx_level >= GFX10) {
memset(sscreen->allow_dcc_msaa_clear_to_reg_for_bpp, true,
sizeof(sscreen->allow_dcc_msaa_clear_to_reg_for_bpp));
- } else if (sscreen->info.chip_class == GFX9) {
+ } else if (sscreen->info.gfx_level == GFX9) {
for (unsigned bpp_log2 = util_logbase2(1); bpp_log2 <= util_logbase2(16); bpp_log2++)
sscreen->allow_dcc_msaa_clear_to_reg_for_bpp[bpp_log2] = true;
}
@@ -1273,14 +1273,14 @@ static struct pipe_screen *radeonsi_screen_create_impl(struct radeon_winsys *ws,
*/
sscreen->always_allow_dcc_stores = !(sscreen->debug_flags & DBG(NO_DCC_STORE)) &&
(sscreen->debug_flags & DBG(DCC_STORE) ||
- sscreen->info.chip_class >= GFX11 || /* always enabled on gfx11 */
- (sscreen->info.chip_class >= GFX10_3 &&
+ sscreen->info.gfx_level >= GFX11 || /* always enabled on gfx11 */
+ (sscreen->info.gfx_level >= GFX10_3 &&
!sscreen->info.has_dedicated_vram));
sscreen->dpbb_allowed = !(sscreen->debug_flags & DBG(NO_DPBB)) &&
- (sscreen->info.chip_class >= GFX10 ||
+ (sscreen->info.gfx_level >= GFX10 ||
/* Only enable primitive binning on gfx9 APUs by default. */
- (sscreen->info.chip_class == GFX9 && !sscreen->info.has_dedicated_vram) ||
+ (sscreen->info.gfx_level == GFX9 && !sscreen->info.has_dedicated_vram) ||
sscreen->debug_flags & DBG(DPBB));
if (sscreen->dpbb_allowed) {
@@ -1312,7 +1312,7 @@ static struct pipe_screen *radeonsi_screen_create_impl(struct radeon_winsys *ws,
sscreen->use_monolithic_shaders = (sscreen->debug_flags & DBG(MONOLITHIC_SHADERS)) != 0;
sscreen->barrier_flags.cp_to_L2 = SI_CONTEXT_INV_SCACHE | SI_CONTEXT_INV_VCACHE;
- if (sscreen->info.chip_class <= GFX8) {
+ if (sscreen->info.gfx_level <= GFX8) {
sscreen->barrier_flags.cp_to_L2 |= SI_CONTEXT_INV_L2;
sscreen->barrier_flags.L2_to_cp |= SI_CONTEXT_WB_L2;
}
@@ -1347,7 +1347,7 @@ static struct pipe_screen *radeonsi_screen_create_impl(struct radeon_winsys *ws,
sscreen->ngg_subgroup_size = 128;
- if (sscreen->info.chip_class >= GFX11) {
+ if (sscreen->info.gfx_level >= GFX11) {
/* TODO: tweak this */
unsigned attr_ring_size_per_se = align(1400000, 64 * 1024);
unsigned attr_ring_size = attr_ring_size_per_se * sscreen->info.max_se;
diff --git a/src/gallium/drivers/radeonsi/si_pipe.h b/src/gallium/drivers/radeonsi/si_pipe.h
index 42756da8347..bf107bfaffa 100644
--- a/src/gallium/drivers/radeonsi/si_pipe.h
+++ b/src/gallium/drivers/radeonsi/si_pipe.h
@@ -940,7 +940,7 @@ struct si_context {
struct pipe_context b; /* base class */
enum radeon_family family;
- enum chip_class chip_class;
+ enum amd_gfx_level gfx_level;
struct radeon_winsys *ws;
struct radeon_winsys_ctx *ctx;
@@ -1600,7 +1600,7 @@ bool vi_dcc_formats_are_incompatible(struct pipe_resource *tex, unsigned level,
enum pipe_format view_format);
void vi_disable_dcc_if_incompatible_format(struct si_context *sctx, struct pipe_resource *tex,
unsigned level, enum pipe_format view_format);
-unsigned si_translate_colorswap(enum chip_class chip_class, enum pipe_format format,
+unsigned si_translate_colorswap(enum amd_gfx_level gfx_level, enum pipe_format format,
bool do_endian_swap);
bool si_texture_disable_dcc(struct si_context *sctx, struct si_texture *tex);
void si_init_screen_texture_functions(struct si_screen *sscreen);
@@ -1788,12 +1788,12 @@ static inline void si_make_CB_shader_coherent(struct si_context *sctx, unsigned
sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_CB | SI_CONTEXT_INV_VCACHE;
sctx->force_cb_shader_coherent = false;
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
if (sctx->screen->info.tcc_rb_non_coherent)
sctx->flags |= SI_CONTEXT_INV_L2;
else if (shaders_read_metadata)
sctx->flags |= SI_CONTEXT_INV_L2_METADATA;
- } else if (sctx->chip_class == GFX9) {
+ } else if (sctx->gfx_level == GFX9) {
/* Single-sample color is coherent with shaders on GFX9, but
* L2 metadata must be flushed if shaders read metadata.
* (DCC, CMASK).
@@ -1813,12 +1813,12 @@ static inline void si_make_DB_shader_coherent(struct si_context *sctx, unsigned
{
sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_DB | SI_CONTEXT_INV_VCACHE;
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
if (sctx->screen->info.tcc_rb_non_coherent)
sctx->flags |= SI_CONTEXT_INV_L2;
else if (shaders_read_metadata)
sctx->flags |= SI_CONTEXT_INV_L2_METADATA;
- } else if (sctx->chip_class == GFX9) {
+ } else if (sctx->gfx_level == GFX9) {
/* Single-sample depth (not stencil) is coherent with shaders
* on GFX9, but L2 metadata must be flushed if shaders read
* metadata.
@@ -1847,7 +1847,7 @@ static inline bool si_htile_enabled(struct si_texture *tex, unsigned level, unsi
return false;
struct si_screen *sscreen = (struct si_screen *)tex->buffer.b.b.screen;
- if (sscreen->info.chip_class >= GFX8) {
+ if (sscreen->info.gfx_level >= GFX8) {
return level < tex->surface.num_meta_levels;
} else {
/* GFX6-7 don't have TC-compatible HTILE, which means they have to run
@@ -2037,17 +2037,17 @@ static inline unsigned si_get_num_coverage_samples(struct si_context *sctx)
}
static unsigned ALWAYS_INLINE
-si_num_vbos_in_user_sgprs_inline(enum chip_class chip_class)
+si_num_vbos_in_user_sgprs_inline(enum amd_gfx_level gfx_level)
{
/* This decreases CPU overhead if all descriptors are in user SGPRs because we don't
* have to allocate and count references for the upload buffer.
*/
- return chip_class >= GFX9 ? 5 : 1;
+ return gfx_level >= GFX9 ? 5 : 1;
}
static inline unsigned si_num_vbos_in_user_sgprs(struct si_screen *sscreen)
{
- return si_num_vbos_in_user_sgprs_inline(sscreen->info.chip_class);
+ return si_num_vbos_in_user_sgprs_inline(sscreen->info.gfx_level);
}
#define PRINT_ERR(fmt, args...) \
diff --git a/src/gallium/drivers/radeonsi/si_query.c b/src/gallium/drivers/radeonsi/si_query.c
index 7977192e37e..7dd488d9980 100644
--- a/src/gallium/drivers/radeonsi/si_query.c
+++ b/src/gallium/drivers/radeonsi/si_query.c
@@ -731,7 +731,7 @@ static struct pipe_query *si_query_hw_create(struct si_screen *sscreen, unsigned
query->b.num_cs_dw_suspend = 6 + si_cp_write_fence_dwords(sscreen);
query->index = index;
if ((index == PIPE_STAT_QUERY_GS_PRIMITIVES || index == PIPE_STAT_QUERY_GS_INVOCATIONS) &&
- sscreen->use_ngg && (sscreen->info.chip_class >= GFX10 && sscreen->info.chip_class <= GFX10_3))
+ sscreen->use_ngg && (sscreen->info.gfx_level >= GFX10 && sscreen->info.gfx_level <= GFX10_3))
query->flags |= SI_QUERY_EMULATE_GS_COUNTERS;
break;
default:
@@ -803,7 +803,7 @@ static void si_query_hw_do_emit_start(struct si_context *sctx, struct si_query_h
case PIPE_QUERY_OCCLUSION_PREDICATE:
case PIPE_QUERY_OCCLUSION_PREDICATE_CONSERVATIVE: {
radeon_begin(cs);
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
uint64_t rb_mask = BITFIELD64_MASK(sctx->screen->info.max_render_backends);
radeon_emit(PKT3(PKT3_EVENT_WRITE, 2, 0));
@@ -815,7 +815,7 @@ static void si_query_hw_do_emit_start(struct si_context *sctx, struct si_query_h
}
radeon_emit(PKT3(PKT3_EVENT_WRITE, 2, 0));
- if (sctx->chip_class >= GFX11)
+ if (sctx->gfx_level >= GFX11)
radeon_emit(EVENT_TYPE(V_028A90_PIXEL_PIPE_STAT_DUMP) | EVENT_INDEX(1));
else
radeon_emit(EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
@@ -926,7 +926,7 @@ static void si_query_hw_do_emit_stop(struct si_context *sctx, struct si_query_hw
va += 8;
radeon_begin(cs);
radeon_emit(PKT3(PKT3_EVENT_WRITE, 2, 0));
- if (sctx->chip_class >= GFX11)
+ if (sctx->gfx_level >= GFX11)
radeon_emit(EVENT_TYPE(V_028A90_PIXEL_PIPE_STAT_DUMP) | EVENT_INDEX(1));
else
radeon_emit(EVENT_TYPE(V_028A90_ZPASS_DONE) | EVENT_INDEX(1));
@@ -1031,7 +1031,7 @@ static void emit_set_predicate(struct si_context *ctx, struct si_resource *buf,
radeon_begin(cs);
- if (ctx->chip_class >= GFX9) {
+ if (ctx->gfx_level >= GFX9) {
radeon_emit(PKT3(PKT3_SET_PREDICATION, 2, 0));
radeon_emit(op);
radeon_emit(va);
@@ -1668,8 +1668,8 @@ static void si_render_condition(struct pipe_context *ctx, struct pipe_query *que
* SET_PREDICATION packets to give the wrong answer for
* non-inverted stream overflow predication.
*/
- if (((sctx->chip_class == GFX8 && sctx->screen->info.pfp_fw_feature < 49) ||
- (sctx->chip_class == GFX9 && sctx->screen->info.pfp_fw_feature < 38)) &&
+ if (((sctx->gfx_level == GFX8 && sctx->screen->info.pfp_fw_feature < 49) ||
+ (sctx->gfx_level == GFX9 && sctx->screen->info.pfp_fw_feature < 38)) &&
!condition &&
(squery->b.type == PIPE_QUERY_SO_OVERFLOW_ANY_PREDICATE ||
(squery->b.type == PIPE_QUERY_SO_OVERFLOW_PREDICATE &&
@@ -1837,7 +1837,7 @@ static unsigned si_get_num_queries(struct si_screen *sscreen)
{
/* amdgpu */
if (sscreen->info.is_amdgpu) {
- if (sscreen->info.chip_class >= GFX8)
+ if (sscreen->info.gfx_level >= GFX8)
return ARRAY_SIZE(si_driver_query_list);
else
return ARRAY_SIZE(si_driver_query_list) - 7;
@@ -1845,7 +1845,7 @@ static unsigned si_get_num_queries(struct si_screen *sscreen)
/* radeon */
if (sscreen->info.has_read_registers_query) {
- if (sscreen->info.chip_class == GFX7)
+ if (sscreen->info.gfx_level == GFX7)
return ARRAY_SIZE(si_driver_query_list) - 6;
else
return ARRAY_SIZE(si_driver_query_list) - 7;
diff --git a/src/gallium/drivers/radeonsi/si_sdma_copy_image.c b/src/gallium/drivers/radeonsi/si_sdma_copy_image.c
index 8171d4917a7..46bc46f7d67 100644
--- a/src/gallium/drivers/radeonsi/si_sdma_copy_image.c
+++ b/src/gallium/drivers/radeonsi/si_sdma_copy_image.c
@@ -74,7 +74,7 @@ static
bool si_translate_format_to_hw(struct si_context *sctx, enum pipe_format format, unsigned *hw_fmt, unsigned *hw_type)
{
const struct util_format_description *desc = util_format_description(format);
- *hw_fmt = si_translate_colorformat(sctx->chip_class, format);
+ *hw_fmt = si_translate_colorformat(sctx->gfx_level, format);
int firstchan;
for (firstchan = 0; firstchan < 4; firstchan++) {
@@ -258,7 +258,7 @@ bool cik_sdma_copy_texture(struct si_context *sctx, struct si_texture *sdst, str
src_pitch <= (1 << 14) && dst_pitch <= (1 << 14) && src_slice_pitch <= (1 << 28) &&
dst_slice_pitch <= (1 << 28) && copy_width <= (1 << 14) && copy_height <= (1 << 14) &&
/* HW limitation - GFX7: */
- (sctx->chip_class != GFX7 ||
+ (sctx->gfx_level != GFX7 ||
(copy_width < (1 << 14) && copy_height < (1 << 14))) &&
/* HW limitation - some GFX7 parts: */
((sctx->family != CHIP_BONAIRE && sctx->family != CHIP_KAVERI) ||
@@ -278,7 +278,7 @@ bool cik_sdma_copy_texture(struct si_context *sctx, struct si_texture *sdst, str
radeon_emit(0);
radeon_emit((dst_pitch - 1) << 16);
radeon_emit(dst_slice_pitch - 1);
- if (sctx->chip_class == GFX7) {
+ if (sctx->gfx_level == GFX7) {
radeon_emit(copy_width | (copy_height << 16));
radeon_emit(0);
} else {
@@ -402,7 +402,7 @@ bool cik_sdma_copy_texture(struct si_context *sctx, struct si_texture *sdst, str
radeon_emit(0);
radeon_emit(((linear_pitch - 1) << 16));
radeon_emit(linear_slice_pitch - 1);
- if (sctx->chip_class == GFX7) {
+ if (sctx->gfx_level == GFX7) {
radeon_emit(copy_width_aligned | (copy_height << 16));
radeon_emit(1);
} else {
@@ -422,7 +422,7 @@ bool si_sdma_copy_image(struct si_context *sctx, struct si_texture *dst, struct
struct radeon_winsys *ws = sctx->ws;
if (!sctx->sdma_cs) {
- if (sctx->screen->debug_flags & DBG(NO_DMA) || sctx->chip_class < GFX7)
+ if (sctx->screen->debug_flags & DBG(NO_DMA) || sctx->gfx_level < GFX7)
return false;
sctx->sdma_cs = CALLOC_STRUCT(radeon_cmdbuf);
@@ -435,7 +435,7 @@ bool si_sdma_copy_image(struct si_context *sctx, struct si_texture *dst, struct
return false;
/* Decompress DCC on older chips */
- if (vi_dcc_enabled(src, 0) && sctx->chip_class < GFX10)
+ if (vi_dcc_enabled(src, 0) && sctx->gfx_level < GFX10)
si_decompress_dcc(sctx, src);
/* TODO: DCC compression is possible on GFX10+. See si_set_mutable_tex_desc_fields for
* additional constraints.
@@ -447,7 +447,7 @@ bool si_sdma_copy_image(struct si_context *sctx, struct si_texture *dst, struct
/* Always flush the gfx queue to get the winsys to handle the dependencies for us. */
si_flush_gfx_cs(sctx, 0, NULL);
- switch (sctx->chip_class) {
+ switch (sctx->gfx_level) {
case GFX7:
case GFX8:
if (!cik_sdma_copy_texture(sctx, dst, src))
@@ -456,7 +456,7 @@ bool si_sdma_copy_image(struct si_context *sctx, struct si_texture *dst, struct
case GFX9:
case GFX10:
case GFX10_3:
- if (!si_sdma_v4_v5_copy_texture(sctx, dst, src, sctx->chip_class >= GFX10))
+ if (!si_sdma_v4_v5_copy_texture(sctx, dst, src, sctx->gfx_level >= GFX10))
return false;
break;
default:
diff --git a/src/gallium/drivers/radeonsi/si_shader.c b/src/gallium/drivers/radeonsi/si_shader.c
index 1777f73fb46..4b1c3f7f7ce 100644
--- a/src/gallium/drivers/radeonsi/si_shader.c
+++ b/src/gallium/drivers/radeonsi/si_shader.c
@@ -46,7 +46,7 @@ static void si_dump_shader_key(const struct si_shader *shader, FILE *f);
/** Whether the shader runs as a combination of multiple API shaders */
bool si_is_multi_part_shader(struct si_shader *shader)
{
- if (shader->selector->screen->info.chip_class <= GFX8 ||
+ if (shader->selector->screen->info.gfx_level <= GFX8 ||
shader->selector->stage > MESA_SHADER_GEOMETRY)
return false;
@@ -220,10 +220,10 @@ unsigned si_get_max_workgroup_size(const struct si_shader *shader)
case MESA_SHADER_TESS_CTRL:
/* Return this so that LLVM doesn't remove s_barrier
* instructions on chips where we use s_barrier. */
- return shader->selector->screen->info.chip_class >= GFX7 ? 128 : 0;
+ return shader->selector->screen->info.gfx_level >= GFX7 ? 128 : 0;
case MESA_SHADER_GEOMETRY:
- return shader->selector->screen->info.chip_class >= GFX9 ? 128 : 0;
+ return shader->selector->screen->info.gfx_level >= GFX9 ? 128 : 0;
case MESA_SHADER_COMPUTE:
break; /* see below */
@@ -306,11 +306,11 @@ static void declare_vs_input_vgprs(struct si_shader_context *ctx, unsigned *num_
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vertex_id);
if (shader->key.ge.as_ls) {
- if (ctx->screen->info.chip_class >= GFX11) {
+ if (ctx->screen->info.gfx_level >= GFX11) {
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
- } else if (ctx->screen->info.chip_class >= GFX10) {
+ } else if (ctx->screen->info.gfx_level >= GFX10) {
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.vs_rel_patch_id);
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
@@ -319,7 +319,7 @@ static void declare_vs_input_vgprs(struct si_shader_context *ctx, unsigned *num_
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.instance_id);
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* unused */
}
- } else if (ctx->screen->info.chip_class >= GFX10) {
+ } else if (ctx->screen->info.gfx_level >= GFX10) {
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, NULL); /* user VGPR */
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT,
&ctx->args.vs_prim_id); /* user vgpr or PrimID (legacy) */
@@ -394,7 +394,7 @@ void si_init_shader_args(struct si_shader_context *ctx, bool ngg_cull_shader)
memset(&ctx->args, 0, sizeof(ctx->args));
/* Set MERGED shaders. */
- if (ctx->screen->info.chip_class >= GFX9 && stage <= MESA_SHADER_GEOMETRY) {
+ if (ctx->screen->info.gfx_level >= GFX9 && stage <= MESA_SHADER_GEOMETRY) {
if (shader->key.ge.as_ls || stage == MESA_SHADER_TESS_CTRL)
stage = SI_SHADER_MERGED_VERTEX_TESSCTRL; /* LS or HS */
else if (shader->key.ge.as_es || shader->key.ge.as_ngg || stage == MESA_SHADER_GEOMETRY)
@@ -471,7 +471,7 @@ void si_init_shader_args(struct si_shader_context *ctx, bool ngg_cull_shader)
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tess_offchip_offset);
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.merged_wave_info);
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tcs_factor_offset);
- if (ctx->screen->info.chip_class >= GFX11)
+ if (ctx->screen->info.gfx_level >= GFX11)
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tcs_wave_id);
else
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.scratch_offset);
@@ -544,7 +544,7 @@ void si_init_shader_args(struct si_shader_context *ctx, bool ngg_cull_shader)
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.merged_wave_info);
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tess_offchip_offset);
- if (ctx->screen->info.chip_class >= GFX11)
+ if (ctx->screen->info.gfx_level >= GFX11)
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.gs_attr_offset);
else
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.scratch_offset);
@@ -578,7 +578,7 @@ void si_init_shader_args(struct si_shader_context *ctx, bool ngg_cull_shader)
}
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_CONST_DESC_PTR, &ctx->small_prim_cull_info);
- if (ctx->screen->info.chip_class >= GFX11)
+ if (ctx->screen->info.gfx_level >= GFX11)
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->gs_attr_address);
else
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL); /* unused */
@@ -777,7 +777,7 @@ void si_init_shader_args(struct si_shader_context *ctx, bool ngg_cull_shader)
/* Hardware VGPRs. */
/* Thread IDs are packed in VGPR0, 10 bits per component or stored in 3 separate VGPRs */
- if (ctx->screen->info.chip_class >= GFX11 ||
+ if (ctx->screen->info.gfx_level >= GFX11 ||
(!ctx->screen->info.has_graphics && ctx->screen->info.family >= CHIP_ALDEBARAN))
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &ctx->args.local_invocation_ids);
else
@@ -801,8 +801,8 @@ void si_init_shader_args(struct si_shader_context *ctx, bool ngg_cull_shader)
static unsigned get_lds_granularity(struct si_screen *screen, gl_shader_stage stage)
{
- return screen->info.chip_class >= GFX11 && stage == MESA_SHADER_FRAGMENT ? 1024 :
- screen->info.chip_class >= GFX7 ? 512 : 256;
+ return screen->info.gfx_level >= GFX11 && stage == MESA_SHADER_FRAGMENT ? 1024 :
+ screen->info.gfx_level >= GFX7 ? 512 : 256;
}
static bool si_shader_binary_open(struct si_screen *screen, struct si_shader *shader,
@@ -830,7 +830,7 @@ static bool si_shader_binary_open(struct si_screen *screen, struct si_shader *sh
struct ac_rtld_symbol lds_symbols[2];
unsigned num_lds_symbols = 0;
- if (sel && screen->info.chip_class >= GFX9 && !shader->is_gs_copy_shader &&
+ if (sel && screen->info.gfx_level >= GFX9 && !shader->is_gs_copy_shader &&
(sel->stage == MESA_SHADER_GEOMETRY ||
(sel->stage <= MESA_SHADER_GEOMETRY && shader->key.ge.as_ngg))) {
struct ac_rtld_symbol *sym = &lds_symbols[num_lds_symbols++];
@@ -877,7 +877,7 @@ static unsigned si_get_shader_binary_size(struct si_screen *screen, struct si_sh
return size;
}
-static bool si_get_external_symbol(enum chip_class chip_class, void *data, const char *name,
+static bool si_get_external_symbol(enum amd_gfx_level gfx_level, void *data, const char *name,
uint64_t *value)
{
uint64_t *scratch_va = data;
@@ -890,7 +890,7 @@ static bool si_get_external_symbol(enum chip_class chip_class, void *data, const
/* Enable scratch coalescing. */
*value = S_008F04_BASE_ADDRESS_HI(*scratch_va >> 32);
- if (chip_class >= GFX11)
+ if (gfx_level >= GFX11)
*value |= S_008F04_SWIZZLE_ENABLE_GFX11(1);
else
*value |= S_008F04_SWIZZLE_ENABLE_GFX6(1);
@@ -1234,7 +1234,7 @@ static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
break;
case MESA_SHADER_TESS_CTRL:
- if (shader->selector->screen->info.chip_class >= GFX9) {
+ if (shader->selector->screen->info.gfx_level >= GFX9) {
si_dump_shader_key_vs(key, &key->ge.part.tcs.ls_prolog, "part.tcs.ls_prolog", f);
}
fprintf(f, " part.tcs.epilog.prim_mode = %u\n", key->ge.part.tcs.epilog.prim_mode);
@@ -1254,7 +1254,7 @@ static void si_dump_shader_key(const struct si_shader *shader, FILE *f)
if (shader->is_gs_copy_shader)
break;
- if (shader->selector->screen->info.chip_class >= GFX9 &&
+ if (shader->selector->screen->info.gfx_level >= GFX9 &&
key->ge.part.gs.es->stage == MESA_SHADER_VERTEX) {
si_dump_shader_key_vs(key, &key->ge.part.gs.vs_prolog, "part.gs.vs_prolog", f);
}
@@ -1601,7 +1601,7 @@ struct nir_shader *si_get_nir_shader(struct si_shader_selector *sel,
/* Loop unrolling caused by uniform inlining can help eliminate indirect indexing, so
* this should be done after that.
*/
- progress2 |= ac_nir_lower_indirect_derefs(nir, sel->screen->info.chip_class);
+ progress2 |= ac_nir_lower_indirect_derefs(nir, sel->screen->info.gfx_level);
if (progress2)
si_nir_opts(sel->screen, nir, false);
@@ -1825,7 +1825,7 @@ bool si_compile_shader(struct si_screen *sscreen, struct ac_llvm_compiler *compi
}
/* Add the scratch offset to input SGPRs. */
- if (sel->screen->info.chip_class < GFX11 &&
+ if (sel->screen->info.gfx_level < GFX11 &&
shader->config.scratch_bytes_per_wave && !si_is_merged_shader(shader))
shader->info.num_input_sgprs += 1; /* scratch byte offset */
@@ -1987,7 +1987,7 @@ void si_get_tcs_epilog_key(struct si_shader *shader, union si_shader_part_key *k
static bool si_shader_select_tcs_parts(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
struct si_shader *shader, struct util_debug_callback *debug)
{
- if (sscreen->info.chip_class >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX9) {
struct si_shader *ls_main_part = shader->key.ge.part.tcs.ls->main_shader_part_ls;
if (!si_get_vs_prolog(sscreen, compiler, shader, debug, ls_main_part,
@@ -2013,7 +2013,7 @@ static bool si_shader_select_tcs_parts(struct si_screen *sscreen, struct ac_llvm
static bool si_shader_select_gs_parts(struct si_screen *sscreen, struct ac_llvm_compiler *compiler,
struct si_shader *shader, struct util_debug_callback *debug)
{
- if (sscreen->info.chip_class >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX9) {
struct si_shader *es_main_part;
if (shader->key.ge.as_ngg)
@@ -2445,7 +2445,7 @@ bool si_create_shader_variant(struct si_screen *sscreen, struct ac_llvm_compiler
fprintf(stderr, "Failed to compute subgroup info\n");
return false;
}
- } else if (sscreen->info.chip_class >= GFX9 && sel->stage == MESA_SHADER_GEOMETRY) {
+ } else if (sscreen->info.gfx_level >= GFX9 && sel->stage == MESA_SHADER_GEOMETRY) {
gfx9_get_gs_info(shader->previous_stage_sel, sel, &shader->gs_info);
}
diff --git a/src/gallium/drivers/radeonsi/si_shader_info.c b/src/gallium/drivers/radeonsi/si_shader_info.c
index b6eb7574ef3..497b090a175 100644
--- a/src/gallium/drivers/radeonsi/si_shader_info.c
+++ b/src/gallium/drivers/radeonsi/si_shader_info.c
@@ -773,7 +773,7 @@ void si_nir_scan_shader(struct si_screen *sscreen, const struct nir_shader *nir,
if (nir->info.stage == MESA_SHADER_VERTEX) {
info->num_vs_inputs =
nir->info.stage == MESA_SHADER_VERTEX && !info->base.vs.blit_sgprs_amd ? info->num_inputs : 0;
- unsigned num_vbos_in_sgprs = si_num_vbos_in_user_sgprs_inline(sscreen->info.chip_class);
+ unsigned num_vbos_in_sgprs = si_num_vbos_in_user_sgprs_inline(sscreen->info.gfx_level);
info->num_vbos_in_user_sgprs = MIN2(info->num_vs_inputs, num_vbos_in_sgprs);
/* The prolog is a no-op if there are no inputs. */
@@ -795,7 +795,7 @@ void si_nir_scan_shader(struct si_screen *sscreen, const struct nir_shader *nir,
/* For the ESGS ring in LDS, add 1 dword to reduce LDS bank
* conflicts, i.e. each vertex will start at a different bank.
*/
- if (sscreen->info.chip_class >= GFX9)
+ if (sscreen->info.gfx_level >= GFX9)
info->esgs_itemsize += 4;
assert(((info->esgs_itemsize / 4) & C_028AAC_ITEMSIZE) == 0);
diff --git a/src/gallium/drivers/radeonsi/si_shader_llvm.c b/src/gallium/drivers/radeonsi/si_shader_llvm.c
index f9d6cb980a3..7229abf78b7 100644
--- a/src/gallium/drivers/radeonsi/si_shader_llvm.c
+++ b/src/gallium/drivers/radeonsi/si_shader_llvm.c
@@ -132,7 +132,7 @@ void si_llvm_context_init(struct si_shader_context *ctx, struct si_screen *sscre
ctx->screen = sscreen;
ctx->compiler = compiler;
- ac_llvm_context_init(&ctx->ac, compiler, sscreen->info.chip_class, sscreen->info.family,
+ ac_llvm_context_init(&ctx->ac, compiler, sscreen->info.gfx_level, sscreen->info.family,
&sscreen->info, AC_FLOAT_MODE_DEFAULT_OPENGL, wave_size, 64);
}
@@ -150,7 +150,7 @@ void si_llvm_create_func(struct si_shader_context *ctx, const char *name, LLVMTy
gl_shader_stage real_stage = ctx->stage;
/* LS is merged into HS (TCS), and ES is merged into GS. */
- if (ctx->screen->info.chip_class >= GFX9 && ctx->stage <= MESA_SHADER_GEOMETRY) {
+ if (ctx->screen->info.gfx_level >= GFX9 && ctx->stage <= MESA_SHADER_GEOMETRY) {
if (ctx->shader->key.ge.as_ls)
real_stage = MESA_SHADER_TESS_CTRL;
else if (ctx->shader->key.ge.as_es || ctx->shader->key.ge.as_ngg)
@@ -927,7 +927,7 @@ bool si_llvm_translate_nir(struct si_shader_context *ctx, struct si_shader *shad
}
/* For merged shaders (VS-TCS, VS-GS, TES-GS): */
- if (ctx->screen->info.chip_class >= GFX9 && si_is_merged_shader(shader)) {
+ if (ctx->screen->info.gfx_level >= GFX9 && si_is_merged_shader(shader)) {
/* TES is special because it has only 1 shader part if NGG shader culling is disabled,
* and therefore it doesn't use the wrapper function.
*/
@@ -950,7 +950,7 @@ bool si_llvm_translate_nir(struct si_shader_context *ctx, struct si_shader *shad
if ((ctx->stage == MESA_SHADER_VERTEX || ctx->stage == MESA_SHADER_TESS_EVAL) &&
shader->key.ge.as_ngg && !shader->key.ge.as_es && !shader->key.ge.opt.ngg_culling) {
/* GFX10 requires a barrier before gs_alloc_req due to a hw bug. */
- if (ctx->screen->info.chip_class == GFX10)
+ if (ctx->screen->info.gfx_level == GFX10)
ac_build_s_barrier(&ctx->ac, ctx->stage);
gfx10_ngg_build_sendmsg_gs_alloc_req(ctx);
@@ -1033,7 +1033,7 @@ bool si_llvm_translate_nir(struct si_shader_context *ctx, struct si_shader *shad
if (nir->info.stage == MESA_SHADER_GEOMETRY) {
/* Unpack GS vertex offsets. */
for (unsigned i = 0; i < 6; i++) {
- if (ctx->screen->info.chip_class >= GFX9) {
+ if (ctx->screen->info.gfx_level >= GFX9) {
ctx->gs_vtx_offset[i] = si_unpack_param(ctx, ctx->args.gs_vtx_offset[i / 2], (i & 1) * 16, 16);
} else {
ctx->gs_vtx_offset[i] = ac_get_arg(&ctx->ac, ctx->args.gs_vtx_offset[i]);
@@ -1041,7 +1041,7 @@ bool si_llvm_translate_nir(struct si_shader_context *ctx, struct si_shader *shad
}
/* Apply the hw bug workaround for triangle strips with adjacency. */
- if (ctx->screen->info.chip_class <= GFX9 &&
+ if (ctx->screen->info.gfx_level <= GFX9 &&
ctx->shader->key.ge.mono.u.gs_tri_strip_adj_fix) {
LLVMValueRef prim_id = ac_get_arg(&ctx->ac, ctx->args.gs_prim_id);
/* Remap GS vertex offsets for every other primitive. */
@@ -1136,7 +1136,7 @@ static bool si_should_optimize_less(struct ac_llvm_compiler *compiler,
return false;
/* Assume a slow CPU. */
- assert(!sel->screen->info.has_dedicated_vram && sel->screen->info.chip_class <= GFX8);
+ assert(!sel->screen->info.has_dedicated_vram && sel->screen->info.gfx_level <= GFX8);
/* For a crazy dEQP test containing 2597 memory opcodes, mostly
* buffer stores. */
@@ -1221,7 +1221,7 @@ bool si_llvm_compile_shader(struct si_screen *sscreen, struct ac_llvm_compiler *
si_build_wrapper_function(&ctx, parts, 3, 0, 0, false);
} else if (shader->is_monolithic && sel->stage == MESA_SHADER_TESS_CTRL) {
- if (sscreen->info.chip_class >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX9) {
struct si_shader_selector *ls = shader->key.ge.part.tcs.ls;
LLVMValueRef parts[4];
bool vs_needs_prolog =
@@ -1289,7 +1289,7 @@ bool si_llvm_compile_shader(struct si_screen *sscreen, struct ac_llvm_compiler *
si_build_wrapper_function(&ctx, parts, 2, 0, 0, false);
}
} else if (shader->is_monolithic && sel->stage == MESA_SHADER_GEOMETRY) {
- if (ctx.screen->info.chip_class >= GFX9) {
+ if (ctx.screen->info.gfx_level >= GFX9) {
struct si_shader_selector *es = shader->key.ge.part.gs.es;
LLVMValueRef es_prolog = NULL;
LLVMValueRef es_main = NULL;
diff --git a/src/gallium/drivers/radeonsi/si_shader_llvm_gs.c b/src/gallium/drivers/radeonsi/si_shader_llvm_gs.c
index 19e09e481f4..c5396c2c936 100644
--- a/src/gallium/drivers/radeonsi/si_shader_llvm_gs.c
+++ b/src/gallium/drivers/radeonsi/si_shader_llvm_gs.c
@@ -57,7 +57,7 @@ static LLVMValueRef si_llvm_load_input_gs(struct ac_shader_abi *abi, unsigned in
param = si_shader_io_get_unique_index(info->input[input_index].semantic, false);
/* GFX9 has the ESGS ring in LDS. */
- if (ctx->screen->info.chip_class >= GFX9) {
+ if (ctx->screen->info.gfx_level >= GFX9) {
unsigned offset = param * 4 + swizzle;
vtx_offset = LLVMBuildAdd(ctx->ac.builder, ctx->gs_vtx_offset[vtx_offset_param],
@@ -111,7 +111,7 @@ static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
else
ret = si_insert_input_ret(ctx, ret, ctx->args.gs2vs_offset, 2);
ret = si_insert_input_ret(ctx, ret, ctx->args.merged_wave_info, 3);
- if (ctx->screen->info.chip_class >= GFX11)
+ if (ctx->screen->info.gfx_level >= GFX11)
ret = si_insert_input_ret(ctx, ret, ctx->args.gs_attr_offset, 5);
else
ret = si_insert_input_ret(ctx, ret, ctx->args.scratch_offset, 5);
@@ -121,7 +121,7 @@ static void si_set_es_return_value_for_gs(struct si_shader_context *ctx)
if (ctx->screen->use_ngg) {
ret = si_insert_input_ptr(ctx, ret, ctx->vs_state_bits, 8 + SI_SGPR_VS_STATE_BITS);
ret = si_insert_input_ptr(ctx, ret, ctx->small_prim_cull_info, 8 + GFX9_SGPR_SMALL_PRIM_CULL_INFO);
- if (ctx->screen->info.chip_class >= GFX11)
+ if (ctx->screen->info.gfx_level >= GFX11)
ret = si_insert_input_ptr(ctx, ret, ctx->gs_attr_address, 8 + GFX9_SGPR_ATTRIBUTE_RING_ADDR);
}
@@ -144,7 +144,7 @@ void si_llvm_es_build_end(struct si_shader_context *ctx)
unsigned chan;
int i;
- if (ctx->screen->info.chip_class >= GFX9 && info->num_outputs) {
+ if (ctx->screen->info.gfx_level >= GFX9 && info->num_outputs) {
unsigned itemsize_dw = es->selector->info.esgs_itemsize / 4;
LLVMValueRef vertex_idx = ac_get_thread_id(&ctx->ac);
LLVMValueRef wave_idx = si_unpack_param(ctx, ctx->args.merged_wave_info, 24, 4);
@@ -174,7 +174,7 @@ void si_llvm_es_build_end(struct si_shader_context *ctx)
out_val = ac_to_integer(&ctx->ac, out_val);
/* GFX9 has the ESGS ring in LDS. */
- if (ctx->screen->info.chip_class >= GFX9) {
+ if (ctx->screen->info.gfx_level >= GFX9) {
LLVMValueRef idx = LLVMConstInt(ctx->ac.i32, param * 4 + chan, false);
idx = LLVMBuildAdd(ctx->ac.builder, lds_base, idx, "");
ac_build_indexed_store(&ctx->ac, ctx->esgs_ring, idx, out_val);
@@ -188,13 +188,13 @@ void si_llvm_es_build_end(struct si_shader_context *ctx)
}
}
- if (ctx->screen->info.chip_class >= GFX9)
+ if (ctx->screen->info.gfx_level >= GFX9)
si_set_es_return_value_for_gs(ctx);
}
static LLVMValueRef si_get_gs_wave_id(struct si_shader_context *ctx)
{
- if (ctx->screen->info.chip_class >= GFX9)
+ if (ctx->screen->info.gfx_level >= GFX9)
return si_unpack_param(ctx, ctx->args.merged_wave_info, 16, 8);
else
return ac_get_arg(&ctx->ac, ctx->args.gs_wave_id);
@@ -214,7 +214,7 @@ void si_llvm_gs_build_end(struct si_shader_context *ctx)
assert(info->num_outputs <= AC_LLVM_MAX_OUTPUTS);
- if (ctx->screen->info.chip_class >= GFX10)
+ if (ctx->screen->info.gfx_level >= GFX10)
ac_build_waitcnt(&ctx->ac, AC_WAIT_VSTORE);
if (ctx->screen->use_ngg) {
@@ -265,7 +265,7 @@ void si_llvm_gs_build_end(struct si_shader_context *ctx)
ac_build_sendmsg(&ctx->ac, AC_SENDMSG_GS_OP_NOP | AC_SENDMSG_GS_DONE, si_get_gs_wave_id(ctx));
- if (ctx->screen->info.chip_class >= GFX9)
+ if (ctx->screen->info.gfx_level >= GFX9)
ac_build_endif(&ctx->ac, ctx->merged_wrap_if_label);
}
@@ -366,7 +366,7 @@ void si_preload_esgs_ring(struct si_shader_context *ctx)
{
LLVMBuilderRef builder = ctx->ac.builder;
- if (ctx->screen->info.chip_class <= GFX8) {
+ if (ctx->screen->info.gfx_level <= GFX8) {
LLVMValueRef offset = LLVMConstInt(ctx->ac.i32, SI_RING_ESGS, 0);
LLVMValueRef buf_ptr = ac_get_arg(&ctx->ac, ctx->internal_bindings);
@@ -384,7 +384,7 @@ void si_preload_esgs_ring(struct si_shader_context *ctx)
S_008F0C_ADD_TID_ENABLE(1), 0), "");
/* If MUBUF && ADD_TID_ENABLE, DATA_FORMAT means STRIDE[14:17] on gfx8-9, so set 0. */
- if (ctx->screen->info.chip_class == GFX8) {
+ if (ctx->screen->info.gfx_level == GFX8) {
desc3 = LLVMBuildAnd(builder, desc3,
LLVMConstInt(ctx->ac.i32, C_008F0C_DATA_FORMAT, 0), "");
}
@@ -406,7 +406,7 @@ void si_preload_esgs_ring(struct si_shader_context *ctx)
void si_preload_gs_rings(struct si_shader_context *ctx)
{
- if (ctx->ac.chip_class >= GFX11)
+ if (ctx->ac.gfx_level >= GFX11)
return;
const struct si_shader_selector *sel = ctx->shader->selector;
@@ -464,12 +464,12 @@ void si_preload_gs_rings(struct si_shader_context *ctx)
S_008F0C_INDEX_STRIDE(1) | /* index_stride = 16 (elements) */
S_008F0C_ADD_TID_ENABLE(1);
- if (ctx->ac.chip_class >= GFX10) {
+ if (ctx->ac.gfx_level >= GFX10) {
rsrc3 |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_DISABLED) | S_008F0C_RESOURCE_LEVEL(1);
} else {
/* If MUBUF && ADD_TID_ENABLE, DATA_FORMAT means STRIDE[14:17] on gfx8-9, so set 0. */
- unsigned data_format = ctx->ac.chip_class == GFX8 || ctx->ac.chip_class == GFX9 ?
+ unsigned data_format = ctx->ac.gfx_level == GFX8 || ctx->ac.gfx_level == GFX9 ?
0 : V_008F0C_BUF_DATA_FORMAT_32;
rsrc3 |= S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
diff --git a/src/gallium/drivers/radeonsi/si_shader_llvm_ps.c b/src/gallium/drivers/radeonsi/si_shader_llvm_ps.c
index 79a32a2774f..a3f4c7fd184 100644
--- a/src/gallium/drivers/radeonsi/si_shader_llvm_ps.c
+++ b/src/gallium/drivers/radeonsi/si_shader_llvm_ps.c
@@ -85,7 +85,7 @@ static LLVMValueRef si_nir_emit_fbfetch(struct ac_shader_abi *abi)
if (ctx->shader->key.ps.mono.fbfetch_msaa)
args.coords[chan++] = si_get_sample_id(ctx);
- if (ctx->screen->info.chip_class < GFX11 &&
+ if (ctx->screen->info.gfx_level < GFX11 &&
ctx->shader->key.ps.mono.fbfetch_msaa &&
!(ctx->screen->debug_flags & DBG(NO_FMASK))) {
fmask = ac_build_load_to_sgpr(&ctx->ac, ptr,
@@ -296,7 +296,7 @@ static bool si_llvm_init_ps_export_args(struct si_shader_context *ctx, LLVMValue
if (key->ps.part.epilog.dual_src_blend_swizzle &&
(compacted_mrt_index == 0 || compacted_mrt_index == 1)) {
- assert(ctx->ac.chip_class >= GFX11);
+ assert(ctx->ac.gfx_level >= GFX11);
args->target += 21;
}
@@ -323,7 +323,7 @@ static bool si_llvm_init_ps_export_args(struct si_shader_context *ctx, LLVMValue
break;
case V_028714_SPI_SHADER_32_AR:
- if (ctx->screen->info.chip_class >= GFX10) {
+ if (ctx->screen->info.gfx_level >= GFX10) {
args->enabled_channels = 0x3; /* writemask */
args->out[0] = get_color_32bit(ctx, color_type, values[0]);
args->out[1] = get_color_32bit(ctx, color_type, values[3]);
@@ -397,7 +397,7 @@ static bool si_llvm_init_ps_export_args(struct si_shader_context *ctx, LLVMValue
}
}
if (packf || packi) {
- if (ctx->screen->info.chip_class >= GFX11)
+ if (ctx->screen->info.gfx_level >= GFX11)
args->enabled_channels = 0x3;
else
args->compr = 1; /* COMPR flag */
@@ -937,7 +937,7 @@ void si_llvm_build_ps_epilog(struct si_shader_context *ctx, union si_shader_part
exp.args[exp.num - 1].done = 1; /* DONE bit */
if (key->ps_epilog.states.dual_src_blend_swizzle) {
- assert(ctx->ac.chip_class >= GFX11);
+ assert(ctx->ac.gfx_level >= GFX11);
assert((key->ps_epilog.colors_written & 0x3) == 0x3);
ac_build_dual_src_blend_swizzle(&ctx->ac, &exp.args[first_color_export],
&exp.args[first_color_export + 1]);
diff --git a/src/gallium/drivers/radeonsi/si_shader_llvm_resources.c b/src/gallium/drivers/radeonsi/si_shader_llvm_resources.c
index bb857d79dd6..5a0ab62250a 100644
--- a/src/gallium/drivers/radeonsi/si_shader_llvm_resources.c
+++ b/src/gallium/drivers/radeonsi/si_shader_llvm_resources.c
@@ -72,10 +72,10 @@ static LLVMValueRef load_const_buffer_desc_fast_path(struct si_shader_context *c
uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
- if (ctx->screen->info.chip_class >= GFX11)
+ if (ctx->screen->info.gfx_level >= GFX11)
rsrc3 |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_FLOAT) |
S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW);
- else if (ctx->screen->info.chip_class >= GFX10)
+ else if (ctx->screen->info.gfx_level >= GFX10)
rsrc3 |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1);
else
@@ -138,7 +138,7 @@ static LLVMValueRef load_ssbo(struct ac_shader_abi *abi, LLVMValueRef index, boo
*/
static LLVMValueRef force_dcc_off(struct si_shader_context *ctx, LLVMValueRef rsrc)
{
- if (ctx->screen->info.chip_class <= GFX7) {
+ if (ctx->screen->info.gfx_level <= GFX7) {
return rsrc;
} else {
LLVMValueRef i32_6 = LLVMConstInt(ctx->ac.i32, 6, 0);
@@ -165,7 +165,7 @@ static LLVMValueRef force_write_compress_off(struct si_shader_context *ctx, LLVM
static LLVMValueRef fixup_image_desc(struct si_shader_context *ctx, LLVMValueRef rsrc,
bool uses_store)
{
- if (uses_store && ctx->ac.chip_class <= GFX9)
+ if (uses_store && ctx->ac.gfx_level <= GFX9)
rsrc = force_dcc_off(ctx, rsrc);
if (!uses_store && ctx->screen->info.has_image_load_dcc_bug &&
@@ -222,7 +222,7 @@ static LLVMValueRef si_load_sampler_desc(struct si_shader_context *ctx, LLVMValu
break;
case AC_DESC_FMASK:
/* The FMASK is at [8:15]. */
- assert(ctx->screen->info.chip_class < GFX11);
+ assert(ctx->screen->info.gfx_level < GFX11);
index = ac_build_imad(&ctx->ac, index, LLVMConstInt(ctx->ac.i32, 2, 0), ctx->ac.i32_1);
break;
case AC_DESC_SAMPLER:
diff --git a/src/gallium/drivers/radeonsi/si_shader_llvm_tess.c b/src/gallium/drivers/radeonsi/si_shader_llvm_tess.c
index 925b9e156a0..df228eed990 100644
--- a/src/gallium/drivers/radeonsi/si_shader_llvm_tess.c
+++ b/src/gallium/drivers/radeonsi/si_shader_llvm_tess.c
@@ -159,7 +159,7 @@ static LLVMValueRef get_tcs_in_vertex_dw_stride(struct si_shader_context *ctx)
return LLVMConstInt(ctx->ac.i32, stride, 0);
case MESA_SHADER_TESS_CTRL:
- if (ctx->screen->info.chip_class >= GFX9 && ctx->shader->is_monolithic) {
+ if (ctx->screen->info.gfx_level >= GFX9 && ctx->shader->is_monolithic) {
stride = ctx->shader->key.ge.part.tcs.ls->info.lshs_vertex_stride / 4;
return LLVMConstInt(ctx->ac.i32, stride, 0);
}
@@ -357,10 +357,10 @@ static LLVMValueRef get_tess_ring_descriptor(struct si_shader_context *ctx, enum
uint32_t rsrc3 = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) | S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) | S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
- if (ctx->screen->info.chip_class >= GFX11)
+ if (ctx->screen->info.gfx_level >= GFX11)
rsrc3 |= S_008F0C_FORMAT(V_008F0C_GFX11_FORMAT_32_FLOAT) |
S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW);
- else if (ctx->screen->info.chip_class >= GFX10)
+ else if (ctx->screen->info.gfx_level >= GFX10)
rsrc3 |= S_008F0C_FORMAT(V_008F0C_GFX10_FORMAT_32_FLOAT) |
S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_RAW) | S_008F0C_RESOURCE_LEVEL(1);
else
@@ -706,7 +706,7 @@ static void si_write_tess_factors(struct si_shader_context *ctx, union si_shader
offset = 0;
/* Store the dynamic HS control word. */
- if (ctx->screen->info.chip_class <= GFX8) {
+ if (ctx->screen->info.gfx_level <= GFX8) {
ac_build_ifcc(&ctx->ac,
LLVMBuildICmp(ctx->ac.builder, LLVMIntEQ, rel_patch_id, ctx->ac.i32_0, ""), 6504);
ac_build_buffer_store_dword(&ctx->ac, buffer, LLVMConstInt(ctx->ac.i32, 0x80000000, 0),
@@ -770,7 +770,7 @@ void si_llvm_tcs_build_end(struct si_shader_context *ctx)
invocation_id = si_unpack_param(ctx, ctx->args.tcs_rel_ids, 8, 5);
tf_lds_offset = get_tcs_out_current_patch_data_offset(ctx);
- if (ctx->screen->info.chip_class >= GFX9 && !ctx->shader->is_monolithic) {
+ if (ctx->screen->info.gfx_level >= GFX9 && !ctx->shader->is_monolithic) {
LLVMBasicBlockRef blocks[2] = {LLVMGetInsertBlock(builder), ctx->merged_wrap_if_entry_block};
LLVMValueRef values[2];
@@ -793,7 +793,7 @@ void si_llvm_tcs_build_end(struct si_shader_context *ctx)
LLVMValueRef ret = ctx->return_value;
unsigned vgpr;
- if (ctx->screen->info.chip_class >= GFX9) {
+ if (ctx->screen->info.gfx_level >= GFX9) {
ret =
si_insert_input_ret(ctx, ret, ctx->tcs_offchip_layout, 8 + GFX9_SGPR_TCS_OFFCHIP_LAYOUT);
ret = si_insert_input_ret(ctx, ret, ctx->tcs_out_lds_layout, 8 + GFX9_SGPR_TCS_OUT_LAYOUT);
@@ -850,7 +850,7 @@ static void si_set_ls_return_value_for_tcs(struct si_shader_context *ctx)
ret = si_insert_input_ret(ctx, ret, ctx->args.tess_offchip_offset, 2);
ret = si_insert_input_ret(ctx, ret, ctx->args.merged_wave_info, 3);
ret = si_insert_input_ret(ctx, ret, ctx->args.tcs_factor_offset, 4);
- if (ctx->screen->info.chip_class <= GFX10_3)
+ if (ctx->screen->info.gfx_level <= GFX10_3)
ret = si_insert_input_ret(ctx, ret, ctx->args.scratch_offset, 5);
ret = si_insert_input_ptr(ctx, ret, ctx->internal_bindings, 8 + SI_SGPR_INTERNAL_BINDINGS);
@@ -879,7 +879,7 @@ void si_llvm_ls_build_end(struct si_shader_context *ctx)
struct si_shader_info *info = &shader->selector->info;
unsigned i, chan;
LLVMValueRef vertex_id;
- if (ctx->screen->info.chip_class >= GFX11) {
+ if (ctx->screen->info.gfx_level >= GFX11) {
vertex_id = ac_build_imad(&ctx->ac, si_unpack_param(ctx, ctx->args.tcs_wave_id, 0, 5),
LLVMConstInt(ctx->ac.i32, ctx->ac.wave_size, 0),
ac_get_thread_id(&ctx->ac));
@@ -935,7 +935,7 @@ void si_llvm_ls_build_end(struct si_shader_context *ctx)
}
}
- if (ctx->screen->info.chip_class >= GFX9)
+ if (ctx->screen->info.gfx_level >= GFX9)
si_set_ls_return_value_for_tcs(ctx);
}
@@ -947,7 +947,7 @@ void si_llvm_build_tcs_epilog(struct si_shader_context *ctx, union si_shader_par
{
memset(&ctx->args, 0, sizeof(ctx->args));
- if (ctx->screen->info.chip_class >= GFX9) {
+ if (ctx->screen->info.gfx_level >= GFX9) {
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, NULL);
ac_add_arg(&ctx->args, AC_ARG_SGPR, 1, AC_ARG_INT, &ctx->args.tess_offchip_offset);
@@ -995,7 +995,7 @@ void si_llvm_build_tcs_epilog(struct si_shader_context *ctx, union si_shader_par
ac_add_arg(&ctx->args, AC_ARG_VGPR, 1, AC_ARG_INT, &tess_factors[i]);
/* Create the function. */
- si_llvm_create_func(ctx, "tcs_epilog", NULL, 0, ctx->screen->info.chip_class >= GFX7 ? 128 : 0);
+ si_llvm_create_func(ctx, "tcs_epilog", NULL, 0, ctx->screen->info.gfx_level >= GFX7 ? 128 : 0);
ac_declare_lds_as_pointer(&ctx->ac);
LLVMValueRef invoc0_tess_factors[6];
diff --git a/src/gallium/drivers/radeonsi/si_shader_llvm_vs.c b/src/gallium/drivers/radeonsi/si_shader_llvm_vs.c
index 103fb64356a..bae47468d1c 100644
--- a/src/gallium/drivers/radeonsi/si_shader_llvm_vs.c
+++ b/src/gallium/drivers/radeonsi/si_shader_llvm_vs.c
@@ -602,7 +602,7 @@ void si_llvm_build_vs_exports(struct si_shader_context *ctx,
if (writes_vrs) {
LLVMValueRef rates;
- if (ctx->screen->info.chip_class >= GFX11) {
+ if (ctx->screen->info.gfx_level >= GFX11) {
/* Bits [2:5] = VRS rate
*
* The range is [0, 15].
@@ -637,7 +637,7 @@ void si_llvm_build_vs_exports(struct si_shader_context *ctx,
pos_args[1].out[1] = ac_to_float(&ctx->ac, v);
}
- if (ctx->screen->info.chip_class >= GFX9) {
+ if (ctx->screen->info.gfx_level >= GFX9) {
/* GFX9 has the layer in out.z[10:0] and the viewport
* index in out.z[19:16].
*/
@@ -671,7 +671,7 @@ void si_llvm_build_vs_exports(struct si_shader_context *ctx,
/* GFX10 (Navi1x) skip POS0 exports if EXEC=0 and DONE=0, causing a hang.
* Setting valid_mask=1 prevents it and has no other effect.
*/
- if (ctx->screen->info.chip_class == GFX10)
+ if (ctx->screen->info.gfx_level == GFX10)
pos_args[0].valid_mask = 1;
pos_idx = 0;
@@ -692,7 +692,7 @@ void si_llvm_build_vs_exports(struct si_shader_context *ctx,
*
* VLOAD is for atomics with return.
*/
- if (ctx->screen->info.chip_class >= GFX10 &&
+ if (ctx->screen->info.gfx_level >= GFX10 &&
!shader->info.nr_param_exports &&
shader->selector->info.base.writes_memory)
ac_build_waitcnt(&ctx->ac, AC_WAIT_VLOAD | AC_WAIT_VSTORE);
@@ -721,7 +721,7 @@ void si_llvm_build_vs_exports(struct si_shader_context *ctx,
&param_exports[offset]);
}
- if (ctx->screen->info.chip_class >= GFX11) {
+ if (ctx->screen->info.gfx_level >= GFX11) {
/* Get the attribute ring address and descriptor. */
LLVMValueRef attr_address;
if (ctx->stage == MESA_SHADER_VERTEX && shader->selector->info.base.vs.blit_sgprs_amd) {
@@ -910,7 +910,7 @@ void si_llvm_build_vs_prolog(struct si_shader_context *ctx, union si_shader_part
}
unsigned vertex_id_vgpr = first_vs_vgpr;
- unsigned instance_id_vgpr = ctx->screen->info.chip_class >= GFX10
+ unsigned instance_id_vgpr = ctx->screen->info.gfx_level >= GFX10
? first_vs_vgpr + 3
: first_vs_vgpr + (key->vs_prolog.as_ls ? 2 : 1);
diff --git a/src/gallium/drivers/radeonsi/si_shader_nir.c b/src/gallium/drivers/radeonsi/si_shader_nir.c
index 10815770d9b..d3bbc864b6e 100644
--- a/src/gallium/drivers/radeonsi/si_shader_nir.c
+++ b/src/gallium/drivers/radeonsi/si_shader_nir.c
@@ -160,7 +160,7 @@ static void si_late_optimize_16bit_samplers(struct si_screen *sscreen, nir_shade
* based on those two.
*/
/* TODO: The constraints can't represent the ddx constraint. */
- /*bool has_g16 = sscreen->info.chip_class >= GFX10 && LLVM_VERSION_MAJOR >= 12;*/
+ /*bool has_g16 = sscreen->info.gfx_level >= GFX10 && LLVM_VERSION_MAJOR >= 12;*/
bool has_g16 = false;
nir_tex_src_type_constraints tex_constraints = {
[nir_tex_src_comparator] = {true, 32},
diff --git a/src/gallium/drivers/radeonsi/si_sqtt.c b/src/gallium/drivers/radeonsi/si_sqtt.c
index de5cf86923f..5e4e03c2bfa 100644
--- a/src/gallium/drivers/radeonsi/si_sqtt.c
+++ b/src/gallium/drivers/radeonsi/si_sqtt.c
@@ -103,7 +103,7 @@ si_emit_thread_trace_start(struct si_context* sctx,
/* Select the first active CUs */
int first_active_cu = ffs(sctx->screen->info.cu_mask[se][0]);
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
/* Order seems important for the following 2 registers. */
radeon_set_privileged_config_reg(R_008D04_SQ_THREAD_TRACE_BUF0_SIZE,
S_008D04_SIZE(shifted_size) |
@@ -139,7 +139,7 @@ si_emit_thread_trace_start(struct si_context* sctx,
S_008D1C_SQ_STALL_EN(1) |
S_008D1C_REG_DROP_ON_STALL(0) |
S_008D1C_LOWATER_OFFSET(
- sctx->chip_class >= GFX10_3 ? 4 : 0) |
+ sctx->gfx_level >= GFX10_3 ? 4 : 0) |
S_008D1C_AUTO_FLUSH_MODE(sctx->screen->info.has_sqtt_auto_flush_mode_bug));
} else {
/* Order seems important for the following 4 registers. */
@@ -181,7 +181,7 @@ si_emit_thread_trace_start(struct si_context* sctx,
radeon_set_uconfig_reg(R_030CEC_SQ_THREAD_TRACE_HIWATER,
S_030CEC_HIWATER(4));
- if (sctx->chip_class == GFX9) {
+ if (sctx->gfx_level == GFX9) {
/* Reset thread trace status errors. */
radeon_set_uconfig_reg(R_030CE8_SQ_THREAD_TRACE_STATUS,
S_030CE8_UTC_ERROR(0));
@@ -199,7 +199,7 @@ si_emit_thread_trace_start(struct si_context* sctx,
S_030CD8_AUTOFLUSH_EN(1) | /* periodically flush SQTT data to memory */
S_030CD8_MODE(1);
- if (sctx->chip_class == GFX9) {
+ if (sctx->gfx_level == GFX9) {
/* Count SQTT traffic in TCC perf counters. */
thread_trace_mode |= S_030CD8_TC_PERF_EN(1);
}
@@ -247,7 +247,7 @@ si_copy_thread_trace_info_regs(struct si_context* sctx,
{
const uint32_t *thread_trace_info_regs = NULL;
- switch (sctx->chip_class) {
+ switch (sctx->gfx_level) {
case GFX10_3:
case GFX10:
thread_trace_info_regs = gfx10_thread_trace_info_regs;
@@ -256,7 +256,7 @@ si_copy_thread_trace_info_regs(struct si_context* sctx,
thread_trace_info_regs = gfx9_thread_trace_info_regs;
break;
default:
- unreachable("Unsupported chip_class");
+ unreachable("Unsupported gfx_level");
}
/* Get the VA where the info struct is stored for this SE. */
@@ -323,7 +323,7 @@ si_emit_thread_trace_stop(struct si_context *sctx,
S_030800_SH_INDEX(0) |
S_030800_INSTANCE_BROADCAST_WRITES(1));
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
if (!sctx->screen->info.has_sqtt_rb_harvest_bug) {
/* Make sure to wait for the trace buffer. */
radeon_emit(PKT3(PKT3_WAIT_REG_MEM, 5, 0));
@@ -577,7 +577,7 @@ si_get_thread_trace(struct si_context *sctx,
/* For GFX10+ compute_unit really means WGP */
thread_trace_se.compute_unit =
- sctx->screen->info.chip_class >= GFX10 ? (first_active_cu / 2) : first_active_cu;
+ sctx->screen->info.gfx_level >= GFX10 ? (first_active_cu / 2) : first_active_cu;
thread_trace->traces[se] = thread_trace_se;
}
@@ -600,14 +600,14 @@ si_init_thread_trace(struct si_context *sctx)
sctx->thread_trace = CALLOC_STRUCT(ac_thread_trace_data);
- if (sctx->chip_class < GFX8) {
+ if (sctx->gfx_level < GFX8) {
fprintf(stderr, "GPU hardware not supported: refer to "
"the RGP documentation for the list of "
"supported GPUs!\n");
return false;
}
- if (sctx->chip_class > GFX10_3) {
+ if (sctx->gfx_level > GFX10_3) {
fprintf(stderr, "radeonsi: Thread trace is not supported "
"for that GPU!\n");
return false;
@@ -639,7 +639,7 @@ si_init_thread_trace(struct si_context *sctx)
list_inithead(&sctx->thread_trace->rgp_code_object.record);
simple_mtx_init(&sctx->thread_trace->rgp_code_object.lock, mtx_plain);
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
/* Limit SPM counters to GFX10+ for now */
ASSERTED bool r = si_spm_init(sctx);
assert(r);
@@ -700,7 +700,7 @@ si_destroy_thread_trace(struct si_context *sctx)
free(sctx->thread_trace);
sctx->thread_trace = NULL;
- if (sctx->chip_class >= GFX10)
+ if (sctx->gfx_level >= GFX10)
si_spm_finish(sctx);
}
@@ -753,7 +753,7 @@ si_handle_thread_trace(struct si_context *sctx, struct radeon_cmdbuf *rcs)
if (sctx->ws->fence_wait(sctx->ws, sctx->last_sqtt_fence, PIPE_TIMEOUT_INFINITE) &&
si_get_thread_trace(sctx, &thread_trace)) {
/* Map the SPM counter buffer */
- if (sctx->chip_class >= GFX10)
+ if (sctx->gfx_level >= GFX10)
sctx->spm_trace.ptr = sctx->ws->buffer_map(sctx->ws, sctx->spm_trace.bo,
NULL, PIPE_MAP_READ | RADEON_MAP_TEMPORARY);
@@ -784,7 +784,7 @@ si_emit_thread_trace_userdata(struct si_context* sctx,
/* Without the perfctr bit the CP might not always pass the
* write on correctly. */
- radeon_set_uconfig_reg_seq(R_030D08_SQ_THREAD_TRACE_USERDATA_2, count, sctx->chip_class >= GFX10);
+ radeon_set_uconfig_reg_seq(R_030D08_SQ_THREAD_TRACE_USERDATA_2, count, sctx->gfx_level >= GFX10);
radeon_emit_array(dwords, count);
@@ -800,13 +800,13 @@ si_emit_spi_config_cntl(struct si_context* sctx,
{
radeon_begin(cs);
- if (sctx->chip_class >= GFX9) {
+ if (sctx->gfx_level >= GFX9) {
uint32_t spi_config_cntl = S_031100_GPR_WRITE_PRIORITY(0x2c688) |
S_031100_EXP_PRIORITY_ORDER(3) |
S_031100_ENABLE_SQG_TOP_EVENTS(enable) |
S_031100_ENABLE_SQG_BOP_EVENTS(enable);
- if (sctx->chip_class >= GFX10)
+ if (sctx->gfx_level >= GFX10)
spi_config_cntl |= S_031100_PS_PKR_PRIORITY_CNTL(3);
radeon_set_uconfig_reg(R_031100_SPI_CONFIG_CNTL, spi_config_cntl);
diff --git a/src/gallium/drivers/radeonsi/si_state.c b/src/gallium/drivers/radeonsi/si_state.c
index d28a4010fbf..69d3cd859b2 100644
--- a/src/gallium/drivers/radeonsi/si_state.c
+++ b/src/gallium/drivers/radeonsi/si_state.c
@@ -103,7 +103,7 @@ static void si_emit_cb_render_state(struct si_context *sctx)
radeon_opt_set_context_reg(sctx, R_028238_CB_TARGET_MASK, SI_TRACKED_CB_TARGET_MASK,
cb_target_mask);
- if (sctx->chip_class >= GFX8) {
+ if (sctx->gfx_level >= GFX8) {
/* DCC MSAA workaround.
* Alternatively, we can set CB_COLORi_DCC_CONTROL.OVERWRITE_-
* COMBINER_DISABLE, but that would be more complicated.
@@ -112,17 +112,17 @@ static void si_emit_cb_render_state(struct si_context *sctx)
blend->dcc_msaa_corruption_4bit & cb_target_mask && sctx->framebuffer.nr_samples >= 2;
unsigned watermark = sctx->framebuffer.dcc_overwrite_combiner_watermark;
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
radeon_opt_set_context_reg(sctx, R_028424_CB_FDCC_CONTROL, SI_TRACKED_CB_DCC_CONTROL,
S_028424_SAMPLE_MASK_TRACKER_DISABLE(oc_disable) |
S_028424_SAMPLE_MASK_TRACKER_WATERMARK(watermark));
} else {
radeon_opt_set_context_reg(
sctx, R_028424_CB_DCC_CONTROL, SI_TRACKED_CB_DCC_CONTROL,
- S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(sctx->chip_class <= GFX9) |
+ S_028424_OVERWRITE_COMBINER_MRT_SHARING_DISABLE(sctx->gfx_level <= GFX9) |
S_028424_OVERWRITE_COMBINER_WATERMARK(watermark) |
S_028424_OVERWRITE_COMBINER_DISABLE(oc_disable) |
- S_028424_DISABLE_CONSTANT_ENCODE_REG(sctx->chip_class < GFX11 &&
+ S_028424_DISABLE_CONSTANT_ENCODE_REG(sctx->gfx_level < GFX11 &&
sctx->screen->info.has_dcc_constant_encode));
}
}
@@ -151,14 +151,14 @@ static void si_emit_cb_render_state(struct si_context *sctx)
continue;
}
- format = sctx->chip_class >= GFX11 ? G_028C70_FORMAT_GFX11(surf->cb_color_info):
+ format = sctx->gfx_level >= GFX11 ? G_028C70_FORMAT_GFX11(surf->cb_color_info):
G_028C70_FORMAT_GFX6(surf->cb_color_info);
swap = G_028C70_COMP_SWAP(surf->cb_color_info);
spi_format = (spi_shader_col_format >> (i * 4)) & 0xf;
colormask = (cb_target_mask >> (i * 4)) & 0xf;
/* Set if RGB and A are present. */
- has_alpha = !(sctx->chip_class >= GFX11 ? G_028C74_FORCE_DST_ALPHA_1_GFX11(surf->cb_color_attrib):
+ has_alpha = !(sctx->gfx_level >= GFX11 ? G_028C74_FORCE_DST_ALPHA_1_GFX11(surf->cb_color_attrib):
G_028C74_FORCE_DST_ALPHA_1_GFX6(surf->cb_color_attrib));
if (format == V_028C70_COLOR_8 || format == V_028C70_COLOR_16 ||
@@ -298,7 +298,7 @@ static uint32_t si_translate_blend_function(int blend_func)
return 0;
}
-static uint32_t si_translate_blend_factor(enum chip_class chip_class, int blend_fact)
+static uint32_t si_translate_blend_factor(enum amd_gfx_level gfx_level, int blend_fact)
{
switch (blend_fact) {
case PIPE_BLENDFACTOR_ONE:
@@ -314,10 +314,10 @@ static uint32_t si_translate_blend_factor(enum chip_class chip_class, int blend_
case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE:
return V_028780_BLEND_SRC_ALPHA_SATURATE;
case PIPE_BLENDFACTOR_CONST_COLOR:
- return chip_class >= GFX11 ? V_028780_BLEND_CONSTANT_COLOR_GFX11:
+ return gfx_level >= GFX11 ? V_028780_BLEND_CONSTANT_COLOR_GFX11:
V_028780_BLEND_CONSTANT_COLOR_GFX6;
case PIPE_BLENDFACTOR_CONST_ALPHA:
- return chip_class >= GFX11 ? V_028780_BLEND_CONSTANT_ALPHA_GFX11 :
+ return gfx_level >= GFX11 ? V_028780_BLEND_CONSTANT_ALPHA_GFX11 :
V_028780_BLEND_CONSTANT_ALPHA_GFX6;
case PIPE_BLENDFACTOR_ZERO:
return V_028780_BLEND_ZERO;
@@ -330,22 +330,22 @@ static uint32_t si_translate_blend_factor(enum chip_class chip_class, int blend_
case PIPE_BLENDFACTOR_INV_DST_COLOR:
return V_028780_BLEND_ONE_MINUS_DST_COLOR;
case PIPE_BLENDFACTOR_INV_CONST_COLOR:
- return chip_class >= GFX11 ? V_028780_BLEND_ONE_MINUS_CONSTANT_COLOR_GFX11:
+ return gfx_level >= GFX11 ? V_028780_BLEND_ONE_MINUS_CONSTANT_COLOR_GFX11:
V_028780_BLEND_ONE_MINUS_CONSTANT_COLOR_GFX6;
case PIPE_BLENDFACTOR_INV_CONST_ALPHA:
- return chip_class >= GFX11 ? V_028780_BLEND_ONE_MINUS_CONSTANT_ALPHA_GFX11:
+ return gfx_level >= GFX11 ? V_028780_BLEND_ONE_MINUS_CONSTANT_ALPHA_GFX11:
V_028780_BLEND_ONE_MINUS_CONSTANT_ALPHA_GFX6;
case PIPE_BLENDFACTOR_SRC1_COLOR:
- return chip_class >= GFX11 ? V_028780_BLEND_SRC1_COLOR_GFX11:
+ return gfx_level >= GFX11 ? V_028780_BLEND_SRC1_COLOR_GFX11:
V_028780_BLEND_SRC1_COLOR_GFX6;
case PIPE_BLENDFACTOR_SRC1_ALPHA:
- return chip_class >= GFX11 ? V_028780_BLEND_SRC1_ALPHA_GFX11:
+ return gfx_level >= GFX11 ? V_028780_BLEND_SRC1_ALPHA_GFX11:
V_028780_BLEND_SRC1_ALPHA_GFX6;
case PIPE_BLENDFACTOR_INV_SRC1_COLOR:
- return chip_class >= GFX11 ? V_028780_BLEND_INV_SRC1_COLOR_GFX11:
+ return gfx_level >= GFX11 ? V_028780_BLEND_INV_SRC1_COLOR_GFX11:
V_028780_BLEND_INV_SRC1_COLOR_GFX6;
case PIPE_BLENDFACTOR_INV_SRC1_ALPHA:
- return chip_class >= GFX11 ? V_028780_BLEND_INV_SRC1_ALPHA_GFX11:
+ return gfx_level >= GFX11 ? V_028780_BLEND_INV_SRC1_ALPHA_GFX11:
V_028780_BLEND_INV_SRC1_ALPHA_GFX6;
default:
PRINT_ERR("Bad blend factor %d not supported!\n", blend_fact);
@@ -515,7 +515,7 @@ static void *si_create_blend_state_mode(struct pipe_context *ctx,
/* Only set dual source blending for MRT0 to avoid a hang. */
if (i >= 1 && blend->dual_src_blend) {
if (i == 1) {
- if (sctx->chip_class >= GFX11)
+ if (sctx->gfx_level >= GFX11)
blend_cntl = last_blend_cntl;
else
blend_cntl = S_028780_ENABLE(1);
@@ -588,21 +588,21 @@ static void *si_create_blend_state_mode(struct pipe_context *ctx,
/* Set blend state. */
blend_cntl |= S_028780_ENABLE(1);
blend_cntl |= S_028780_COLOR_COMB_FCN(si_translate_blend_function(eqRGB));
- blend_cntl |= S_028780_COLOR_SRCBLEND(si_translate_blend_factor(sctx->chip_class, srcRGB));
- blend_cntl |= S_028780_COLOR_DESTBLEND(si_translate_blend_factor(sctx->chip_class, dstRGB));
+ blend_cntl |= S_028780_COLOR_SRCBLEND(si_translate_blend_factor(sctx->gfx_level, srcRGB));
+ blend_cntl |= S_028780_COLOR_DESTBLEND(si_translate_blend_factor(sctx->gfx_level, dstRGB));
if (srcA != srcRGB || dstA != dstRGB || eqA != eqRGB) {
blend_cntl |= S_028780_SEPARATE_ALPHA_BLEND(1);
blend_cntl |= S_028780_ALPHA_COMB_FCN(si_translate_blend_function(eqA));
- blend_cntl |= S_028780_ALPHA_SRCBLEND(si_translate_blend_factor(sctx->chip_class, srcA));
- blend_cntl |= S_028780_ALPHA_DESTBLEND(si_translate_blend_factor(sctx->chip_class, dstA));
+ blend_cntl |= S_028780_ALPHA_SRCBLEND(si_translate_blend_factor(sctx->gfx_level, srcA));
+ blend_cntl |= S_028780_ALPHA_DESTBLEND(si_translate_blend_factor(sctx->gfx_level, dstA));
}
si_pm4_set_reg(pm4, R_028780_CB_BLEND0_CONTROL + i * 4, blend_cntl);
last_blend_cntl = blend_cntl;
blend->blend_enable_4bit |= 0xfu << (i * 4);
- if (sctx->chip_class >= GFX8 && sctx->chip_class <= GFX10)
+ if (sctx->gfx_level >= GFX8 && sctx->gfx_level <= GFX10)
blend->dcc_msaa_corruption_4bit |= 0xfu << (i * 4);
/* This is only important for formats without alpha. */
@@ -613,7 +613,7 @@ static void *si_create_blend_state_mode(struct pipe_context *ctx,
blend->need_src_alpha_4bit |= 0xfu << (i * 4);
}
- if (sctx->chip_class >= GFX8 && sctx->chip_class <= GFX10 && logicop_enable)
+ if (sctx->gfx_level >= GFX8 && sctx->gfx_level <= GFX10 && logicop_enable)
blend->dcc_msaa_corruption_4bit |= blend->cb_target_enabled_4bit;
if (blend->cb_target_mask) {
@@ -638,7 +638,7 @@ static void *si_create_blend_state_mode(struct pipe_context *ctx,
/* RB+ doesn't work with dual source blending, logic op, and RESOLVE. */
if (blend->dual_src_blend || logicop_enable || mode == V_028808_CB_RESOLVE ||
- (sctx->chip_class == GFX11 && blend->blend_enable_4bit))
+ (sctx->gfx_level == GFX11 && blend->blend_enable_4bit))
color_control |= S_028808_DISABLE_DUAL_QUAD(1);
}
@@ -861,9 +861,9 @@ static void si_emit_clip_regs(struct si_context *sctx)
clipdist_mask &= rs->clip_plane_enable;
culldist_mask |= clipdist_mask;
- unsigned pa_cl_cntl = S_02881C_BYPASS_VTX_RATE_COMBINER(sctx->chip_class >= GFX10_3 &&
+ unsigned pa_cl_cntl = S_02881C_BYPASS_VTX_RATE_COMBINER(sctx->gfx_level >= GFX10_3 &&
!sctx->screen->options.vrs2x2) |
- S_02881C_BYPASS_PRIM_RATE_COMBINER(sctx->chip_class >= GFX10_3) |
+ S_02881C_BYPASS_PRIM_RATE_COMBINER(sctx->gfx_level >= GFX10_3) |
clipdist_mask | (culldist_mask << 8);
radeon_begin(&sctx->gfx_cs);
@@ -1043,7 +1043,7 @@ static void *si_create_rs_state(struct pipe_context *ctx, const struct pipe_rast
S_028A48_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
S_028A48_MSAA_ENABLE(state->multisample || state->poly_smooth || state->line_smooth) |
S_028A48_VPORT_SCISSOR_ENABLE(1) |
- S_028A48_ALTERNATE_RBS_PER_TILE(sscreen->info.chip_class >= GFX9));
+ S_028A48_ALTERNATE_RBS_PER_TILE(sscreen->info.gfx_level >= GFX9));
bool polygon_mode_enabled =
(state->fill_front != PIPE_POLYGON_MODE_FILL && !(state->cull_face & PIPE_FACE_FRONT)) ||
@@ -1061,7 +1061,7 @@ static void *si_create_rs_state(struct pipe_context *ctx, const struct pipe_rast
S_028814_POLYMODE_FRONT_PTYPE(si_translate_fill(state->fill_front)) |
S_028814_POLYMODE_BACK_PTYPE(si_translate_fill(state->fill_back)) |
/* this must be set if POLY_MODE or PERPENDICULAR_ENDCAP_ENA is set */
- S_028814_KEEP_TOGETHER_ENABLE(sscreen->info.chip_class >= GFX10 ?
+ S_028814_KEEP_TOGETHER_ENABLE(sscreen->info.gfx_level >= GFX10 ?
polygon_mode_enabled ||
rs->perpendicular_end_caps : 0));
@@ -1512,7 +1512,7 @@ static void si_emit_db_render_state(struct si_context *sctx)
S_028000_STENCIL_CLEAR_ENABLE(sctx->db_stencil_clear);
}
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
unsigned max_allowed_tiles_in_wave = 0;
if (sctx->screen->info.has_dedicated_vram) {
@@ -1540,9 +1540,9 @@ static void si_emit_db_render_state(struct si_context *sctx)
/* DB_COUNT_CONTROL (occlusion queries) */
if (sctx->num_occlusion_queries > 0 && !sctx->occlusion_queries_disabled) {
bool perfect = sctx->num_perfect_occlusion_queries > 0;
- bool gfx10_perfect = sctx->chip_class >= GFX10 && perfect;
+ bool gfx10_perfect = sctx->gfx_level >= GFX10 && perfect;
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
unsigned log_sample_rate = sctx->framebuffer.log_samples;
db_count_control = S_028004_PERFECT_ZPASS_COUNTS(perfect) |
@@ -1555,7 +1555,7 @@ static void si_emit_db_render_state(struct si_context *sctx)
}
} else {
/* Disable occlusion queries. */
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
db_count_control = 0;
} else {
db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
@@ -1572,12 +1572,12 @@ static void si_emit_db_render_state(struct si_context *sctx)
S_028010_DISABLE_ZMASK_EXPCLEAR_OPTIMIZATION(sctx->db_depth_disable_expclear) |
S_028010_DISABLE_SMEM_EXPCLEAR_OPTIMIZATION(sctx->db_stencil_disable_expclear) |
S_028010_DECOMPRESS_Z_ON_FLUSH(sctx->framebuffer.nr_samples >= 4) |
- S_028010_CENTROID_COMPUTATION_MODE(sctx->chip_class >= GFX10_3 ? 1 : 0));
+ S_028010_CENTROID_COMPUTATION_MODE(sctx->gfx_level >= GFX10_3 ? 1 : 0));
db_shader_control = sctx->ps_db_shader_control;
/* Bug workaround for smoothing (overrasterization) on GFX6. */
- if (sctx->chip_class == GFX6 && sctx->smoothing_enabled) {
+ if (sctx->gfx_level == GFX6 && sctx->smoothing_enabled) {
db_shader_control &= C_02880C_Z_ORDER;
db_shader_control |= S_02880C_Z_ORDER(V_02880C_LATE_Z);
}
@@ -1592,9 +1592,9 @@ static void si_emit_db_render_state(struct si_context *sctx)
radeon_opt_set_context_reg(sctx, R_02880C_DB_SHADER_CONTROL, SI_TRACKED_DB_SHADER_CONTROL,
db_shader_control);
- if (sctx->chip_class >= GFX10_3) {
+ if (sctx->gfx_level >= GFX10_3) {
if (sctx->allow_flat_shading) {
- if (sctx->chip_class == GFX11) {
+ if (sctx->gfx_level == GFX11) {
radeon_opt_set_context_reg(sctx, R_0283D0_PA_SC_VRS_OVERRIDE_CNTL,
SI_TRACKED_DB_PA_SC_VRS_OVERRIDE_CNTL,
S_0283D0_VRS_OVERRIDE_RATE_COMBINER_MODE(
@@ -1616,7 +1616,7 @@ static void si_emit_db_render_state(struct si_context *sctx)
*
* MIN allows sample shading but not coarse shading.
*/
- if (sctx->chip_class == GFX11) {
+ if (sctx->gfx_level == GFX11) {
unsigned mode = sctx->screen->options.vrs2x2 && G_02880C_KILL_ENABLE(db_shader_control) ?
V_0283D0_SC_VRS_COMB_MODE_MIN : V_0283D0_SC_VRS_COMB_MODE_PASSTHRU;
@@ -1642,7 +1642,7 @@ static void si_emit_db_render_state(struct si_context *sctx)
/*
* format translation
*/
-uint32_t si_translate_colorformat(enum chip_class chip_class,
+uint32_t si_translate_colorformat(enum amd_gfx_level gfx_level,
enum pipe_format format)
{
const struct util_format_description *desc = util_format_description(format);
@@ -1656,7 +1656,7 @@ uint32_t si_translate_colorformat(enum chip_class chip_class,
if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
return V_028C70_COLOR_10_11_11;
- if (chip_class >= GFX10_3 &&
+ if (gfx_level >= GFX10_3 &&
format == PIPE_FORMAT_R9G9B9E5_FLOAT) /* isn't plain */
return V_028C70_COLOR_5_9_9_9;
@@ -1813,7 +1813,7 @@ static uint32_t si_translate_texformat(struct pipe_screen *screen, enum pipe_for
bool uniform = true;
int i;
- assert(sscreen->info.chip_class <= GFX9);
+ assert(sscreen->info.gfx_level <= GFX9);
/* Colorspace (return non-RGB formats directly). */
switch (desc->colorspace) {
@@ -1829,7 +1829,7 @@ static uint32_t si_translate_texformat(struct pipe_screen *screen, enum pipe_for
* gathers in stencil sampling. This affects at least
* GL45-CTS.texture_cube_map_array.sampling on GFX8.
*/
- if (sscreen->info.chip_class <= GFX8)
+ if (sscreen->info.gfx_level <= GFX8)
return V_008F14_IMG_DATA_FORMAT_8_8_8_8;
if (format == PIPE_FORMAT_X24S8_UINT)
@@ -2172,7 +2172,7 @@ static unsigned si_tex_dim(struct si_screen *sscreen, struct si_texture *tex, un
/* GFX9 allocates 1D textures as 2D. */
if ((res_target == PIPE_TEXTURE_1D || res_target == PIPE_TEXTURE_1D_ARRAY) &&
- sscreen->info.chip_class == GFX9 &&
+ sscreen->info.gfx_level == GFX9 &&
tex->surface.u.gfx9.resource_type == RADEON_RESOURCE_2D) {
if (res_target == PIPE_TEXTURE_1D)
res_target = PIPE_TEXTURE_2D;
@@ -2216,7 +2216,7 @@ static bool si_is_sampler_format_supported(struct pipe_screen *screen, enum pipe
desc->channel[0].size == 64)
return false;
- if (sscreen->info.chip_class >= GFX10) {
+ if (sscreen->info.gfx_level >= GFX10) {
const struct gfx10_format *fmt = &ac_get_gfx10_format_table(&sscreen->info)[format];
if (!fmt->img_format || fmt->buffers_only)
return false;
@@ -2233,7 +2233,7 @@ static uint32_t si_translate_buffer_dataformat(struct pipe_screen *screen,
{
int i;
- assert(((struct si_screen *)screen)->info.chip_class <= GFX9);
+ assert(((struct si_screen *)screen)->info.gfx_level <= GFX9);
if (desc->format == PIPE_FORMAT_R11G11B10_FLOAT)
return V_008F0C_BUF_DATA_FORMAT_10_11_11;
@@ -2307,7 +2307,7 @@ static uint32_t si_translate_buffer_numformat(struct pipe_screen *screen,
const struct util_format_description *desc,
int first_non_void)
{
- assert(((struct si_screen *)screen)->info.chip_class <= GFX9);
+ assert(((struct si_screen *)screen)->info.gfx_level <= GFX9);
if (desc->format == PIPE_FORMAT_R11G11B10_FLOAT)
return V_008F0C_BUF_NUM_FORMAT_FLOAT;
@@ -2369,7 +2369,7 @@ static unsigned si_is_vertex_format_supported(struct pipe_screen *screen, enum p
}
}
- if (sscreen->info.chip_class >= GFX10) {
+ if (sscreen->info.gfx_level >= GFX10) {
const struct gfx10_format *fmt = &ac_get_gfx10_format_table(&sscreen->info)[format];
if (!fmt->img_format || fmt->img_format >= 128)
return 0;
@@ -2384,11 +2384,11 @@ static unsigned si_is_vertex_format_supported(struct pipe_screen *screen, enum p
return usage;
}
-static bool si_is_colorbuffer_format_supported(enum chip_class chip_class,
+static bool si_is_colorbuffer_format_supported(enum amd_gfx_level gfx_level,
enum pipe_format format)
{
- return si_translate_colorformat(chip_class, format) != V_028C70_COLOR_INVALID &&
- si_translate_colorswap(chip_class, format, false) != ~0U;
+ return si_translate_colorformat(gfx_level, format) != V_028C70_COLOR_INVALID &&
+ si_translate_colorswap(gfx_level, format, false) != ~0U;
}
static bool si_is_zs_format_supported(enum pipe_format format)
@@ -2456,7 +2456,7 @@ static bool si_is_format_supported(struct pipe_screen *screen, enum pipe_format
/* Gfx11: BGRA doesn't work with samples >= 4. Only allow R/0/1 to be the first
* component for simplicity.
*/
- if (sscreen->info.chip_class >= GFX11 &&
+ if (sscreen->info.gfx_level >= GFX11 &&
!util_format_is_depth_or_stencil(format) &&
util_format_description(format)->swizzle[0] != PIPE_SWIZZLE_X &&
util_format_description(format)->swizzle[0] != PIPE_SWIZZLE_0 &&
@@ -2476,7 +2476,7 @@ static bool si_is_format_supported(struct pipe_screen *screen, enum pipe_format
if ((usage & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_SCANOUT |
PIPE_BIND_SHARED | PIPE_BIND_BLENDABLE)) &&
- si_is_colorbuffer_format_supported(sscreen->info.chip_class, format)) {
+ si_is_colorbuffer_format_supported(sscreen->info.gfx_level, format)) {
retval |= usage & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DISPLAY_TARGET | PIPE_BIND_SCANOUT |
PIPE_BIND_SHARED);
if (!util_format_is_pure_integer(format) && !util_format_is_depth_or_stencil(format))
@@ -2560,12 +2560,12 @@ static void si_initialize_color_surface(struct si_context *sctx, struct si_surfa
}
}
- format = si_translate_colorformat(sctx->chip_class, surf->base.format);
+ format = si_translate_colorformat(sctx->gfx_level, surf->base.format);
if (format == V_028C70_COLOR_INVALID) {
PRINT_ERR("Invalid CB format: %d, disabling CB.\n", surf->base.format);
}
assert(format != V_028C70_COLOR_INVALID);
- swap = si_translate_colorswap(sctx->chip_class, surf->base.format, false);
+ swap = si_translate_colorswap(sctx->gfx_level, surf->base.format, false);
endian = si_colorformat_endian_swap(format);
/* blend clamp should be set for all NORM/SRGB types */
@@ -2598,7 +2598,7 @@ static void si_initialize_color_surface(struct si_context *sctx, struct si_surfa
format != V_028C70_COLOR_24_8) |
S_028C70_NUMBER_TYPE(ntype);
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
assert(!SI_BIG_ENDIAN);
color_info |= S_028C70_FORMAT_GFX11(format);
} else {
@@ -2606,7 +2606,7 @@ static void si_initialize_color_surface(struct si_context *sctx, struct si_surfa
}
/* Intensity is implemented as Red, so treat it that way. */
- color_attrib = sctx->chip_class >= GFX11 ?
+ color_attrib = sctx->gfx_level >= GFX11 ?
S_028C74_FORCE_DST_ALPHA_1_GFX11(desc->swizzle[3] == PIPE_SWIZZLE_1 || util_format_is_intensity(surf->base.format)):
S_028C74_FORCE_DST_ALPHA_1_GFX6(desc->swizzle[3] == PIPE_SWIZZLE_1 || util_format_is_intensity(surf->base.format));
@@ -2614,7 +2614,7 @@ static void si_initialize_color_surface(struct si_context *sctx, struct si_surfa
unsigned log_samples = util_logbase2(tex->buffer.b.b.nr_samples);
unsigned log_fragments = util_logbase2(tex->buffer.b.b.nr_storage_samples);
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
color_attrib |= S_028C74_NUM_FRAGMENTS_GFX11(log_fragments);
} else {
color_attrib |= S_028C74_NUM_SAMPLES(log_samples) | S_028C74_NUM_FRAGMENTS_GFX6(log_fragments);
@@ -2623,7 +2623,7 @@ static void si_initialize_color_surface(struct si_context *sctx, struct si_surfa
color_info |= S_028C70_COMPRESSION(1);
unsigned fmask_bankh = util_logbase2(tex->surface.u.legacy.color.fmask.bankh);
- if (sctx->chip_class == GFX6) {
+ if (sctx->gfx_level == GFX6) {
/* due to a hw bug, FMASK_BANK_HEIGHT must be set on GFX6 too */
color_attrib |= S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);
}
@@ -2639,16 +2639,16 @@ static void si_initialize_color_surface(struct si_context *sctx, struct si_surfa
if (!sctx->screen->info.has_dedicated_vram)
min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_64B;
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
surf->cb_dcc_control = S_028C78_MAX_UNCOMPRESSED_BLOCK_SIZE(V_028C78_MAX_BLOCK_SIZE_256B) |
S_028C78_MAX_COMPRESSED_BLOCK_SIZE(tex->surface.u.gfx9.color.dcc.max_compressed_block_size) |
S_028C78_MIN_COMPRESSED_BLOCK_SIZE(min_compressed_block_size) |
S_028C78_INDEPENDENT_64B_BLOCKS(tex->surface.u.gfx9.color.dcc.independent_64B_blocks);
- if (sctx->chip_class >= GFX11)
+ if (sctx->gfx_level >= GFX11)
surf->cb_dcc_control |= S_028C78_INDEPENDENT_128B_BLOCKS_GFX11(tex->surface.u.gfx9.color.dcc.independent_128B_blocks);
else
surf->cb_dcc_control |= S_028C78_INDEPENDENT_128B_BLOCKS_GFX10(tex->surface.u.gfx9.color.dcc.independent_128B_blocks);
- } else if (sctx->chip_class >= GFX8) {
+ } else if (sctx->gfx_level >= GFX8) {
unsigned max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_256B;
if (tex->buffer.b.b.nr_storage_samples > 1) {
@@ -2664,7 +2664,7 @@ static void si_initialize_color_surface(struct si_context *sctx, struct si_surfa
}
/* This must be set for fast clear to work without FMASK. */
- if (!tex->surface.fmask_size && sctx->chip_class == GFX6) {
+ if (!tex->surface.fmask_size && sctx->gfx_level == GFX6) {
unsigned bankh = util_logbase2(tex->surface.u.legacy.bankh);
color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
}
@@ -2676,19 +2676,19 @@ static void si_initialize_color_surface(struct si_context *sctx, struct si_surfa
unsigned mip0_height = surf->height0 - 1;
unsigned mip0_depth = util_max_layer(&tex->buffer.b.b, 0);
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
color_view |= S_028C6C_MIP_LEVEL_GFX10(surf->base.u.tex.level);
surf->cb_color_attrib3 = S_028EE0_MIP0_DEPTH(mip0_depth) |
S_028EE0_RESOURCE_TYPE(tex->surface.u.gfx9.resource_type) |
- S_028EE0_RESOURCE_LEVEL(sctx->chip_class >= GFX11 ? 0 : 1);
- } else if (sctx->chip_class == GFX9) {
+ S_028EE0_RESOURCE_LEVEL(sctx->gfx_level >= GFX11 ? 0 : 1);
+ } else if (sctx->gfx_level == GFX9) {
color_view |= S_028C6C_MIP_LEVEL_GFX9(surf->base.u.tex.level);
color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
S_028C74_RESOURCE_TYPE(tex->surface.u.gfx9.resource_type);
}
- if (sctx->chip_class >= GFX9) {
+ if (sctx->gfx_level >= GFX9) {
surf->cb_color_attrib2 = S_028C68_MIP0_WIDTH(mip0_width) |
S_028C68_MIP0_HEIGHT(mip0_height) |
S_028C68_MAX_MIP(tex->buffer.b.b.last_level);
@@ -2723,12 +2723,12 @@ static void si_init_depth_surface(struct si_context *sctx, struct si_surface *su
surf->db_htile_data_base = 0;
surf->db_htile_surface = 0;
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
surf->db_depth_view |= S_028008_SLICE_START_HI(surf->base.u.tex.first_layer >> 11) |
S_028008_SLICE_MAX_HI(surf->base.u.tex.last_layer >> 11);
}
- if (sctx->chip_class >= GFX9) {
+ if (sctx->gfx_level >= GFX9) {
assert(tex->surface.u.gfx9.surf_offset == 0);
surf->db_depth_base = tex->buffer.gpu_address >> 8;
surf->db_stencil_base = (tex->buffer.gpu_address + tex->surface.u.gfx9.zs.stencil_offset) >> 8;
@@ -2736,12 +2736,12 @@ static void si_init_depth_surface(struct si_context *sctx, struct si_surface *su
S_028038_NUM_SAMPLES(util_logbase2(tex->buffer.b.b.nr_samples)) |
S_028038_SW_MODE(tex->surface.u.gfx9.swizzle_mode) |
S_028038_MAXMIP(tex->buffer.b.b.last_level) |
- S_028040_ITERATE_256(sctx->chip_class >= GFX11);
+ S_028040_ITERATE_256(sctx->gfx_level >= GFX11);
s_info = S_02803C_FORMAT(stencil_format) |
S_02803C_SW_MODE(tex->surface.u.gfx9.zs.stencil_swizzle_mode) |
- S_028044_ITERATE_256(sctx->chip_class >= GFX11);
+ S_028044_ITERATE_256(sctx->gfx_level >= GFX11);
- if (sctx->chip_class == GFX9) {
+ if (sctx->gfx_level == GFX9) {
surf->db_z_info2 = S_028068_EPITCH(tex->surface.u.gfx9.epitch);
surf->db_stencil_info2 = S_02806C_EPITCH(tex->surface.u.gfx9.zs.stencil_epitch);
}
@@ -2763,7 +2763,7 @@ static void si_init_depth_surface(struct si_context *sctx, struct si_surface *su
surf->db_htile_data_base = (tex->buffer.gpu_address + tex->surface.meta_offset) >> 8;
surf->db_htile_surface =
S_028ABC_FULL_CACHE(1) | S_028ABC_PIPE_ALIGNED(1);
- if (sctx->chip_class == GFX9) {
+ if (sctx->gfx_level == GFX9) {
surf->db_htile_surface |= S_028ABC_RB_ALIGNED(1);
}
}
@@ -2783,7 +2783,7 @@ static void si_init_depth_surface(struct si_context *sctx, struct si_surface *su
s_info = S_028044_FORMAT(stencil_format);
surf->db_depth_info = 0;
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
struct radeon_info *info = &sctx->screen->info;
unsigned index = tex->surface.u.legacy.tiling_index[level];
unsigned stencil_index = tex->surface.u.legacy.zs.stencil_tiling_index[level];
@@ -3006,7 +3006,7 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
*/
if (sctx->generate_mipmap_for_depth) {
si_make_DB_shader_coherent(sctx, 1, false, sctx->framebuffer.DB_has_shader_readable_metadata);
- } else if (sctx->chip_class == GFX9) {
+ } else if (sctx->gfx_level == GFX9) {
/* It appears that DB metadata "leaks" in a sequence of:
* - depth clear
* - DCC decompress for shader image writes (with DB disabled)
@@ -3090,7 +3090,7 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
if (vi_dcc_enabled(tex, surf->base.u.tex.level)) {
sctx->framebuffer.CB_has_shader_readable_metadata = true;
- if (sctx->chip_class >= GFX9 && !tex->surface.u.gfx9.color.dcc.pipe_aligned)
+ if (sctx->gfx_level >= GFX9 && !tex->surface.u.gfx9.color.dcc.pipe_aligned)
sctx->framebuffer.all_DCC_pipe_aligned = false;
if (tex->buffer.b.b.nr_storage_samples >= 2)
@@ -3108,7 +3108,7 @@ static void si_set_framebuffer_state(struct pipe_context *ctx,
}
/* For optimal DCC performance. */
- if (sctx->chip_class >= GFX10)
+ if (sctx->gfx_level >= GFX10)
sctx->framebuffer.dcc_overwrite_combiner_watermark = 6;
else
sctx->framebuffer.dcc_overwrite_combiner_watermark = 4;
@@ -3235,7 +3235,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
cb = (struct si_surface *)state->cbufs[i];
if (!cb) {
radeon_set_context_reg(R_028C70_CB_COLOR0_INFO + i * 0x3C,
- sctx->chip_class >= GFX11 ?
+ sctx->gfx_level >= GFX11 ?
S_028C70_FORMAT_GFX11(V_028C70_COLOR_INVALID) :
S_028C70_FORMAT_GFX6(V_028C70_COLOR_INVALID));
continue;
@@ -3274,7 +3274,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
cb_color_info |= S_028C70_COMP_SWAP(swap);
}
- if (sctx->chip_class < GFX11 && cb->base.u.tex.level > 0)
+ if (sctx->gfx_level < GFX11 && cb->base.u.tex.level > 0)
cb_color_info &= C_028C70_FAST_CLEAR;
if (tex->surface.fmask_offset) {
@@ -3289,9 +3289,9 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
state->cbufs[1]->texture->nr_samples <= 1;
/* CB can't do MSAA resolve on gfx11. */
- assert(!is_msaa_resolve_dst || sctx->chip_class < GFX11);
+ assert(!is_msaa_resolve_dst || sctx->gfx_level < GFX11);
- if (!is_msaa_resolve_dst && sctx->chip_class < GFX11)
+ if (!is_msaa_resolve_dst && sctx->gfx_level < GFX11)
cb_color_info |= S_028C70_DCC_ENABLE(1);
cb_dcc_base = (tex->buffer.gpu_address + tex->surface.meta_offset) >> 8;
@@ -3301,7 +3301,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
cb_dcc_base |= dcc_tile_swizzle;
}
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
unsigned cb_color_attrib3, cb_fdcc_control;
/* Set mutable surface parameters. */
@@ -3327,7 +3327,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
radeon_set_context_reg(R_028EA0_CB_COLOR0_DCC_BASE_EXT + i * 4, cb_dcc_base >> 32);
radeon_set_context_reg(R_028EC0_CB_COLOR0_ATTRIB2 + i * 4, cb->cb_color_attrib2);
radeon_set_context_reg(R_028EE0_CB_COLOR0_ATTRIB3 + i * 4, cb_color_attrib3);
- } else if (sctx->chip_class >= GFX10) {
+ } else if (sctx->gfx_level >= GFX10) {
unsigned cb_color_attrib3;
/* Set mutable surface parameters. */
@@ -3368,7 +3368,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
radeon_set_context_reg(R_028EA0_CB_COLOR0_DCC_BASE_EXT + i * 4, cb_dcc_base >> 32);
radeon_set_context_reg(R_028EC0_CB_COLOR0_ATTRIB2 + i * 4, cb->cb_color_attrib2);
radeon_set_context_reg(R_028EE0_CB_COLOR0_ATTRIB3 + i * 4, cb_color_attrib3);
- } else if (sctx->chip_class == GFX9) {
+ } else if (sctx->gfx_level == GFX9) {
struct gfx9_surf_meta_flags meta = {
.rb_aligned = 1,
.pipe_aligned = 1,
@@ -3436,7 +3436,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
cb_color_slice = S_028C68_TILE_MAX(slice_tile_max);
if (tex->surface.fmask_offset) {
- if (sctx->chip_class >= GFX7)
+ if (sctx->gfx_level >= GFX7)
cb_color_pitch |=
S_028C64_FMASK_TILE_MAX(tex->surface.u.legacy.color.fmask.pitch_in_pixels / 8 - 1);
cb_color_attrib |=
@@ -3444,14 +3444,14 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
cb_color_fmask_slice = S_028C88_TILE_MAX(tex->surface.u.legacy.color.fmask.slice_tile_max);
} else {
/* This must be set for fast clear to work without FMASK. */
- if (sctx->chip_class >= GFX7)
+ if (sctx->gfx_level >= GFX7)
cb_color_pitch |= S_028C64_FMASK_TILE_MAX(pitch_tile_max);
cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(tile_mode_index);
cb_color_fmask_slice = S_028C88_TILE_MAX(slice_tile_max);
}
radeon_set_context_reg_seq(R_028C60_CB_COLOR0_BASE + i * 0x3C,
- sctx->chip_class >= GFX8 ? 14 : 13);
+ sctx->gfx_level >= GFX8 ? 14 : 13);
radeon_emit(cb_color_base); /* CB_COLOR0_BASE */
radeon_emit(cb_color_pitch); /* CB_COLOR0_PITCH */
radeon_emit(cb_color_slice); /* CB_COLOR0_SLICE */
@@ -3466,7 +3466,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
radeon_emit(tex->color_clear_value[0]); /* CB_COLOR0_CLEAR_WORD0 */
radeon_emit(tex->color_clear_value[1]); /* CB_COLOR0_CLEAR_WORD1 */
- if (sctx->chip_class >= GFX8) /* R_028C94_CB_COLOR0_DCC_BASE */
+ if (sctx->gfx_level >= GFX8) /* R_028C94_CB_COLOR0_DCC_BASE */
radeon_emit(cb_dcc_base);
}
}
@@ -3488,13 +3488,13 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
bool tc_compat_htile = vi_tc_compat_htile_enabled(tex, zb->base.u.tex.level, PIPE_MASK_ZS);
/* Set fields dependent on tc_compat_htile. */
- if (sctx->chip_class >= GFX9 && tc_compat_htile) {
+ if (sctx->gfx_level >= GFX9 && tc_compat_htile) {
unsigned max_zplanes = 4;
if (tex->db_render_format == PIPE_FORMAT_Z16_UNORM && tex->buffer.b.b.nr_samples > 1)
max_zplanes = 2;
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
bool iterate256 = tex->buffer.b.b.nr_samples >= 2;
db_z_info |= S_028040_ITERATE_FLUSH(1) |
S_028040_ITERATE_256(iterate256);
@@ -3516,11 +3516,11 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
unsigned level = zb->base.u.tex.level;
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
radeon_set_context_reg(R_028014_DB_HTILE_DATA_BASE, zb->db_htile_data_base);
radeon_set_context_reg(R_02801C_DB_DEPTH_SIZE_XY, zb->db_depth_size);
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
radeon_set_context_reg_seq(R_028040_DB_Z_INFO, 6);
} else {
radeon_set_context_reg_seq(R_02803C_DB_DEPTH_INFO, 7);
@@ -3540,7 +3540,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
radeon_emit(zb->db_depth_base >> 32); /* DB_Z_WRITE_BASE_HI */
radeon_emit(zb->db_stencil_base >> 32); /* DB_STENCIL_WRITE_BASE_HI */
radeon_emit(zb->db_htile_data_base >> 32); /* DB_HTILE_DATA_BASE_HI */
- } else if (sctx->chip_class == GFX9) {
+ } else if (sctx->gfx_level == GFX9) {
radeon_set_context_reg_seq(R_028014_DB_HTILE_DATA_BASE, 3);
radeon_emit(zb->db_htile_data_base); /* DB_HTILE_DATA_BASE */
radeon_emit(S_028018_BASE_HI(zb->db_htile_data_base >> 32)); /* DB_HTILE_DATA_BASE_HI */
@@ -3602,7 +3602,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
radeon_set_context_reg(R_028008_DB_DEPTH_VIEW, zb->db_depth_view);
radeon_set_context_reg(R_028ABC_DB_HTILE_SURFACE, db_htile_surface);
} else if (sctx->framebuffer.dirty_zsbuf) {
- if (sctx->chip_class == GFX9)
+ if (sctx->gfx_level == GFX9)
radeon_set_context_reg_seq(R_028038_DB_Z_INFO, 2);
else
radeon_set_context_reg_seq(R_028040_DB_Z_INFO, 2);
@@ -3611,7 +3611,7 @@ static void si_emit_framebuffer_state(struct si_context *sctx)
* It affects VRS and occlusion queries if depth and stencil are not bound.
*/
radeon_emit(S_028040_FORMAT(V_028040_Z_INVALID) | /* DB_Z_INFO */
- S_028040_NUM_SAMPLES(sctx->chip_class == GFX11 ? sctx->framebuffer.log_samples : 0));
+ S_028040_NUM_SAMPLES(sctx->gfx_level == GFX11 ? sctx->framebuffer.log_samples : 0));
radeon_emit(S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* DB_STENCIL_INFO */
}
@@ -3651,7 +3651,7 @@ static void si_emit_msaa_sample_locs(struct si_context *sctx)
* GFX10 uses sample locations unconditionally, so they always need
* to be set up.
*/
- if ((nr_samples >= 2 || has_msaa_sample_loc_bug || sctx->chip_class >= GFX10) &&
+ if ((nr_samples >= 2 || has_msaa_sample_loc_bug || sctx->gfx_level >= GFX10) &&
nr_samples != sctx->sample_locs_num_samples) {
sctx->sample_locs_num_samples = nr_samples;
si_emit_sample_locations(cs, nr_samples);
@@ -3686,7 +3686,7 @@ static void si_emit_msaa_sample_locs(struct si_context *sctx)
/* The exclusion bits can be set to improve rasterization efficiency
* if no sample lies on the pixel boundary (-8 sample offset).
*/
- bool exclusion = sctx->chip_class >= GFX7 && (!rs->multisample_enable || nr_samples != 16);
+ bool exclusion = sctx->gfx_level >= GFX7 && (!rs->multisample_enable || nr_samples != 16);
radeon_opt_set_context_reg(
sctx, R_02882C_PA_SU_PRIM_FILTER_CNTL, SI_TRACKED_PA_SU_PRIM_FILTER_CNTL,
S_02882C_XMAX_RIGHT_EXCLUSION(exclusion) | S_02882C_YMAX_BOTTOM_EXCLUSION(exclusion));
@@ -3849,11 +3849,11 @@ static void si_emit_msaa_config(struct si_context *sctx)
S_028BDC_PERPENDICULAR_ENDCAP_ENA(rs->perpendicular_end_caps) |
S_028BDC_EXTRA_DX_DY_PRECISION(rs->perpendicular_end_caps &&
(sctx->family == CHIP_VEGA20 ||
- sctx->chip_class >= GFX10));
+ sctx->gfx_level >= GFX10));
sc_aa_config = S_028BE0_MSAA_NUM_SAMPLES(log_samples) |
S_028BE0_MAX_SAMPLE_DIST(max_dist[log_samples]) |
S_028BE0_MSAA_EXPOSED_SAMPLES(log_samples) |
- S_028BE0_COVERED_CENTROID_IS_CENTER(sctx->chip_class >= GFX10_3);
+ S_028BE0_COVERED_CENTROID_IS_CENTER(sctx->gfx_level >= GFX10_3);
if (sctx->framebuffer.nr_samples > 1) {
db_eqaa |= S_028804_MAX_ANCHOR_SAMPLES(log_z_samples) |
@@ -3951,7 +3951,7 @@ void si_make_buffer_descriptor(struct si_screen *screen, struct si_resource *buf
* - For VMEM and inst.IDXEN == 0 or STRIDE == 0, it's in byte units.
* - For VMEM and inst.IDXEN == 1 and STRIDE != 0, it's in units of STRIDE.
*/
- if (screen->info.chip_class == GFX8)
+ if (screen->info.gfx_level == GFX8)
num_records *= stride;
state[4] = 0;
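The hunk above covers the GFX8 quirk where a buffer descriptor's NUM_RECORDS is expressed in bytes rather than in elements for the VMEM addressing cases listed in the comment. A minimal sketch of that rule follows; the helper name and the trimmed-down enum are stand-ins, and only the GFX8 multiply-by-stride behaviour comes from the source:

#include <stdint.h>

/* Abbreviated stand-in for enum amd_gfx_level from this commit. */
enum example_gfx_level { EXAMPLE_GFX7 = 7, EXAMPLE_GFX8 = 8, EXAMPLE_GFX9 = 9 };

static uint32_t example_buffer_num_records(enum example_gfx_level gfx_level,
                                           uint32_t num_elements,
                                           uint32_t stride_bytes)
{
   /* GFX8 interprets NUM_RECORDS in byte units for these descriptors, so
    * scale the element count by the stride; other generations keep the
    * plain element count. */
   if (gfx_level == EXAMPLE_GFX8)
      return num_elements * stride_bytes;
   return num_elements;
}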
@@ -3962,7 +3962,7 @@ void si_make_buffer_descriptor(struct si_screen *screen, struct si_resource *buf
S_008F0C_DST_SEL_Z(si_map_swizzle(desc->swizzle[2])) |
S_008F0C_DST_SEL_W(si_map_swizzle(desc->swizzle[3]));
- if (screen->info.chip_class >= GFX10) {
+ if (screen->info.gfx_level >= GFX10) {
const struct gfx10_format *fmt = &ac_get_gfx10_format_table(&screen->info)[format];
/* OOB_SELECT chooses the out-of-bounds check:
@@ -3974,7 +3974,7 @@ void si_make_buffer_descriptor(struct si_screen *screen, struct si_resource *buf
*/
state[7] |= S_008F0C_FORMAT(fmt->img_format) |
S_008F0C_OOB_SELECT(V_008F0C_OOB_SELECT_STRUCTURED_WITH_OFFSET) |
- S_008F0C_RESOURCE_LEVEL(screen->info.chip_class < GFX11);
+ S_008F0C_RESOURCE_LEVEL(screen->info.gfx_level < GFX11);
} else {
int first_non_void;
unsigned num_format, data_format;
@@ -4063,7 +4063,7 @@ static void gfx10_make_texture_descriptor(
}
if (tex->upgraded_depth && !is_stencil) {
- if (screen->info.chip_class >= GFX11) {
+ if (screen->info.gfx_level >= GFX11) {
assert(img_format == V_008F0C_GFX11_FORMAT_32_FLOAT);
img_format = V_008F0C_GFX11_FORMAT_32_FLOAT_CLAMP;
} else {
@@ -4096,7 +4096,7 @@ static void gfx10_make_texture_descriptor(
state[0] = 0;
state[1] = S_00A004_FORMAT(img_format) | S_00A004_WIDTH_LO(width - 1);
state[2] = S_00A008_WIDTH_HI((width - 1) >> 2) | S_00A008_HEIGHT(height - 1) |
- S_00A008_RESOURCE_LEVEL(screen->info.chip_class < GFX11);
+ S_00A008_RESOURCE_LEVEL(screen->info.gfx_level < GFX11);
state[3] =
S_00A00C_DST_SEL_X(si_map_swizzle(swizzle[0])) |
@@ -4118,7 +4118,7 @@ static void gfx10_make_texture_descriptor(
unsigned max_mip = res->nr_samples > 1 ? util_logbase2(res->nr_samples) :
tex->buffer.b.b.last_level;
- if (screen->info.chip_class >= GFX11) {
+ if (screen->info.gfx_level >= GFX11) {
state[1] |= S_00A004_MAX_MIP(max_mip);
} else {
state[5] |= S_00A014_MAX_MIP(max_mip);
@@ -4240,7 +4240,7 @@ static void si_make_texture_descriptor(struct si_screen *screen, struct si_textu
* fix texture gathers. This affects at least
* GL45-CTS.texture_cube_map_array.sampling on GFX8.
*/
- if (screen->info.chip_class <= GFX8)
+ if (screen->info.gfx_level <= GFX8)
util_format_compose_swizzles(swizzle_wwww, state_swizzle, swizzle);
else
util_format_compose_swizzles(swizzle_yyyy, state_swizzle, swizzle);
@@ -4326,11 +4326,11 @@ static void si_make_texture_descriptor(struct si_screen *screen, struct si_textu
}
/* S8 with Z32 HTILE needs a special format. */
- if (screen->info.chip_class == GFX9 && pipe_format == PIPE_FORMAT_S8_UINT)
+ if (screen->info.gfx_level == GFX9 && pipe_format == PIPE_FORMAT_S8_UINT)
data_format = V_008F14_IMG_DATA_FORMAT_S8_32;
if (!sampler && (res->target == PIPE_TEXTURE_CUBE || res->target == PIPE_TEXTURE_CUBE_ARRAY ||
- (screen->info.chip_class <= GFX8 && res->target == PIPE_TEXTURE_3D))) {
+ (screen->info.gfx_level <= GFX8 && res->target == PIPE_TEXTURE_3D))) {
/* For the purpose of shader images, treat cube maps and 3D
* textures as 2D arrays. For 3D textures, the address
* calculations for mipmaps are different, so we rely on the
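The hunk above carries the rule that, for shader images, cube maps are always viewed as 2D arrays, and 3D textures are as well on GFX8 and older because their mipmap address calculations differ. A self-contained sketch of that target remap, with invented names and a plain integer standing in for the gfx level:

#include <stdbool.h>

enum example_tex_target { EXAMPLE_TEX_2D_ARRAY, EXAMPLE_TEX_3D,
                          EXAMPLE_TEX_CUBE, EXAMPLE_TEX_CUBE_ARRAY };

static enum example_tex_target
example_image_view_target(enum example_tex_target target, int gfx_level,
                          bool is_sampler_view)
{
   /* Sampler views keep their native target; shader images remap cube
    * maps (and, before GFX9, 3D textures) to 2D arrays so layers can be
    * addressed explicitly. */
   if (!is_sampler_view &&
       (target == EXAMPLE_TEX_CUBE || target == EXAMPLE_TEX_CUBE_ARRAY ||
        (gfx_level <= 8 /* GFX8 */ && target == EXAMPLE_TEX_3D)))
      return EXAMPLE_TEX_2D_ARRAY;
   return target;
}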
@@ -4367,7 +4367,7 @@ static void si_make_texture_descriptor(struct si_screen *screen, struct si_textu
state[6] = 0;
state[7] = 0;
- if (screen->info.chip_class == GFX9) {
+ if (screen->info.gfx_level == GFX9) {
unsigned bc_swizzle = gfx9_border_color_swizzle(desc->swizzle);
/* Depth is the last accessible layer on Gfx9.
@@ -4393,7 +4393,7 @@ static void si_make_texture_descriptor(struct si_screen *screen, struct si_textu
/* The last dword is unused by hw. The shader uses it to clear
* bits in the first dword of sampler state.
*/
- if (screen->info.chip_class <= GFX7 && res->nr_samples <= 1) {
+ if (screen->info.gfx_level <= GFX7 && res->nr_samples <= 1) {
if (first_level == last_level)
state[7] = C_008F30_MAX_ANISO_RATIO;
else
@@ -4408,7 +4408,7 @@ static void si_make_texture_descriptor(struct si_screen *screen, struct si_textu
va = tex->buffer.gpu_address + tex->surface.fmask_offset;
#define FMASK(s, f) (((unsigned)(MAX2(1, s)) * 16) + (MAX2(1, f)))
- if (screen->info.chip_class == GFX9) {
+ if (screen->info.gfx_level == GFX9) {
data_format = V_008F14_IMG_DATA_FORMAT_FMASK;
switch (FMASK(res->nr_samples, res->nr_storage_samples)) {
case FMASK(2, 1):
@@ -4514,7 +4514,7 @@ static void si_make_texture_descriptor(struct si_screen *screen, struct si_textu
fmask_state[6] = 0;
fmask_state[7] = 0;
- if (screen->info.chip_class == GFX9) {
+ if (screen->info.gfx_level == GFX9) {
fmask_state[3] |= S_008F1C_SW_MODE(tex->surface.u.gfx9.color.fmask_swizzle_mode);
fmask_state[4] |=
S_008F20_DEPTH(last_layer) | S_008F20_PITCH(tex->surface.u.gfx9.color.fmask_epitch);
@@ -4713,7 +4713,7 @@ static uint32_t si_translate_border_color(struct si_context *sctx,
sctx->border_color_count++;
}
- return (sctx->screen->info.chip_class >= GFX11 ? S_008F3C_BORDER_COLOR_PTR_GFX11(i):
+ return (sctx->screen->info.gfx_level >= GFX11 ? S_008F3C_BORDER_COLOR_PTR_GFX11(i):
S_008F3C_BORDER_COLOR_PTR_GFX6(i)) |
S_008F3C_BORDER_COLOR_TYPE(V_008F3C_SQ_TEX_BORDER_COLOR_REGISTER);
}
@@ -4785,7 +4785,7 @@ static void *si_create_sampler_state(struct pipe_context *ctx,
S_008F30_ANISO_THRESHOLD(max_aniso_ratio >> 1) | S_008F30_ANISO_BIAS(max_aniso_ratio) |
S_008F30_DISABLE_CUBE_WRAP(!state->seamless_cube_map) |
S_008F30_TRUNC_COORD(trunc_coord) |
- S_008F30_COMPAT_MODE(sctx->chip_class == GFX8 || sctx->chip_class == GFX9));
+ S_008F30_COMPAT_MODE(sctx->gfx_level == GFX8 || sctx->gfx_level == GFX9));
rstate->val[1] = (S_008F34_MIN_LOD(S_FIXED(CLAMP(state->min_lod, 0, 15), 8)) |
S_008F34_MAX_LOD(S_FIXED(CLAMP(state->max_lod, 0, 15), 8)) |
S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
@@ -4796,12 +4796,12 @@ static void *si_create_sampler_state(struct pipe_context *ctx,
rstate->val[3] = si_translate_border_color(sctx, state, &state->border_color,
state->border_color_is_integer);
- if (sscreen->info.chip_class >= GFX10) {
+ if (sscreen->info.gfx_level >= GFX10) {
rstate->val[2] |= S_008F38_ANISO_OVERRIDE_GFX10(1);
} else {
- rstate->val[2] |= S_008F38_DISABLE_LSB_CEIL(sctx->chip_class <= GFX8) |
+ rstate->val[2] |= S_008F38_DISABLE_LSB_CEIL(sctx->gfx_level <= GFX8) |
S_008F38_FILTER_PREC_FIX(1) |
- S_008F38_ANISO_OVERRIDE_GFX8(sctx->chip_class >= GFX8);
+ S_008F38_ANISO_OVERRIDE_GFX8(sctx->gfx_level >= GFX8);
}
/* Create sampler resource for upgraded depth textures. */
@@ -4814,7 +4814,7 @@ static void *si_create_sampler_state(struct pipe_context *ctx,
}
if (memcmp(&state->border_color, &clamped_border_color, sizeof(clamped_border_color)) == 0) {
- if (sscreen->info.chip_class <= GFX9)
+ if (sscreen->info.gfx_level <= GFX9)
rstate->upgraded_depth_val[3] |= S_008F3C_UPGRADED_DEPTH(1);
} else {
rstate->upgraded_depth_val[3] =
@@ -4995,7 +4995,7 @@ static void *si_create_vertex_elements(struct pipe_context *ctx, unsigned count,
* unsigned, so a shader workaround is needed. The affected
* chips are GFX8 and older except Stoney (GFX8.1).
*/
- always_fix = sscreen->info.chip_class <= GFX8 && sscreen->info.family != CHIP_STONEY &&
+ always_fix = sscreen->info.gfx_level <= GFX8 && sscreen->info.family != CHIP_STONEY &&
channel->type == UTIL_FORMAT_TYPE_SIGNED;
} else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT) {
fix_fetch.u.log_size = 3; /* special encoding */
@@ -5040,7 +5040,7 @@ static void *si_create_vertex_elements(struct pipe_context *ctx, unsigned count,
*/
bool check_alignment =
log_hw_load_size >= 1 &&
- (sscreen->info.chip_class == GFX6 || sscreen->info.chip_class >= GFX10);
+ (sscreen->info.gfx_level == GFX6 || sscreen->info.gfx_level >= GFX10);
bool opencode = sscreen->options.vs_fetch_always_opencode;
if (check_alignment && (elements[i].src_offset & ((1 << log_hw_load_size) - 1)) != 0)
@@ -5067,11 +5067,11 @@ static void *si_create_vertex_elements(struct pipe_context *ctx, unsigned count,
S_008F0C_DST_SEL_Z(si_map_swizzle(desc->swizzle[2])) |
S_008F0C_DST_SEL_W(si_map_swizzle(desc->swizzle[3]));
- if (sscreen->info.chip_class >= GFX10) {
+ if (sscreen->info.gfx_level >= GFX10) {
const struct gfx10_format *fmt = &ac_get_gfx10_format_table(&sscreen->info)[elements[i].src_format];
assert(fmt->img_format != 0 && fmt->img_format < 128);
v->rsrc_word3[i] |= S_008F0C_FORMAT(fmt->img_format) |
- S_008F0C_RESOURCE_LEVEL(sscreen->info.chip_class < GFX11);
+ S_008F0C_RESOURCE_LEVEL(sscreen->info.gfx_level < GFX11);
} else {
unsigned data_format, num_format;
data_format = si_translate_buffer_dataformat(ctx->screen, desc, first_non_void);
@@ -5377,7 +5377,7 @@ static void si_memory_barrier(struct pipe_context *ctx, unsigned flags)
/* Indices are read through TC L2 since GFX8.
* L1 isn't used.
*/
- if (sctx->screen->info.chip_class <= GFX7)
+ if (sctx->screen->info.gfx_level <= GFX7)
sctx->flags |= SI_CONTEXT_WB_L2;
}
@@ -5387,12 +5387,12 @@ static void si_memory_barrier(struct pipe_context *ctx, unsigned flags)
if (flags & PIPE_BARRIER_FRAMEBUFFER && sctx->framebuffer.uncompressed_cb_mask) {
sctx->flags |= SI_CONTEXT_FLUSH_AND_INV_CB;
- if (sctx->chip_class <= GFX8)
+ if (sctx->gfx_level <= GFX8)
sctx->flags |= SI_CONTEXT_WB_L2;
}
/* Indirect buffers use TC L2 on GFX9, but not older hw. */
- if (sctx->screen->info.chip_class <= GFX8 && flags & PIPE_BARRIER_INDIRECT_BUFFER)
+ if (sctx->screen->info.gfx_level <= GFX8 && flags & PIPE_BARRIER_INDIRECT_BUFFER)
sctx->flags |= SI_CONTEXT_WB_L2;
}
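The two hunks above encode the same barrier rule: whenever a consumer that does not read through TC L2 on the given generation is involved (index fetches before GFX8, CB output and indirect arguments on GFX8 and older), the barrier must also request an L2 writeback (SI_CONTEXT_WB_L2). A simplified stand-in for that decision, ignoring the uncompressed-CB mask and the other flush flags the real si_memory_barrier sets:

#include <stdbool.h>

#define EXAMPLE_WB_L2 (1u << 0) /* stands in for SI_CONTEXT_WB_L2 */

static unsigned example_barrier_l2_writeback(int gfx_level, bool index_fetch,
                                             bool framebuffer, bool indirect)
{
   unsigned flags = 0;
   if (index_fetch && gfx_level <= 7 /* GFX7: indices bypass TC L2 */)
      flags |= EXAMPLE_WB_L2;
   if ((framebuffer || indirect) && gfx_level <= 8 /* GFX8 and older */)
      flags |= EXAMPLE_WB_L2;
   return flags;
}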
@@ -5444,7 +5444,7 @@ void si_init_state_functions(struct si_context *sctx)
sctx->custom_dsa_flush = si_create_db_flush_dsa(sctx);
- if (sctx->chip_class < GFX11) {
+ if (sctx->gfx_level < GFX11) {
sctx->custom_blend_resolve = si_create_blend_custom(sctx, V_028808_CB_RESOLVE);
sctx->custom_blend_fmask_decompress = si_create_blend_custom(sctx, V_028808_CB_FMASK_DECOMPRESS);
sctx->custom_blend_eliminate_fastclear =
@@ -5452,7 +5452,7 @@ void si_init_state_functions(struct si_context *sctx)
}
sctx->custom_blend_dcc_decompress =
- si_create_blend_custom(sctx, sctx->chip_class >= GFX11 ?
+ si_create_blend_custom(sctx, sctx->gfx_level >= GFX11 ?
V_028808_CB_DCC_DECOMPRESS_GFX11 :
V_028808_CB_DCC_DECOMPRESS_GFX8);
@@ -5482,7 +5482,7 @@ void si_init_screen_state_functions(struct si_screen *sscreen)
sscreen->b.create_vertex_state = si_pipe_create_vertex_state;
sscreen->b.vertex_state_destroy = si_pipe_vertex_state_destroy;
- if (sscreen->info.chip_class >= GFX10) {
+ if (sscreen->info.gfx_level >= GFX10) {
sscreen->make_texture_descriptor = gfx10_make_texture_descriptor;
} else {
sscreen->make_texture_descriptor = si_make_texture_descriptor;
@@ -5494,7 +5494,7 @@ void si_init_screen_state_functions(struct si_screen *sscreen)
static void si_set_grbm_gfx_index(struct si_context *sctx, struct si_pm4_state *pm4, unsigned value)
{
- unsigned reg = sctx->chip_class >= GFX7 ? R_030800_GRBM_GFX_INDEX : R_00802C_GRBM_GFX_INDEX;
+ unsigned reg = sctx->gfx_level >= GFX7 ? R_030800_GRBM_GFX_INDEX : R_00802C_GRBM_GFX_INDEX;
si_pm4_set_reg(pm4, reg, value);
}
@@ -5522,7 +5522,7 @@ static void si_write_harvested_raster_configs(struct si_context *sctx, struct si
}
si_set_grbm_gfx_index(sctx, pm4, ~0);
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
si_pm4_set_reg(pm4, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
}
}
@@ -5540,7 +5540,7 @@ static void si_set_raster_config(struct si_context *sctx, struct si_pm4_state *p
* (or when we failed to determine the enabled backends).
*/
si_pm4_set_reg(pm4, R_028350_PA_SC_RASTER_CONFIG, raster_config);
- if (sctx->chip_class >= GFX7)
+ if (sctx->gfx_level >= GFX7)
si_pm4_set_reg(pm4, R_028354_PA_SC_RASTER_CONFIG_1, raster_config_1);
} else {
si_write_harvested_raster_configs(sctx, pm4, raster_config, raster_config_1);
@@ -5606,22 +5606,22 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
si_pm4_set_reg(pm4, R_028A8C_VGT_PRIMITIVEID_RESET, 0x0);
si_pm4_set_reg(pm4, R_028B98_VGT_STRMOUT_BUFFER_CONFIG, 0x0);
- if (sctx->chip_class < GFX11) {
+ if (sctx->gfx_level < GFX11) {
si_pm4_set_reg(pm4, R_028A5C_VGT_GS_PER_VS, 0x2);
si_pm4_set_reg(pm4, R_028AB8_VGT_VTX_CNT_EN, 0x0);
}
}
si_pm4_set_reg(pm4, R_028080_TA_BC_BASE_ADDR, border_color_va >> 8);
- if (sctx->chip_class >= GFX7)
+ if (sctx->gfx_level >= GFX7)
si_pm4_set_reg(pm4, R_028084_TA_BC_BASE_ADDR_HI, S_028084_ADDRESS(border_color_va >> 40));
- if (sctx->chip_class == GFX6) {
+ if (sctx->gfx_level == GFX6) {
si_pm4_set_reg(pm4, R_008A14_PA_CL_ENHANCE,
S_008A14_NUM_CLIP_SEQ(3) | S_008A14_CLIP_VTX_REORDER_ENA(1));
}
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
si_pm4_set_reg(pm4, R_030A00_PA_SU_LINE_STIPPLE_VALUE, 0);
si_pm4_set_reg(pm4, R_030A04_PA_SC_LINE_STIPPLE_STATE, 0);
} else {
@@ -5629,8 +5629,8 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
si_pm4_set_reg(pm4, R_008B10_PA_SC_LINE_STIPPLE_STATE, 0);
}
- if (sctx->chip_class <= GFX7 || !has_clear_state) {
- if (sctx->chip_class < GFX11) {
+ if (sctx->gfx_level <= GFX7 || !has_clear_state) {
+ if (sctx->gfx_level < GFX11) {
si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
si_pm4_set_reg(pm4, R_028C5C_VGT_OUT_DEALLOC_CNTL, 16);
}
@@ -5645,7 +5645,7 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
S_028034_BR_X(16384) | S_028034_BR_Y(16384));
}
- if (sctx->chip_class >= GFX10 && sctx->chip_class < GFX11) {
+ if (sctx->gfx_level >= GFX10 && sctx->gfx_level < GFX11) {
si_pm4_set_reg(pm4, R_028038_DB_DFSM_CONTROL,
S_028038_PUNCHOUT_MODE(V_028038_FORCE_OFF) |
S_028038_POPS_DRAIN_PS_ON_OVERLAP(1));
@@ -5653,19 +5653,19 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
unsigned cu_mask_ps = 0xffffffff;
- if (sctx->chip_class >= GFX10_3)
+ if (sctx->gfx_level >= GFX10_3)
cu_mask_ps = gfx103_get_cu_mask_ps(sscreen);
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
ac_set_reg_cu_en(pm4, R_00B01C_SPI_SHADER_PGM_RSRC3_PS,
S_00B01C_CU_EN(cu_mask_ps) |
S_00B01C_WAVE_LIMIT(0x3F) |
- S_00B01C_LDS_GROUP_SIZE(sctx->chip_class >= GFX11),
+ S_00B01C_LDS_GROUP_SIZE(sctx->gfx_level >= GFX11),
C_00B01C_CU_EN, 0, &sscreen->info,
- (void*)(sctx->chip_class >= GFX10 ? si_pm4_set_reg_idx3 : si_pm4_set_reg));
+ (void*)(sctx->gfx_level >= GFX10 ? si_pm4_set_reg_idx3 : si_pm4_set_reg));
}
- if (sctx->chip_class <= GFX8) {
+ if (sctx->gfx_level <= GFX8) {
si_set_raster_config(sctx, pm4);
/* FIXME calculate these values somehow ??? */
@@ -5681,12 +5681,12 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
si_pm4_set_reg(pm4, R_028408_VGT_INDX_OFFSET, 0);
}
- if (sscreen->info.chip_class >= GFX10) {
+ if (sscreen->info.gfx_level >= GFX10) {
si_pm4_set_reg(pm4, R_00B524_SPI_SHADER_PGM_HI_LS,
S_00B524_MEM_BASE(sscreen->info.address32_hi >> 8));
si_pm4_set_reg(pm4, R_00B324_SPI_SHADER_PGM_HI_ES,
S_00B324_MEM_BASE(sscreen->info.address32_hi >> 8));
- } else if (sscreen->info.chip_class == GFX9) {
+ } else if (sscreen->info.gfx_level == GFX9) {
si_pm4_set_reg(pm4, R_00B414_SPI_SHADER_PGM_HI_LS,
S_00B414_MEM_BASE(sscreen->info.address32_hi >> 8));
si_pm4_set_reg(pm4, R_00B214_SPI_SHADER_PGM_HI_ES,
@@ -5696,7 +5696,7 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
S_00B524_MEM_BASE(sscreen->info.address32_hi >> 8));
}
- if (sctx->chip_class >= GFX7 && sctx->chip_class <= GFX8) {
+ if (sctx->gfx_level >= GFX7 && sctx->gfx_level <= GFX8) {
ac_set_reg_cu_en(pm4, R_00B51C_SPI_SHADER_PGM_RSRC3_LS,
S_00B51C_CU_EN(0xffff) | S_00B51C_WAVE_LIMIT(0x3F),
C_00B51C_CU_EN, 0, &sscreen->info, (void*)si_pm4_set_reg);
@@ -5713,7 +5713,7 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
S_028A44_ES_VERTS_PER_SUBGRP(64) | S_028A44_GS_PRIMS_PER_SUBGRP(4));
}
- if (sctx->chip_class == GFX8) {
+ if (sctx->gfx_level == GFX8) {
unsigned vgt_tess_distribution;
vgt_tess_distribution = S_028B50_ACCUM_ISOLINE(32) | S_028B50_ACCUM_TRI(11) |
@@ -5728,11 +5728,11 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
si_pm4_set_reg(pm4, R_028B50_VGT_TESS_DISTRIBUTION, vgt_tess_distribution);
}
- if (sscreen->info.chip_class <= GFX9) {
+ if (sscreen->info.gfx_level <= GFX9) {
si_pm4_set_reg(pm4, R_028AA0_VGT_INSTANCE_STEP_RATE_0, 1);
}
- if (sctx->chip_class == GFX9) {
+ if (sctx->gfx_level == GFX9) {
si_pm4_set_reg(pm4, R_030920_VGT_MAX_VTX_INDX, ~0);
si_pm4_set_reg(pm4, R_030924_VGT_MIN_VTX_INDX, 0);
si_pm4_set_reg(pm4, R_030928_VGT_INDX_OFFSET, 0);
@@ -5742,11 +5742,11 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
S_028060_POPS_DRAIN_PS_ON_OVERLAP(1));
}
- if (sctx->chip_class >= GFX9) {
+ if (sctx->gfx_level >= GFX9) {
ac_set_reg_cu_en(pm4, R_00B41C_SPI_SHADER_PGM_RSRC3_HS,
S_00B41C_CU_EN(0xffff) | S_00B41C_WAVE_LIMIT(0x3F), C_00B41C_CU_EN,
0, &sscreen->info,
- (void*)(sctx->chip_class >= GFX10 ? si_pm4_set_reg_idx3 : si_pm4_set_reg));
+ (void*)(sctx->gfx_level >= GFX10 ? si_pm4_set_reg_idx3 : si_pm4_set_reg));
si_pm4_set_reg(pm4, R_028B50_VGT_TESS_DISTRIBUTION,
S_028B50_ACCUM_ISOLINE(12) | S_028B50_ACCUM_TRI(30) | S_028B50_ACCUM_QUAD(24) |
@@ -5759,13 +5759,13 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
si_pm4_set_reg(pm4, R_030968_VGT_INSTANCE_BASE_ID, 0);
- if (sctx->chip_class < GFX11) {
+ if (sctx->gfx_level < GFX11) {
si_pm4_set_reg(pm4, R_0301EC_CP_COHER_START_DELAY,
- sctx->chip_class >= GFX10 ? 0x20 : 0);
+ sctx->gfx_level >= GFX10 ? 0x20 : 0);
}
}
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
si_pm4_set_reg(pm4, R_00B0C8_SPI_SHADER_USER_ACCUM_PS_0, 0);
si_pm4_set_reg(pm4, R_00B0CC_SPI_SHADER_USER_ACCUM_PS_1, 0);
si_pm4_set_reg(pm4, R_00B0D0_SPI_SHADER_USER_ACCUM_PS_2, 0);
@@ -5785,7 +5785,7 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
/* Enable CMASK/HTILE/DCC caching in L2 for small chips. */
unsigned meta_write_policy, meta_read_policy;
- unsigned no_alloc = sctx->chip_class >= GFX11 ? V_02807C_CACHE_NOA_GFX11:
+ unsigned no_alloc = sctx->gfx_level >= GFX11 ? V_02807C_CACHE_NOA_GFX11:
V_02807C_CACHE_NOA_GFX10;
if (sscreen->info.max_render_backends <= 4) {
meta_write_policy = V_02807C_CACHE_LRU_WR; /* cache writes */
@@ -5805,7 +5805,7 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
S_02807C_HTILE_RD_POLICY(meta_read_policy));
unsigned gl2_cc;
- if (sctx->chip_class >= GFX11)
+ if (sctx->gfx_level >= GFX11)
gl2_cc = S_028410_DCC_WR_POLICY_GFX11(meta_write_policy) |
S_028410_COLOR_WR_POLICY_GFX11(V_028410_CACHE_STREAM) |
S_028410_COLOR_RD_POLICY(V_028410_CACHE_NOA_GFX11);
@@ -5835,9 +5835,9 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
* a single primitive shader subgroup.
*/
si_pm4_set_reg(pm4, R_028C50_PA_SC_NGG_MODE_CNTL,
- S_028C50_MAX_DEALLOCS_IN_WAVE(sctx->chip_class >= GFX11 ? 16 : 512));
+ S_028C50_MAX_DEALLOCS_IN_WAVE(sctx->gfx_level >= GFX11 ? 16 : 512));
- if (sctx->chip_class < GFX11) {
+ if (sctx->gfx_level < GFX11) {
/* Reuse for legacy (non-NGG) only. */
si_pm4_set_reg(pm4, R_028C58_VGT_VERTEX_REUSE_BLOCK_CNTL, 14);
}
@@ -5855,7 +5855,7 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
si_pm4_set_reg(pm4, R_030988_GE_USER_VGPR_EN, 0);
}
- if (sctx->chip_class >= GFX10 && sctx->chip_class <= GFX10_3) {
+ if (sctx->gfx_level >= GFX10 && sctx->gfx_level <= GFX10_3) {
/* Logical CUs 16 - 31 */
ac_set_reg_cu_en(pm4, R_00B004_SPI_SHADER_PGM_RSRC4_PS, S_00B004_CU_EN(cu_mask_ps >> 16),
C_00B004_CU_EN, 16, &sscreen->info, (void*)si_pm4_set_reg_idx3);
@@ -5871,7 +5871,7 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
si_pm4_set_reg(pm4, R_00B1D4_SPI_SHADER_USER_ACCUM_VS_3, 0);
}
- if (sctx->chip_class >= GFX10_3) {
+ if (sctx->gfx_level >= GFX10_3) {
si_pm4_set_reg(pm4, R_028750_SX_PS_DOWNCONVERT_CONTROL, 0xff);
/* The rate combiners have no effect if they are disabled like this:
* VERTEX_RATE: BYPASS_VTX_RATE_COMBINER = 1
@@ -5887,7 +5887,7 @@ void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing)
S_028848_SAMPLE_ITER_COMBINER_MODE(V_028848_VRS_COMB_MODE_OVERRIDE));
}
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
si_pm4_set_reg(pm4, R_028C54_PA_SC_BINNER_CNTL_2, 0);
si_pm4_set_reg(pm4, R_028620_PA_RATE_CNTL,
S_028620_VERTEX_RATE(2) | S_028620_PRIM_RATE(1));
diff --git a/src/gallium/drivers/radeonsi/si_state.h b/src/gallium/drivers/radeonsi/si_state.h
index b6e83a186c1..4cbcde6ac34 100644
--- a/src/gallium/drivers/radeonsi/si_state.h
+++ b/src/gallium/drivers/radeonsi/si_state.h
@@ -529,7 +529,7 @@ struct pb_slab *si_bindless_descriptor_slab_alloc(void *priv, unsigned heap, uns
void si_bindless_descriptor_slab_free(void *priv, struct pb_slab *pslab);
void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf);
/* si_state.c */
-uint32_t si_translate_colorformat(enum chip_class chip_class, enum pipe_format format);
+uint32_t si_translate_colorformat(enum amd_gfx_level gfx_level, enum pipe_format format);
void si_init_state_compute_functions(struct si_context *sctx);
void si_init_state_functions(struct si_context *sctx);
void si_init_screen_state_functions(struct si_screen *sscreen);
diff --git a/src/gallium/drivers/radeonsi/si_state_binning.c b/src/gallium/drivers/radeonsi/si_state_binning.c
index 49c5758ac07..48c2343f8ba 100644
--- a/src/gallium/drivers/radeonsi/si_state_binning.c
+++ b/src/gallium/drivers/radeonsi/si_state_binning.c
@@ -406,7 +406,7 @@ static void si_emit_dpbb_disable(struct si_context *sctx)
{
radeon_begin(&sctx->gfx_cs);
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
struct uvec2 bin_size = {};
struct uvec2 bin_size_extend = {};
@@ -446,7 +446,7 @@ void si_emit_dpbb_state(struct si_context *sctx)
struct si_state_dsa *dsa = sctx->queued.named.dsa;
unsigned db_shader_control = sctx->ps_db_shader_control;
- assert(sctx->chip_class >= GFX9);
+ assert(sctx->gfx_level >= GFX9);
if (!sscreen->dpbb_allowed || sctx->dpbb_force_off ||
sctx->dpbb_force_off_profile_vs || sctx->dpbb_force_off_profile_ps) {
@@ -475,7 +475,7 @@ void si_emit_dpbb_state(struct si_context *sctx)
sctx->framebuffer.colorbuf_enabled_4bit & blend->cb_target_enabled_4bit;
struct uvec2 color_bin_size, depth_bin_size;
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
gfx10_get_bin_sizes(sctx, cb_target_enabled_4bit, &color_bin_size, &depth_bin_size);
} else {
color_bin_size = si_get_color_bin_size(sctx, cb_target_enabled_4bit);
diff --git a/src/gallium/drivers/radeonsi/si_state_draw.cpp b/src/gallium/drivers/radeonsi/si_state_draw.cpp
index 8bf1bea07a7..9103cacc1f4 100644
--- a/src/gallium/drivers/radeonsi/si_state_draw.cpp
+++ b/src/gallium/drivers/radeonsi/si_state_draw.cpp
@@ -45,7 +45,7 @@
#elif (GFX_VER == 11)
#define GFX(name) name##GFX11
#else
-#error "Unknown gfx version"
+#error "Unknown gfx level"
#endif
/* special primitive types */
@@ -107,7 +107,7 @@ static void si_emit_spi_map(struct si_context *sctx)
radeon_end_update_context_roll(sctx);
}
-template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG>
+template <amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG>
static bool si_update_shaders(struct si_context *sctx)
{
struct pipe_context *ctx = (struct pipe_context *)sctx;
@@ -400,7 +400,7 @@ static unsigned si_conv_pipe_prim(unsigned mode)
return prim_conv[mode];
}
-template<chip_class GFX_VERSION>
+template<amd_gfx_level GFX_VERSION>
static void si_cp_dma_prefetch_inline(struct si_context *sctx, struct pipe_resource *buf,
unsigned offset, unsigned size)
{
@@ -449,7 +449,7 @@ static void si_cp_dma_prefetch_inline(struct si_context *sctx, struct pipe_resou
void si_cp_dma_prefetch(struct si_context *sctx, struct pipe_resource *buf,
unsigned offset, unsigned size)
{
- switch (sctx->chip_class) {
+ switch (sctx->gfx_level) {
case GFX7:
si_cp_dma_prefetch_inline<GFX7>(sctx, buf, offset, size);
break;
@@ -475,7 +475,7 @@ void si_cp_dma_prefetch(struct si_context *sctx, struct pipe_resource *buf,
#endif
-template<chip_class GFX_VERSION>
+template<amd_gfx_level GFX_VERSION>
static void si_prefetch_shader_async(struct si_context *sctx, struct si_shader *shader)
{
struct pipe_resource *bo = &shader->bo->b.b;
@@ -492,7 +492,7 @@ enum si_L2_prefetch_mode {
/**
* Prefetch shaders.
*/
-template<chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
+template<amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
si_L2_prefetch_mode mode>
static void si_prefetch_shaders(struct si_context *sctx)
{
@@ -623,12 +623,12 @@ static void si_emit_derived_tess_state(struct si_context *sctx, unsigned *num_pa
struct si_shader_selector *tcs =
sctx->shader.tcs.cso ? sctx->shader.tcs.cso : sctx->shader.tes.cso;
unsigned tess_uses_primid = sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id;
- bool has_primid_instancing_bug = sctx->chip_class == GFX6 && sctx->screen->info.max_se == 1;
+ bool has_primid_instancing_bug = sctx->gfx_level == GFX6 && sctx->screen->info.max_se == 1;
unsigned tes_sh_base = sctx->shader_pointers.sh_base[PIPE_SHADER_TESS_EVAL];
uint8_t num_tcs_input_cp = sctx->patch_vertices;
/* Since GFX9 has merged LS-HS in the TCS state, set LS = TCS. */
- if (sctx->chip_class >= GFX9) {
+ if (sctx->gfx_level >= GFX9) {
if (sctx->shader.tcs.cso)
ls_current = sctx->shader.tcs.current;
else
@@ -744,7 +744,7 @@ static void si_emit_derived_tess_state(struct si_context *sctx, unsigned *num_pa
(wave_size - temp_verts_per_tg % wave_size >= MAX2(max_verts_per_patch, 8)))
*num_patches = (temp_verts_per_tg & ~(wave_size - 1)) / max_verts_per_patch;
- if (sctx->chip_class == GFX6) {
+ if (sctx->gfx_level == GFX6) {
/* GFX6 bug workaround, related to power management. Limit LS-HS
* threadgroups to only one wave.
*/
@@ -797,7 +797,7 @@ static void si_emit_derived_tess_state(struct si_context *sctx, unsigned *num_pa
/* Compute the LDS size. */
unsigned lds_size = lds_per_patch * *num_patches;
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
assert(lds_size <= 65536);
lds_size = align(lds_size, 512) / 512;
} else {
@@ -817,10 +817,10 @@ static void si_emit_derived_tess_state(struct si_context *sctx, unsigned *num_pa
struct radeon_cmdbuf *cs = &sctx->gfx_cs;
radeon_begin(cs);
- if (sctx->chip_class >= GFX9) {
+ if (sctx->gfx_level >= GFX9) {
unsigned hs_rsrc2 = ls_current->config.rsrc2;
- if (sctx->chip_class >= GFX10)
+ if (sctx->gfx_level >= GFX10)
hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX10(lds_size);
else
hs_rsrc2 |= S_00B42C_LDS_SIZE_GFX9(lds_size);
@@ -841,7 +841,7 @@ static void si_emit_derived_tess_state(struct si_context *sctx, unsigned *num_pa
/* Due to a hw bug, RSRC2_LS must be written twice with another
* LS register written in between. */
- if (sctx->chip_class == GFX7 && sctx->family != CHIP_HAWAII)
+ if (sctx->gfx_level == GFX7 && sctx->family != CHIP_HAWAII)
radeon_set_sh_reg(R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
radeon_set_sh_reg_seq(R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
radeon_emit(ls_current->config.rsrc1);
@@ -869,7 +869,7 @@ static void si_emit_derived_tess_state(struct si_context *sctx, unsigned *num_pa
if (sctx->last_ls_hs_config != ls_hs_config) {
radeon_begin(cs);
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
radeon_set_context_reg_idx(R_028B58_VGT_LS_HS_CONFIG, 2, ls_hs_config);
} else {
radeon_set_context_reg(R_028B58_VGT_LS_HS_CONFIG, ls_hs_config);
@@ -921,7 +921,7 @@ static unsigned si_get_init_multi_vgt_param(struct si_screen *sscreen, union si_
/* Needed for 028B6C_DISTRIBUTION_MODE != 0. (implies >= GFX8) */
if (sscreen->info.has_distributed_tess) {
if (key->u.uses_gs) {
- if (sscreen->info.chip_class == GFX8)
+ if (sscreen->info.gfx_level == GFX8)
partial_es_wave = true;
} else {
partial_vs_wave = true;
@@ -935,7 +935,7 @@ static unsigned si_get_init_multi_vgt_param(struct si_screen *sscreen, union si_
wd_switch_on_eop = true;
}
- if (sscreen->info.chip_class >= GFX7) {
+ if (sscreen->info.gfx_level >= GFX7) {
/* WD_SWITCH_ON_EOP has no effect on GPUs with less than
* 4 shader engines. Set 1 to pass the assertion below.
* The other cases are hardware requirements.
@@ -964,7 +964,7 @@ static unsigned si_get_init_multi_vgt_param(struct si_screen *sscreen, union si_
* Assume indirect draws always use small instances.
* This is needed for good VS wave utilization.
*/
- if (sscreen->info.chip_class <= GFX8 && sscreen->info.max_se == 4 &&
+ if (sscreen->info.gfx_level <= GFX8 && sscreen->info.max_se == 4 &&
key->u.multi_instances_smaller_than_primgroup)
wd_switch_on_eop = true;
@@ -984,7 +984,7 @@ static unsigned si_get_init_multi_vgt_param(struct si_screen *sscreen, union si_
/* Required by Hawaii and, for some special cases, by GFX8. */
if (ia_switch_on_eoi &&
(sscreen->info.family == CHIP_HAWAII ||
- (sscreen->info.chip_class == GFX8 && (key->u.uses_gs || max_primgroup_in_wave != 2))))
+ (sscreen->info.gfx_level == GFX8 && (key->u.uses_gs || max_primgroup_in_wave != 2))))
partial_vs_wave = true;
/* Instancing bug on Bonaire. */
@@ -1002,18 +1002,18 @@ static unsigned si_get_init_multi_vgt_param(struct si_screen *sscreen, union si_
}
/* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
- if (sscreen->info.chip_class <= GFX8 && ia_switch_on_eoi)
+ if (sscreen->info.gfx_level <= GFX8 && ia_switch_on_eoi)
partial_es_wave = true;
return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) | S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
- S_028AA8_WD_SWITCH_ON_EOP(sscreen->info.chip_class >= GFX7 ? wd_switch_on_eop : 0) |
+ S_028AA8_WD_SWITCH_ON_EOP(sscreen->info.gfx_level >= GFX7 ? wd_switch_on_eop : 0) |
/* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
- S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->info.chip_class == GFX8 ? max_primgroup_in_wave
+ S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->info.gfx_level == GFX8 ? max_primgroup_in_wave
: 0) |
- S_030960_EN_INST_OPT_BASIC(sscreen->info.chip_class >= GFX9) |
- S_030960_EN_INST_OPT_ADV(sscreen->info.chip_class >= GFX9);
+ S_030960_EN_INST_OPT_BASIC(sscreen->info.gfx_level >= GFX9) |
+ S_030960_EN_INST_OPT_ADV(sscreen->info.gfx_level >= GFX9);
}
static void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
@@ -1078,7 +1078,7 @@ static bool num_instanced_prims_less_than(const struct pipe_draw_indirect_info *
}
}
-template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS,
+template <amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS,
si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
const struct pipe_draw_indirect_info *indirect,
@@ -1160,7 +1160,7 @@ static unsigned si_conv_prim_to_gs_out(unsigned mode)
}
/* rast_prim is the primitive type after GS. */
-template<chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG> ALWAYS_INLINE
+template<amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG> ALWAYS_INLINE
static void si_emit_rasterizer_prim_state(struct si_context *sctx)
{
struct radeon_cmdbuf *cs = &sctx->gfx_cs;
@@ -1214,7 +1214,7 @@ static void si_emit_rasterizer_prim_state(struct si_context *sctx)
}
}
-template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
+template <amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static void si_emit_vs_state(struct si_context *sctx, unsigned index_size)
{
@@ -1283,7 +1283,7 @@ static bool si_prim_restart_index_changed(struct si_context *sctx, bool primitiv
sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN);
}
-template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS,
+template <amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS,
si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static void si_emit_ia_multi_vgt_param(struct si_context *sctx,
const struct pipe_draw_indirect_info *indirect,
@@ -1322,7 +1322,7 @@ static void si_emit_ia_multi_vgt_param(struct si_context *sctx,
/* GFX10 removed IA_MULTI_VGT_PARAM in exchange for GE_CNTL.
* We overload last_multi_vgt_param.
*/
-template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG> ALWAYS_INLINE
+template <amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG> ALWAYS_INLINE
static void gfx10_emit_ge_cntl(struct si_context *sctx, unsigned num_patches)
{
union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
@@ -1376,7 +1376,7 @@ static void gfx10_emit_ge_cntl(struct si_context *sctx, unsigned num_patches)
}
}
-template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
+template <amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static void si_emit_draw_registers(struct si_context *sctx,
const struct pipe_draw_indirect_info *indirect,
@@ -1440,7 +1440,7 @@ static void si_emit_draw_registers(struct si_context *sctx,
} \
} while (0)
-template <chip_class GFX_VERSION, si_has_ngg NGG, si_is_draw_vertex_state IS_DRAW_VERTEX_STATE>
+template <amd_gfx_level GFX_VERSION, si_has_ngg NGG, si_is_draw_vertex_state IS_DRAW_VERTEX_STATE>
ALWAYS_INLINE
static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw_info *info,
unsigned drawid_base,
@@ -1812,7 +1812,7 @@ static void si_emit_draw_packets(struct si_context *sctx, const struct pipe_draw
}
/* Return false if not bound. */
-template<chip_class GFX_VERSION>
+template<amd_gfx_level GFX_VERSION>
static bool ALWAYS_INLINE si_set_vb_descriptor(struct si_vertex_elements *velems,
struct pipe_vertex_buffer *vb,
unsigned index, /* vertex element index */
@@ -1864,7 +1864,7 @@ void si_set_vertex_buffer_descriptor(struct si_screen *sscreen, struct si_vertex
struct pipe_vertex_buffer *vb, unsigned element_index,
uint32_t *out)
{
- switch (sscreen->info.chip_class) {
+ switch (sscreen->info.gfx_level) {
case GFX6:
si_set_vb_descriptor<GFX6>(velems, vb, element_index, out);
break;
@@ -1887,7 +1887,7 @@ void si_set_vertex_buffer_descriptor(struct si_screen *sscreen, struct si_vertex
si_set_vb_descriptor<GFX11>(velems, vb, element_index, out);
break;
default:
- unreachable("unhandled chip class");
+ unreachable("unhandled gfx level");
}
}
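The hunk above shows the dispatch-by-generation pattern this rename touches throughout the driver: a single entry point switches on the (now) gfx_level enum and forwards to a per-generation specialization, with unreachable() catching unhandled values. A toy version of the same pattern with invented names and placeholder bodies:

#include <assert.h>

enum example_gfx_level { EXAMPLE_GFX6 = 6, EXAMPLE_GFX11 = 11 };

/* Placeholder per-generation implementations. */
static void example_set_descriptor_gfx6(unsigned *out)  { out[0] = 6; }
static void example_set_descriptor_gfx11(unsigned *out) { out[0] = 11; }

static void example_set_descriptor(enum example_gfx_level gfx_level,
                                   unsigned *out)
{
   switch (gfx_level) {
   case EXAMPLE_GFX6:
      example_set_descriptor_gfx6(out);
      break;
   case EXAMPLE_GFX11:
      example_set_descriptor_gfx11(out);
      break;
   default:
      assert(!"unhandled gfx level"); /* mirrors unreachable() */
   }
}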
@@ -1903,7 +1903,7 @@ static ALWAYS_INLINE unsigned get_next_vertex_state_elem(struct pipe_vertex_stat
return util_bitcount_fast<POPCNT>(state->input.full_velem_mask & BITFIELD_MASK(semantic_index));
}
-template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
+template <amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
si_is_draw_vertex_state IS_DRAW_VERTEX_STATE, util_popcnt POPCNT> ALWAYS_INLINE
static bool si_upload_and_prefetch_VB_descriptors(struct si_context *sctx,
struct pipe_vertex_state *state,
@@ -2125,7 +2125,7 @@ static void si_get_draw_start_count(struct si_context *sctx, const struct pipe_d
}
}
-template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
+template <amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
si_is_draw_vertex_state IS_DRAW_VERTEX_STATE> ALWAYS_INLINE
static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
const struct pipe_draw_indirect_info *indirect,
@@ -2178,7 +2178,7 @@ static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_i
pipe_resource_reference(&indexbuf, NULL); \
} while (0)
-template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
+template <amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
si_is_draw_vertex_state IS_DRAW_VERTEX_STATE, util_popcnt POPCNT> ALWAYS_INLINE
static void si_draw(struct pipe_context *ctx,
const struct pipe_draw_info *info,
@@ -2638,7 +2638,7 @@ static void si_draw(struct pipe_context *ctx,
DRAW_CLEANUP;
}
-template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG>
+template <amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG>
static void si_draw_vbo(struct pipe_context *ctx,
const struct pipe_draw_info *info,
unsigned drawid_offset,
@@ -2650,7 +2650,7 @@ static void si_draw_vbo(struct pipe_context *ctx,
(ctx, info, drawid_offset, indirect, draws, num_draws, NULL, 0);
}
-template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
+template <amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG,
util_popcnt POPCNT>
static void si_draw_vertex_state(struct pipe_context *ctx,
struct pipe_vertex_state *vstate,
@@ -2717,7 +2717,7 @@ static void si_draw_rectangle(struct blitter_context *blitter, void *vertex_elem
pipe->draw_vbo(pipe, &info, 0, NULL, &draw, 1);
}
-template <chip_class GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG>
+template <amd_gfx_level GFX_VERSION, si_has_tess HAS_TESS, si_has_gs HAS_GS, si_has_ngg NGG>
static void si_init_draw_vbo(struct si_context *sctx)
{
if (NGG && GFX_VERSION < GFX10)
@@ -2738,7 +2738,7 @@ static void si_init_draw_vbo(struct si_context *sctx)
}
}
-template <chip_class GFX_VERSION>
+template <amd_gfx_level GFX_VERSION>
static void si_init_draw_vbo_all_pipeline_options(struct si_context *sctx)
{
si_init_draw_vbo<GFX_VERSION, TESS_OFF, GS_OFF, NGG_OFF>(sctx);
@@ -2774,7 +2774,7 @@ static void si_invalid_draw_vertex_state(struct pipe_context *ctx,
extern "C"
void GFX(si_init_draw_functions_)(struct si_context *sctx)
{
- assert(sctx->chip_class == GFX());
+ assert(sctx->gfx_level == GFX());
si_init_draw_vbo_all_pipeline_options<GFX()>(sctx);
diff --git a/src/gallium/drivers/radeonsi/si_state_shaders.cpp b/src/gallium/drivers/radeonsi/si_state_shaders.cpp
index 4ad3d8ca0e4..cf320504e69 100644
--- a/src/gallium/drivers/radeonsi/si_state_shaders.cpp
+++ b/src/gallium/drivers/radeonsi/si_state_shaders.cpp
@@ -43,7 +43,7 @@ unsigned si_determine_wave_size(struct si_screen *sscreen, struct si_shader *sha
struct si_shader_info *info = shader ? &shader->selector->info : NULL;
gl_shader_stage stage = shader ? shader->selector->stage : MESA_SHADER_COMPUTE;
- if (sscreen->info.chip_class < GFX10)
+ if (sscreen->info.gfx_level < GFX10)
return 64;
/* Legacy GS only supports Wave64. */
@@ -118,7 +118,7 @@ unsigned si_determine_wave_size(struct si_screen *sscreen, struct si_shader *sha
* know why this helps.
*/
if (stage <= MESA_SHADER_GEOMETRY &&
- !(sscreen->info.chip_class == GFX10 && shader && shader->key.ge.opt.ngg_culling))
+ !(sscreen->info.gfx_level == GFX10 && shader && shader->key.ge.opt.ngg_culling))
return 32;
/* TODO: Merged shaders must use the same wave size because the driver doesn't recompile
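The hunks above belong to the wave-size heuristic: generations before GFX10 only have Wave64, while on GFX10+ stages up to geometry prefer Wave32 except in the GFX10 NGG-culling case. A heavily simplified sketch that keeps only those visible conditions and omits the legacy-GS, compute, and debug-flag paths of the real si_determine_wave_size:

#include <stdbool.h>

static unsigned example_wave_size(int gfx_level /* e.g. 10 for GFX10 */,
                                  bool is_geometry_stage_or_earlier,
                                  bool is_gfx10_ngg_culling)
{
   if (gfx_level < 10)
      return 64; /* before GFX10 there is only Wave64 */
   if (is_geometry_stage_or_earlier && !is_gfx10_ngg_culling)
      return 32;
   return 64;
}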
@@ -501,7 +501,7 @@ void si_destroy_shader_cache(struct si_screen *sscreen)
bool si_shader_mem_ordered(struct si_shader *shader)
{
- if (shader->selector->screen->info.chip_class < GFX10)
+ if (shader->selector->screen->info.gfx_level < GFX10)
return false;
/* Return true if both types of VMEM that return something are used. */
@@ -588,7 +588,7 @@ static void si_set_tesseval_regs(struct si_screen *sscreen, const struct si_shad
static void polaris_set_vgt_vertex_reuse(struct si_screen *sscreen, struct si_shader_selector *sel,
struct si_shader *shader)
{
- if (sscreen->info.family < CHIP_POLARIS10 || sscreen->info.chip_class >= GFX10)
+ if (sscreen->info.family < CHIP_POLARIS10 || sscreen->info.gfx_level >= GFX10)
return;
/* VS as VS, or VS as ES: */
@@ -646,7 +646,7 @@ static unsigned si_get_vs_vgpr_comp_cnt(struct si_screen *sscreen, struct si_sha
unsigned max = 0;
if (shader->info.uses_instanceid) {
- if (sscreen->info.chip_class >= GFX10)
+ if (sscreen->info.gfx_level >= GFX10)
max = MAX2(max, 3);
else if (is_ls)
max = MAX2(max, 2); /* use (InstanceID / StepRate0) because StepRate0 == 1 */
@@ -660,7 +660,7 @@ static unsigned si_get_vs_vgpr_comp_cnt(struct si_screen *sscreen, struct si_sha
/* GFX11: We prefer to compute RelAutoIndex using (WaveID * WaveSize + ThreadID).
* Older chips didn't have WaveID in LS.
*/
- if (is_ls && sscreen->info.chip_class <= GFX10_3)
+ if (is_ls && sscreen->info.gfx_level <= GFX10_3)
max = MAX2(max, 1); /* RelAutoIndex */
return max;
@@ -669,7 +669,7 @@ static unsigned si_get_vs_vgpr_comp_cnt(struct si_screen *sscreen, struct si_sha
unsigned si_calc_inst_pref_size(struct si_shader *shader)
{
/* TODO: Disable for now. */
- if (shader->selector->screen->info.chip_class == GFX11)
+ if (shader->selector->screen->info.gfx_level == GFX11)
return 0;
/* inst_pref_size is calculated in cache line size granularity */
@@ -682,7 +682,7 @@ static void si_shader_ls(struct si_screen *sscreen, struct si_shader *shader)
struct si_pm4_state *pm4;
uint64_t va;
- assert(sscreen->info.chip_class <= GFX8);
+ assert(sscreen->info.gfx_level <= GFX8);
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
@@ -711,15 +711,15 @@ static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader)
va = shader->bo->gpu_address;
- if (sscreen->info.chip_class >= GFX9) {
- if (sscreen->info.chip_class >= GFX11) {
+ if (sscreen->info.gfx_level >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX11) {
ac_set_reg_cu_en(pm4, R_00B404_SPI_SHADER_PGM_RSRC4_HS,
S_00B404_INST_PREF_SIZE(si_calc_inst_pref_size(shader)) |
S_00B404_CU_EN(0xffff),
C_00B404_CU_EN, 16, &sscreen->info,
(void (*)(void*, unsigned, uint32_t))si_pm4_set_reg_idx3);
}
- if (sscreen->info.chip_class >= GFX10) {
+ if (sscreen->info.gfx_level >= GFX10) {
si_pm4_set_reg(pm4, R_00B520_SPI_SHADER_PGM_LO_LS, va >> 8);
} else {
si_pm4_set_reg(pm4, R_00B410_SPI_SHADER_PGM_LO_LS, va >> 8);
@@ -730,7 +730,7 @@ static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader)
shader->config.rsrc2 = S_00B42C_USER_SGPR(num_user_sgprs) |
S_00B42C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
- if (sscreen->info.chip_class >= GFX10)
+ if (sscreen->info.gfx_level >= GFX10)
shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
else
shader->config.rsrc2 |= S_00B42C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5);
@@ -746,16 +746,16 @@ static void si_shader_hs(struct si_screen *sscreen, struct si_shader *shader)
si_pm4_set_reg(
pm4, R_00B428_SPI_SHADER_PGM_RSRC1_HS,
S_00B428_VGPRS((shader->config.num_vgprs - 1) / (shader->wave_size == 32 ? 8 : 4)) |
- (sscreen->info.chip_class <= GFX9 ? S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8)
+ (sscreen->info.gfx_level <= GFX9 ? S_00B428_SGPRS((shader->config.num_sgprs - 1) / 8)
: 0) |
S_00B428_DX10_CLAMP(1) | S_00B428_MEM_ORDERED(si_shader_mem_ordered(shader)) |
- S_00B428_WGP_MODE(sscreen->info.chip_class >= GFX10) |
+ S_00B428_WGP_MODE(sscreen->info.gfx_level >= GFX10) |
S_00B428_FLOAT_MODE(shader->config.float_mode) |
- S_00B428_LS_VGPR_COMP_CNT(sscreen->info.chip_class >= GFX9
+ S_00B428_LS_VGPR_COMP_CNT(sscreen->info.gfx_level >= GFX9
? si_get_vs_vgpr_comp_cnt(sscreen, shader, false)
: 0));
- if (sscreen->info.chip_class <= GFX8) {
+ if (sscreen->info.gfx_level <= GFX8) {
si_pm4_set_reg(pm4, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, shader->config.rsrc2);
}
}
@@ -790,7 +790,7 @@ static void si_shader_es(struct si_screen *sscreen, struct si_shader *shader)
uint64_t va;
unsigned oc_lds_en;
- assert(sscreen->info.chip_class <= GFX8);
+ assert(sscreen->info.gfx_level <= GFX8);
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
@@ -956,7 +956,7 @@ static void si_emit_shader_gs(struct si_context *sctx)
radeon_opt_set_context_reg(sctx, R_028B90_VGT_GS_INSTANCE_CNT, SI_TRACKED_VGT_GS_INSTANCE_CNT,
shader->ctx_reg.gs.vgt_gs_instance_cnt);
- if (sctx->chip_class >= GFX9) {
+ if (sctx->gfx_level >= GFX9) {
/* R_028A44_VGT_GS_ONCHIP_CNTL */
radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL, SI_TRACKED_VGT_GS_ONCHIP_CNTL,
shader->ctx_reg.gs.vgt_gs_onchip_cntl);
@@ -981,30 +981,30 @@ static void si_emit_shader_gs(struct si_context *sctx)
/* These don't cause any context rolls. */
if (sctx->screen->info.spi_cu_en_has_effect) {
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
ac_set_reg_cu_en(&sctx->gfx_cs, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
shader->ctx_reg.gs.spi_shader_pgm_rsrc3_gs,
C_00B21C_CU_EN, 0, &sctx->screen->info,
(void (*)(void*, unsigned, uint32_t))
- (sctx->chip_class >= GFX10 ? radeon_set_sh_reg_idx3_func : radeon_set_sh_reg_func));
+ (sctx->gfx_level >= GFX10 ? radeon_set_sh_reg_idx3_func : radeon_set_sh_reg_func));
sctx->tracked_regs.reg_saved &= ~BITFIELD64_BIT(SI_TRACKED_SPI_SHADER_PGM_RSRC3_GS);
}
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
ac_set_reg_cu_en(&sctx->gfx_cs, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
shader->ctx_reg.gs.spi_shader_pgm_rsrc4_gs,
C_00B204_CU_EN_GFX10, 16, &sctx->screen->info,
(void (*)(void*, unsigned, uint32_t))
- (sctx->chip_class >= GFX10 ? radeon_set_sh_reg_idx3_func : radeon_set_sh_reg_func));
+ (sctx->gfx_level >= GFX10 ? radeon_set_sh_reg_idx3_func : radeon_set_sh_reg_func));
sctx->tracked_regs.reg_saved &= ~BITFIELD64_BIT(SI_TRACKED_SPI_SHADER_PGM_RSRC4_GS);
}
} else {
radeon_begin_again(&sctx->gfx_cs);
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
radeon_opt_set_sh_reg_idx3(sctx, R_00B21C_SPI_SHADER_PGM_RSRC3_GS,
SI_TRACKED_SPI_SHADER_PGM_RSRC3_GS,
shader->ctx_reg.gs.spi_shader_pgm_rsrc3_gs);
}
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
radeon_opt_set_sh_reg_idx3(sctx, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
SI_TRACKED_SPI_SHADER_PGM_RSRC4_GS,
shader->ctx_reg.gs.spi_shader_pgm_rsrc4_gs);
@@ -1023,7 +1023,7 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
unsigned max_stream = util_last_bit(sel->info.base.gs.active_stream_mask);
unsigned offset;
- assert(sscreen->info.chip_class < GFX11); /* gfx11 doesn't have the legacy pipeline */
+ assert(sscreen->info.gfx_level < GFX11); /* gfx11 doesn't have the legacy pipeline */
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
@@ -1064,7 +1064,7 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
va = shader->bo->gpu_address;
- if (sscreen->info.chip_class >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX9) {
unsigned input_prim = sel->info.base.gs.input_primitive;
gl_shader_stage es_stage = shader->key.ge.part.gs.es->stage;
unsigned es_vgpr_comp_cnt, gs_vgpr_comp_cnt;
@@ -1094,7 +1094,7 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
else
num_user_sgprs = GFX9_GS_NUM_USER_SGPR;
- if (sscreen->info.chip_class >= GFX10) {
+ if (sscreen->info.gfx_level >= GFX10) {
si_pm4_set_reg(pm4, R_00B320_SPI_SHADER_PGM_LO_ES, va >> 8);
} else {
si_pm4_set_reg(pm4, R_00B210_SPI_SHADER_PGM_LO_ES, va >> 8);
@@ -1102,7 +1102,7 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
uint32_t rsrc1 = S_00B228_VGPRS((shader->config.num_vgprs - 1) / 4) | S_00B228_DX10_CLAMP(1) |
S_00B228_MEM_ORDERED(si_shader_mem_ordered(shader)) |
- S_00B228_WGP_MODE(sscreen->info.chip_class >= GFX10) |
+ S_00B228_WGP_MODE(sscreen->info.gfx_level >= GFX10) |
S_00B228_FLOAT_MODE(shader->config.float_mode) |
S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt);
uint32_t rsrc2 = S_00B22C_USER_SGPR(num_user_sgprs) |
@@ -1111,7 +1111,7 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
S_00B22C_LDS_SIZE(shader->config.lds_size) |
S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
- if (sscreen->info.chip_class >= GFX10) {
+ if (sscreen->info.gfx_level >= GFX10) {
rsrc2 |= S_00B22C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
} else {
rsrc1 |= S_00B228_SGPRS((shader->config.num_sgprs - 1) / 8);
@@ -1124,7 +1124,7 @@ static void si_shader_gs(struct si_screen *sscreen, struct si_shader *shader)
shader->ctx_reg.gs.spi_shader_pgm_rsrc3_gs = S_00B21C_CU_EN(0xffff) |
S_00B21C_WAVE_LIMIT(0x3F);
shader->ctx_reg.gs.spi_shader_pgm_rsrc4_gs =
- (sscreen->info.chip_class >= GFX11 ? S_00B204_CU_EN_GFX11(1) : S_00B204_CU_EN_GFX10(0xffff)) |
+ (sscreen->info.gfx_level >= GFX11 ? S_00B204_CU_EN_GFX11(1) : S_00B204_CU_EN_GFX10(0xffff)) |
S_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(0);
shader->ctx_reg.gs.vgt_gs_onchip_cntl =
@@ -1189,7 +1189,7 @@ static void gfx10_emit_shader_ngg_tail(struct si_context *sctx, struct si_shader
shader->ctx_reg.ngg.ge_ngg_subgrp_cntl);
radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN, SI_TRACKED_VGT_PRIMITIVEID_EN,
shader->ctx_reg.ngg.vgt_primitiveid_en);
- if (sctx->chip_class < GFX11) {
+ if (sctx->gfx_level < GFX11) {
radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL, SI_TRACKED_VGT_GS_ONCHIP_CNTL,
shader->ctx_reg.ngg.vgt_gs_onchip_cntl);
}
@@ -1220,12 +1220,12 @@ static void gfx10_emit_shader_ngg_tail(struct si_context *sctx, struct si_shader
shader->ctx_reg.ngg.spi_shader_pgm_rsrc3_gs,
C_00B21C_CU_EN, 0, &sctx->screen->info,
(void (*)(void*, unsigned, uint32_t))
- (sctx->chip_class >= GFX10 ? radeon_set_sh_reg_idx3_func : radeon_set_sh_reg_func));
+ (sctx->gfx_level >= GFX10 ? radeon_set_sh_reg_idx3_func : radeon_set_sh_reg_func));
ac_set_reg_cu_en(&sctx->gfx_cs, R_00B204_SPI_SHADER_PGM_RSRC4_GS,
shader->ctx_reg.ngg.spi_shader_pgm_rsrc4_gs,
C_00B204_CU_EN_GFX10, 16, &sctx->screen->info,
(void (*)(void*, unsigned, uint32_t))
- (sctx->chip_class >= GFX10 ? radeon_set_sh_reg_idx3_func : radeon_set_sh_reg_func));
+ (sctx->gfx_level >= GFX10 ? radeon_set_sh_reg_idx3_func : radeon_set_sh_reg_func));
sctx->tracked_regs.reg_saved &= ~BITFIELD64_BIT(SI_TRACKED_SPI_SHADER_PGM_RSRC4_GS) &
~BITFIELD64_BIT(SI_TRACKED_SPI_SHADER_PGM_RSRC3_GS);
} else {
@@ -1420,7 +1420,7 @@ static void gfx10_shader_ngg(struct si_screen *sscreen, struct si_shader *shader
S_00B228_MEM_ORDERED(si_shader_mem_ordered(shader)) |
/* Disable the WGP mode on gfx10.3 because it can hang. (it happened on VanGogh)
* Let's disable it on all chips that disable exactly 1 CU per SA for GS. */
- S_00B228_WGP_MODE(sscreen->info.chip_class == GFX10) |
+ S_00B228_WGP_MODE(sscreen->info.gfx_level == GFX10) |
S_00B228_GS_VGPR_COMP_CNT(gs_vgpr_comp_cnt));
si_pm4_set_reg(pm4, R_00B22C_SPI_SHADER_PGM_RSRC2_GS,
S_00B22C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0) |
@@ -1432,7 +1432,7 @@ static void gfx10_shader_ngg(struct si_screen *sscreen, struct si_shader *shader
shader->ctx_reg.ngg.spi_shader_pgm_rsrc3_gs = S_00B21C_CU_EN(cu_mask) |
S_00B21C_WAVE_LIMIT(0x3F);
- if (sscreen->info.chip_class >= GFX11) {
+ if (sscreen->info.gfx_level >= GFX11) {
shader->ctx_reg.ngg.spi_shader_pgm_rsrc4_gs =
S_00B204_CU_EN_GFX11(0x1) | S_00B204_SPI_SHADER_LATE_ALLOC_GS_GFX10(late_alloc_wave64) |
S_00B204_INST_PREF_SIZE(si_calc_inst_pref_size(shader));
@@ -1487,7 +1487,7 @@ static void gfx10_shader_ngg(struct si_screen *sscreen, struct si_shader *shader
shader->ctx_reg.ngg.pa_cl_ngg_cntl =
S_028838_INDEX_BUF_EDGE_FLAG_ENA(gfx10_edgeflags_have_effect(shader)) |
/* Reuse for NGG. */
- S_028838_VERTEX_REUSE_DEPTH(sscreen->info.chip_class >= GFX10_3 ? 30 : 0);
+ S_028838_VERTEX_REUSE_DEPTH(sscreen->info.gfx_level >= GFX10_3 ? 30 : 0);
shader->pa_cl_vs_out_cntl = si_get_vs_out_cntl(shader->selector, shader, true);
/* Oversubscribe PC. This improves performance when there are too many varyings. */
@@ -1508,7 +1508,7 @@ static void gfx10_shader_ngg(struct si_screen *sscreen, struct si_shader *shader
shader->ctx_reg.ngg.ge_pc_alloc = S_030980_OVERSUB_EN(oversub_pc_lines > 0) |
S_030980_NUM_PC_LINES(oversub_pc_lines - 1);
- if (sscreen->info.chip_class >= GFX11) {
+ if (sscreen->info.gfx_level >= GFX11) {
shader->ge_cntl = S_03096C_PRIMS_PER_SUBGRP(shader->ngg.max_gsprims) |
S_03096C_VERTS_PER_SUBGRP(shader->ngg.hw_max_esverts) |
S_03096C_BREAK_PRIMGRP_AT_EOI(break_wave_at_eoi) |
@@ -1532,7 +1532,7 @@ static void gfx10_shader_ngg(struct si_screen *sscreen, struct si_shader *shader
*
* Tessellation is unaffected because it always sets GE_CNTL.VERT_GRP_SIZE = 0.
*/
- if ((sscreen->info.chip_class == GFX10) &&
+ if ((sscreen->info.gfx_level == GFX10) &&
(es_stage == MESA_SHADER_VERTEX || gs_stage == MESA_SHADER_VERTEX) && /* = no tess */
shader->ngg.hw_max_esverts != 256 &&
shader->ngg.hw_max_esverts > 5) {
@@ -1570,7 +1570,7 @@ static void si_emit_shader_vs(struct si_context *sctx)
radeon_opt_set_context_reg(sctx, R_028A84_VGT_PRIMITIVEID_EN, SI_TRACKED_VGT_PRIMITIVEID_EN,
shader->ctx_reg.vs.vgt_primitiveid_en);
- if (sctx->chip_class <= GFX8) {
+ if (sctx->gfx_level <= GFX8) {
radeon_opt_set_context_reg(sctx, R_028AB4_VGT_REUSE_OFF, SI_TRACKED_VGT_REUSE_OFF,
shader->ctx_reg.vs.vgt_reuse_off);
}
@@ -1595,7 +1595,7 @@ static void si_emit_shader_vs(struct si_context *sctx)
shader->vgt_vertex_reuse_block_cntl);
/* Required programming for tessellation. (legacy pipeline only) */
- if (sctx->chip_class >= GFX10 && shader->selector->stage == MESA_SHADER_TESS_EVAL) {
+ if (sctx->gfx_level >= GFX10 && shader->selector->stage == MESA_SHADER_TESS_EVAL) {
radeon_opt_set_context_reg(sctx, R_028A44_VGT_GS_ONCHIP_CNTL,
SI_TRACKED_VGT_GS_ONCHIP_CNTL,
S_028A44_ES_VERTS_PER_SUBGRP(250) |
@@ -1606,7 +1606,7 @@ static void si_emit_shader_vs(struct si_context *sctx)
radeon_end_update_context_roll(sctx);
/* GE_PC_ALLOC is not a context register, so it doesn't cause a context roll. */
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
radeon_begin_again(&sctx->gfx_cs);
radeon_opt_set_uconfig_reg(sctx, R_030980_GE_PC_ALLOC, SI_TRACKED_GE_PC_ALLOC,
shader->ctx_reg.vs.ge_pc_alloc);
@@ -1633,7 +1633,7 @@ static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
info->base.vs.window_space_position : 0;
bool enable_prim_id = shader->key.ge.mono.u.vs_export_prim_id || info->uses_primid;
- assert(sscreen->info.chip_class < GFX11);
+ assert(sscreen->info.gfx_level < GFX11);
pm4 = si_get_shader_pm4_state(shader);
if (!pm4)
@@ -1659,11 +1659,11 @@ static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
shader->ctx_reg.vs.vgt_primitiveid_en = enable_prim_id;
} else {
shader->ctx_reg.vs.vgt_gs_mode =
- ac_vgt_gs_mode(gs->info.base.gs.vertices_out, sscreen->info.chip_class);
+ ac_vgt_gs_mode(gs->info.base.gs.vertices_out, sscreen->info.gfx_level);
shader->ctx_reg.vs.vgt_primitiveid_en = 0;
}
- if (sscreen->info.chip_class <= GFX8) {
+ if (sscreen->info.gfx_level <= GFX8) {
/* Reuse needs to be set off if we write oViewport. */
shader->ctx_reg.vs.vgt_reuse_off = S_028AB4_REUSE_OFF(info->writes_viewport_index);
}
@@ -1691,7 +1691,7 @@ static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
nparams = MAX2(shader->info.nr_param_exports, 1);
shader->ctx_reg.vs.spi_vs_out_config = S_0286C4_VS_EXPORT_COUNT(nparams - 1);
- if (sscreen->info.chip_class >= GFX10) {
+ if (sscreen->info.gfx_level >= GFX10) {
shader->ctx_reg.vs.spi_vs_out_config |=
S_0286C4_NO_PC_EXPORT(shader->info.nr_param_exports == 0);
}
@@ -1715,12 +1715,12 @@ static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
oc_lds_en = shader->selector->stage == MESA_SHADER_TESS_EVAL ? 1 : 0;
- if (sscreen->info.chip_class >= GFX7) {
+ if (sscreen->info.gfx_level >= GFX7) {
ac_set_reg_cu_en(pm4, R_00B118_SPI_SHADER_PGM_RSRC3_VS,
S_00B118_CU_EN(cu_mask) | S_00B118_WAVE_LIMIT(0x3F),
C_00B118_CU_EN, 0, &sscreen->info,
(void (*)(void*, unsigned, uint32_t))
- (sscreen->info.chip_class >= GFX10 ? si_pm4_set_reg_idx3 : si_pm4_set_reg));
+ (sscreen->info.gfx_level >= GFX10 ? si_pm4_set_reg_idx3 : si_pm4_set_reg));
si_pm4_set_reg(pm4, R_00B11C_SPI_SHADER_LATE_ALLOC_VS, S_00B11C_LIMIT(late_alloc_wave64));
}
@@ -1736,12 +1736,12 @@ static void si_shader_vs(struct si_screen *sscreen, struct si_shader *shader,
uint32_t rsrc2 = S_00B12C_USER_SGPR(num_user_sgprs) | S_00B12C_OC_LDS_EN(oc_lds_en) |
S_00B12C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0);
- if (sscreen->info.chip_class >= GFX10)
+ if (sscreen->info.gfx_level >= GFX10)
rsrc2 |= S_00B12C_USER_SGPR_MSB_GFX10(num_user_sgprs >> 5);
- else if (sscreen->info.chip_class == GFX9)
+ else if (sscreen->info.gfx_level == GFX9)
rsrc2 |= S_00B12C_USER_SGPR_MSB_GFX9(num_user_sgprs >> 5);
- if (sscreen->info.chip_class <= GFX9)
+ if (sscreen->info.gfx_level <= GFX9)
rsrc1 |= S_00B128_SGPRS((shader->config.num_sgprs - 1) / 8);
if (!sscreen->use_ngg_streamout) {
@@ -1920,7 +1920,7 @@ static void si_shader_ps(struct si_screen *sscreen, struct si_shader *shader)
* the color and Z formats to SPI_SHADER_ZERO. The hw will skip export
* instructions if any are present.
*/
- if ((sscreen->info.chip_class <= GFX9 || info->base.fs.uses_discard ||
+ if ((sscreen->info.gfx_level <= GFX9 || info->base.fs.uses_discard ||
shader->key.ps.part.epilog.alpha_func != PIPE_FUNC_ALWAYS) &&
!spi_shader_col_format && !info->writes_z && !info->writes_stencil &&
!info->writes_samplemask)
@@ -1936,7 +1936,7 @@ static void si_shader_ps(struct si_screen *sscreen, struct si_shader *shader)
S_0286D8_PS_W32_EN(shader->wave_size == 32);
/* Workaround when there are no PS inputs but LDS is used. */
- if (sscreen->info.chip_class == GFX11 && !num_interp && shader->config.lds_size)
+ if (sscreen->info.gfx_level == GFX11 && !num_interp && shader->config.lds_size)
spi_ps_in_control |= S_0286D8_PARAM_GEN(1);
shader->ctx_reg.ps.num_interp = num_interp;
@@ -1957,7 +1957,7 @@ static void si_shader_ps(struct si_screen *sscreen, struct si_shader *shader)
S_00B028_DX10_CLAMP(1) | S_00B028_MEM_ORDERED(si_shader_mem_ordered(shader)) |
S_00B028_FLOAT_MODE(shader->config.float_mode);
- if (sscreen->info.chip_class < GFX10) {
+ if (sscreen->info.gfx_level < GFX10) {
rsrc1 |= S_00B028_SGPRS((shader->config.num_sgprs - 1) / 8);
}
@@ -1967,7 +1967,7 @@ static void si_shader_ps(struct si_screen *sscreen, struct si_shader *shader)
S_00B02C_USER_SGPR(SI_PS_NUM_USER_SGPR) |
S_00B32C_SCRATCH_EN(shader->config.scratch_bytes_per_wave > 0));
- if (sscreen->info.chip_class >= GFX11) {
+ if (sscreen->info.gfx_level >= GFX11) {
unsigned cu_mask_ps = gfx103_get_cu_mask_ps(sscreen);
ac_set_reg_cu_en(pm4, R_00B004_SPI_SHADER_PGM_RSRC4_PS,
@@ -2184,7 +2184,7 @@ void si_ps_key_update_framebuffer(struct si_context *sctx)
/* 1D textures are allocated and used as 2D on GFX9. */
key->ps.mono.fbfetch_msaa = sctx->framebuffer.nr_samples > 1;
key->ps.mono.fbfetch_is_1D =
- sctx->chip_class != GFX9 &&
+ sctx->gfx_level != GFX9 &&
(tex->target == PIPE_TEXTURE_1D || tex->target == PIPE_TEXTURE_1D_ARRAY);
key->ps.mono.fbfetch_layered =
tex->target == PIPE_TEXTURE_1D_ARRAY || tex->target == PIPE_TEXTURE_2D_ARRAY ||
@@ -2220,7 +2220,7 @@ void si_ps_key_update_framebuffer_blend(struct si_context *sctx)
sctx->framebuffer.spi_shader_col_format);
key->ps.part.epilog.spi_shader_col_format &= blend->cb_target_enabled_4bit;
- key->ps.part.epilog.dual_src_blend_swizzle = sctx->chip_class >= GFX11 &&
+ key->ps.part.epilog.dual_src_blend_swizzle = sctx->gfx_level >= GFX11 &&
blend->dual_src_blend &&
(sel->info.colors_written_4bit & 0xff) == 0xff;
@@ -2242,7 +2242,7 @@ void si_ps_key_update_framebuffer_blend(struct si_context *sctx)
* to the range supported by the type if a channel has less
* than 16 bits and the export format is 16_ABGR.
*/
- if (sctx->chip_class <= GFX7 && sctx->family != CHIP_HAWAII) {
+ if (sctx->gfx_level <= GFX7 && sctx->family != CHIP_HAWAII) {
key->ps.part.epilog.color_is_int8 = sctx->framebuffer.color_is_int8;
key->ps.part.epilog.color_is_int10 = sctx->framebuffer.color_is_int10;
}
@@ -2280,7 +2280,7 @@ void si_ps_key_update_blend_rasterizer(struct si_context *sctx)
key->ps.part.epilog.alpha_to_one = blend->alpha_to_one && rs->multisample_enable;
key->ps.part.epilog.alpha_to_coverage_via_mrtz =
- sctx->chip_class >= GFX11 && blend->alpha_to_coverage && rs->multisample_enable &&
+ sctx->gfx_level >= GFX11 && blend->alpha_to_coverage && rs->multisample_enable &&
(ps->info.writes_z || ps->info.writes_stencil || ps->info.writes_samplemask);
}
@@ -2405,7 +2405,7 @@ static inline void si_shader_selector_key(struct pipe_context *ctx, struct si_sh
si_clear_vs_key_outputs(sctx, sel, key);
break;
case MESA_SHADER_TESS_CTRL:
- if (sctx->chip_class >= GFX9) {
+ if (sctx->gfx_level >= GFX9) {
si_get_vs_key_inputs(sctx, key, &key->ge.part.tcs.ls_prolog);
key->ge.part.tcs.ls = sctx->shader.vs.cso;
}
@@ -2417,7 +2417,7 @@ static inline void si_shader_selector_key(struct pipe_context *ctx, struct si_sh
si_clear_vs_key_outputs(sctx, sel, key);
break;
case MESA_SHADER_GEOMETRY:
- if (sctx->chip_class >= GFX9) {
+ if (sctx->gfx_level >= GFX9) {
if (sctx->shader.tes.cso) {
si_clear_vs_key_inputs(sctx, key, &key->ge.part.gs.vs_prolog);
key->ge.part.gs.es = sctx->shader.tes.cso;
@@ -2690,7 +2690,7 @@ current_not_ready:
shader->compiler_ctx_state.is_debug_context = sctx->is_debug;
/* If this is a merged shader, get the first shader's selector. */
- if (sscreen->info.chip_class >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX9) {
if (sel->stage == MESA_SHADER_TESS_CTRL)
previous_stage_sel = ((struct si_shader_key_ge*)key)->part.tcs.ls;
else if (sel->stage == MESA_SHADER_GEOMETRY)
@@ -3070,7 +3070,7 @@ void si_get_active_slot_masks(struct si_screen *sscreen, const struct si_shader_
* and so we can benefit from a better cache hit rate if we keep image
* descriptors together.
*/
- if (sscreen->info.chip_class < GFX11 && num_msaa_images)
+ if (sscreen->info.gfx_level < GFX11 && num_msaa_images)
num_images = SI_NUM_IMAGES + num_msaa_images; /* add FMASK descriptors */
start = si_get_image_slot(num_images - 1) / 2;
@@ -3087,7 +3087,7 @@ static void *si_create_shader_selector(struct pipe_context *ctx,
if (!sel)
return NULL;
- if (sscreen->info.chip_class == GFX11 && state->stream_output.num_outputs) {
+ if (sscreen->info.gfx_level == GFX11 && state->stream_output.num_outputs) {
fprintf(stderr, "radeonsi: streamout unimplemented\n");
abort();
}
@@ -3134,8 +3134,8 @@ static void *si_create_shader_selector(struct pipe_context *ctx,
* - num_invocations * gs.vertices_out > 256
* - LDS usage is too high
*/
- sel->tess_turns_off_ngg = sscreen->info.chip_class >= GFX10 &&
- sscreen->info.chip_class <= GFX10_3 &&
+ sel->tess_turns_off_ngg = sscreen->info.gfx_level >= GFX10 &&
+ sscreen->info.gfx_level <= GFX10_3 &&
(sel->info.base.gs.invocations * sel->info.base.gs.vertices_out > 256 ||
sel->info.base.gs.invocations * sel->info.base.gs.vertices_out *
(sel->info.num_outputs * 4 + 1) > 6500 /* max dw per GS primitive */);
@@ -3158,7 +3158,7 @@ static void *si_create_shader_selector(struct pipe_context *ctx,
}
bool ngg_culling_allowed =
- sscreen->info.chip_class >= GFX10 &&
+ sscreen->info.gfx_level >= GFX10 &&
sscreen->use_ngg_culling &&
sel->info.writes_position &&
!sel->info.writes_viewport_index && /* cull only against viewport 0 */
@@ -3362,7 +3362,7 @@ bool si_update_ngg(struct si_context *sctx)
*/
if (sctx->screen->info.has_vgt_flush_ngg_legacy_bug && !new_ngg) {
sctx->flags |= SI_CONTEXT_VGT_FLUSH;
- if (sctx->chip_class == GFX10) {
+ if (sctx->gfx_level == GFX10) {
/* Workaround for https://gitlab.freedesktop.org/mesa/mesa/-/issues/2941 */
si_flush_gfx_cs(sctx, RADEON_FLUSH_ASYNC_START_NEXT_GFX_IB_NOW, NULL);
}
@@ -3489,7 +3489,7 @@ void si_update_ps_kill_enable(struct si_context *sctx)
void si_update_vrs_flat_shading(struct si_context *sctx)
{
- if (sctx->chip_class >= GFX10_3 && sctx->shader.ps.cso) {
+ if (sctx->gfx_level >= GFX10_3 && sctx->shader.ps.cso) {
struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
struct si_shader_info *info = &sctx->shader.ps.cso->info;
bool allow_flat_shading = info->allow_flat_shading;
@@ -3574,10 +3574,10 @@ static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
switch (shader->selector->stage) {
case MESA_SHADER_VERTEX:
if (shader->key.ge.as_ls) {
- if (sctx->chip_class <= GFX8)
+ if (sctx->gfx_level <= GFX8)
state_index = SI_STATE_IDX(ls);
} else if (shader->key.ge.as_es) {
- if (sctx->chip_class <= GFX8)
+ if (sctx->gfx_level <= GFX8)
state_index = SI_STATE_IDX(es);
} else if (shader->key.ge.as_ngg) {
state_index = SI_STATE_IDX(gs);
@@ -3590,7 +3590,7 @@ static void si_delete_shader(struct si_context *sctx, struct si_shader *shader)
break;
case MESA_SHADER_TESS_EVAL:
if (shader->key.ge.as_es) {
- if (sctx->chip_class <= GFX8)
+ if (sctx->gfx_level <= GFX8)
state_index = SI_STATE_IDX(es);
} else if (shader->key.ge.as_ngg) {
state_index = SI_STATE_IDX(gs);
@@ -3703,7 +3703,7 @@ static void si_emit_vgt_flush(struct radeon_cmdbuf *cs)
/* Initialize state related to ESGS / GSVS ring buffers */
bool si_update_gs_ring_buffers(struct si_context *sctx)
{
- assert(sctx->chip_class < GFX11);
+ assert(sctx->gfx_level < GFX11);
struct si_shader_selector *es =
sctx->shader.tes.cso ? sctx->shader.tes.cso : sctx->shader.vs.cso;
@@ -3717,7 +3717,7 @@ bool si_update_gs_ring_buffers(struct si_context *sctx)
/* On GFX6-GFX7, the value comes from VGT_GS_VERTEX_REUSE = 16.
* On GFX8+, the value comes from VGT_VERTEX_REUSE_BLOCK_CNTL = 30 (+2).
*/
- unsigned gs_vertex_reuse = (sctx->chip_class >= GFX8 ? 32 : 16) * num_se;
+ unsigned gs_vertex_reuse = (sctx->gfx_level >= GFX8 ? 32 : 16) * num_se;
unsigned alignment = 256 * num_se;
/* The maximum size is 63.999 MB per SE. */
unsigned max_size = ((unsigned)(63.999 * 1024 * 1024) & ~255) * num_se;
@@ -3742,7 +3742,7 @@ bool si_update_gs_ring_buffers(struct si_context *sctx)
*
* GFX9 doesn't have the ESGS ring.
*/
- bool update_esgs = sctx->chip_class <= GFX8 && esgs_ring_size &&
+ bool update_esgs = sctx->gfx_level <= GFX8 && esgs_ring_size &&
(!sctx->esgs_ring || sctx->esgs_ring->width0 < esgs_ring_size);
bool update_gsvs =
gsvs_ring_size && (!sctx->gsvs_ring || sctx->gsvs_ring->width0 < gsvs_ring_size);
@@ -3774,7 +3774,7 @@ bool si_update_gs_ring_buffers(struct si_context *sctx)
/* Set ring bindings. */
if (sctx->esgs_ring) {
- assert(sctx->chip_class <= GFX8);
+ assert(sctx->gfx_level <= GFX8);
si_set_ring_buffer(sctx, SI_RING_ESGS, sctx->esgs_ring, 0, sctx->esgs_ring->width0, false,
false, 0, 0, 0);
}
@@ -3787,7 +3787,7 @@ bool si_update_gs_ring_buffers(struct si_context *sctx)
/* These registers will be shadowed, so set them only once. */
struct radeon_cmdbuf *cs = &sctx->gfx_cs;
- assert(sctx->chip_class >= GFX7);
+ assert(sctx->gfx_level >= GFX7);
si_emit_vgt_flush(cs);
@@ -3795,7 +3795,7 @@ bool si_update_gs_ring_buffers(struct si_context *sctx)
/* Set the GS registers. */
if (sctx->esgs_ring) {
- assert(sctx->chip_class <= GFX8);
+ assert(sctx->gfx_level <= GFX8);
radeon_set_uconfig_reg(R_030900_VGT_ESGS_RING_SIZE,
sctx->esgs_ring->width0 / 256);
}
@@ -3813,9 +3813,9 @@ bool si_update_gs_ring_buffers(struct si_context *sctx)
if (!pm4)
return false;
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
if (sctx->esgs_ring) {
- assert(sctx->chip_class <= GFX8);
+ assert(sctx->gfx_level <= GFX8);
si_pm4_set_reg(pm4, R_030900_VGT_ESGS_RING_SIZE, sctx->esgs_ring->width0 / 256);
}
if (sctx->gsvs_ring)
@@ -3996,7 +3996,7 @@ bool si_update_spi_tmpring_size(struct si_context *sctx, unsigned bytes)
si_context_add_resource_size(sctx, &sctx->scratch_buffer->b.b);
}
- if (sctx->chip_class < GFX11 && !si_update_scratch_relocs(sctx))
+ if (sctx->gfx_level < GFX11 && !si_update_scratch_relocs(sctx))
return false;
}
@@ -4032,7 +4032,7 @@ void si_init_tess_factor_ring(struct si_context *sctx)
si_resource(sctx->tess_rings)->gpu_address + sctx->screen->hs.tess_offchip_ring_size;
unsigned tf_ring_size_field = sctx->screen->hs.tess_factor_ring_size / 4;
- if (sctx->chip_class >= GFX11)
+ if (sctx->gfx_level >= GFX11)
tf_ring_size_field /= sctx->screen->info.max_se;
assert((tf_ring_size_field & C_030938_SIZE) == 0);
@@ -4042,7 +4042,7 @@ void si_init_tess_factor_ring(struct si_context *sctx)
/* TODO: tmz + shadowed_regs support */
struct radeon_cmdbuf *cs = &sctx->gfx_cs;
- assert(sctx->chip_class >= GFX7);
+ assert(sctx->gfx_level >= GFX7);
radeon_add_to_buffer_list(sctx, &sctx->gfx_cs, si_resource(sctx->tess_rings),
RADEON_USAGE_READWRITE | RADEON_PRIO_SHADER_RINGS);
@@ -4053,10 +4053,10 @@ void si_init_tess_factor_ring(struct si_context *sctx)
radeon_set_uconfig_reg(R_030938_VGT_TF_RING_SIZE,
S_030938_SIZE(tf_ring_size_field));
radeon_set_uconfig_reg(R_030940_VGT_TF_MEMORY_BASE, factor_va >> 8);
- if (sctx->chip_class >= GFX10) {
+ if (sctx->gfx_level >= GFX10) {
radeon_set_uconfig_reg(R_030984_VGT_TF_MEMORY_BASE_HI,
S_030984_BASE_HI(factor_va >> 40));
- } else if (sctx->chip_class == GFX9) {
+ } else if (sctx->gfx_level == GFX9) {
radeon_set_uconfig_reg(R_030944_VGT_TF_MEMORY_BASE_HI,
S_030944_BASE_HI(factor_va >> 40));
}
@@ -4070,14 +4070,14 @@ void si_init_tess_factor_ring(struct si_context *sctx)
si_cs_preamble_add_vgt_flush(sctx);
/* Append these registers to the init config state. */
- if (sctx->chip_class >= GFX7) {
+ if (sctx->gfx_level >= GFX7) {
si_pm4_set_reg(sctx->cs_preamble_state, R_030938_VGT_TF_RING_SIZE,
S_030938_SIZE(tf_ring_size_field));
si_pm4_set_reg(sctx->cs_preamble_state, R_030940_VGT_TF_MEMORY_BASE, factor_va >> 8);
- if (sctx->chip_class >= GFX10)
+ if (sctx->gfx_level >= GFX10)
si_pm4_set_reg(sctx->cs_preamble_state, R_030984_VGT_TF_MEMORY_BASE_HI,
S_030984_BASE_HI(factor_va >> 40));
- else if (sctx->chip_class == GFX9)
+ else if (sctx->gfx_level == GFX9)
si_pm4_set_reg(sctx->cs_preamble_state, R_030944_VGT_TF_MEMORY_BASE_HI,
S_030944_BASE_HI(factor_va >> 40));
si_pm4_set_reg(sctx->cs_preamble_state, R_03093C_VGT_HS_OFFCHIP_PARAM,
@@ -4141,10 +4141,10 @@ struct si_pm4_state *si_build_vgt_shader_config(struct si_screen *screen, union
} else if (key.u.gs)
stages |= S_028B54_VS_EN(V_028B54_VS_STAGE_COPY_SHADER);
- if (screen->info.chip_class >= GFX9)
+ if (screen->info.gfx_level >= GFX9)
stages |= S_028B54_MAX_PRIMGRP_IN_WAVE(2);
- if (screen->info.chip_class >= GFX10) {
+ if (screen->info.gfx_level >= GFX10) {
stages |= S_028B54_HS_W32_EN(key.u.hs_wave32) |
S_028B54_GS_W32_EN(key.u.gs_wave32) |
S_028B54_VS_W32_EN(key.u.vs_wave32);
@@ -4161,7 +4161,7 @@ static void si_emit_scratch_state(struct si_context *sctx)
struct radeon_cmdbuf *cs = &sctx->gfx_cs;
radeon_begin(cs);
- if (sctx->chip_class >= GFX11) {
+ if (sctx->gfx_level >= GFX11) {
radeon_set_context_reg_seq(R_0286E8_SPI_TMPRING_SIZE, 3);
radeon_emit(sctx->spi_tmpring_size); /* SPI_TMPRING_SIZE */
radeon_emit(sctx->scratch_buffer->gpu_address >> 8); /* SPI_GFX_SCRATCH_BASE_LO */
diff --git a/src/gallium/drivers/radeonsi/si_state_streamout.c b/src/gallium/drivers/radeonsi/si_state_streamout.c
index baceace822f..464259d2d9c 100644
--- a/src/gallium/drivers/radeonsi/si_state_streamout.c
+++ b/src/gallium/drivers/radeonsi/si_state_streamout.c
@@ -283,14 +283,14 @@ static void si_flush_vgt_streamout(struct si_context *sctx)
radeon_begin(cs);
/* The register is at different places on different ASICs. */
- if (sctx->chip_class >= GFX9) {
+ if (sctx->gfx_level >= GFX9) {
reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
radeon_emit(PKT3(PKT3_WRITE_DATA, 3, 0));
radeon_emit(S_370_DST_SEL(V_370_MEM_MAPPED_REGISTER) | S_370_ENGINE_SEL(V_370_ME));
radeon_emit(R_0300FC_CP_STRMOUT_CNTL >> 2);
radeon_emit(0);
radeon_emit(0);
- } else if (sctx->chip_class >= GFX7) {
+ } else if (sctx->gfx_level >= GFX7) {
reg_strmout_cntl = R_0300FC_CP_STRMOUT_CNTL;
radeon_set_uconfig_reg(reg_strmout_cntl, 0);
} else {
diff --git a/src/gallium/drivers/radeonsi/si_state_viewport.c b/src/gallium/drivers/radeonsi/si_state_viewport.c
index a1d38b0b2bd..522d6d43b3a 100644
--- a/src/gallium/drivers/radeonsi/si_state_viewport.c
+++ b/src/gallium/drivers/radeonsi/si_state_viewport.c
@@ -241,7 +241,7 @@ static void si_emit_one_scissor(struct si_context *ctx, struct radeon_cmdbuf *cs
/* Workaround for a hw bug on GFX6 that occurs when PA_SU_HARDWARE_-
* SCREEN_OFFSET != 0 and any_scissor.BR_X/Y <= 0.
*/
- if (ctx->chip_class == GFX6 && (final.maxx == 0 || final.maxy == 0)) {
+ if (ctx->gfx_level == GFX6 && (final.maxx == 0 || final.maxy == 0)) {
radeon_emit(S_028250_TL_X(1) | S_028250_TL_Y(1) | S_028250_WINDOW_OFFSET_DISABLE(1));
radeon_emit(S_028254_BR_X(1) | S_028254_BR_Y(1));
radeon_end();
@@ -290,8 +290,8 @@ static void si_emit_guardband(struct si_context *ctx)
/* GFX6-GFX7 need to align the offset to an ubertile consisting of all SEs. */
const unsigned hw_screen_offset_alignment =
- ctx->chip_class >= GFX11 ? 32 :
- ctx->chip_class >= GFX8 ? 16 : MAX2(ctx->screen->se_tile_repeat, 16);
+ ctx->gfx_level >= GFX11 ? 32 :
+ ctx->gfx_level >= GFX8 ? 16 : MAX2(ctx->screen->se_tile_repeat, 16);
/* Indexed by quantization modes */
static int max_viewport_size[] = {65535, 16383, 4095};
diff --git a/src/gallium/drivers/radeonsi/si_test_dma_perf.c b/src/gallium/drivers/radeonsi/si_test_dma_perf.c
index 3fad2ee688c..9bb07c7ec62 100644
--- a/src/gallium/drivers/radeonsi/si_test_dma_perf.c
+++ b/src/gallium/drivers/radeonsi/si_test_dma_perf.c
@@ -108,7 +108,7 @@ void si_test_dma_perf(struct si_screen *sscreen)
unsigned cs_dwords_per_thread =
test_cs ? cs_dwords_per_thread_list[cs_method % NUM_SHADERS] : 0;
- if (sctx->chip_class == GFX6) {
+ if (sctx->gfx_level == GFX6) {
/* GFX6 doesn't support CP DMA operations through L2. */
if (test_cp && cache_policy != L2_BYPASS)
continue;
@@ -120,7 +120,7 @@ void si_test_dma_perf(struct si_screen *sscreen)
/* SI_RESOURCE_FLAG_UNCACHED setting RADEON_FLAG_UNCACHED doesn't affect
* chips before gfx9.
*/
- if (test_cs && cache_policy && sctx->chip_class < GFX9)
+ if (test_cs && cache_policy && sctx->gfx_level < GFX9)
continue;
printf("%s ,", placement_str[placement]);
@@ -331,7 +331,7 @@ void si_test_dma_perf(struct si_screen *sscreen)
/* Ban CP DMA clears via MC on <= GFX8. They are super slow
* on GTT, which we can get due to BO evictions.
*/
- if (sctx->chip_class <= GFX8 && placement == 1 && r->is_cp &&
+ if (sctx->gfx_level <= GFX8 && placement == 1 && r->is_cp &&
r->cache_policy == L2_BYPASS)
continue;
diff --git a/src/gallium/drivers/radeonsi/si_test_image_copy_region.c b/src/gallium/drivers/radeonsi/si_test_image_copy_region.c
index 8865133e11b..0c72a63c3ab 100644
--- a/src/gallium/drivers/radeonsi/si_test_image_copy_region.c
+++ b/src/gallium/drivers/radeonsi/si_test_image_copy_region.c
@@ -238,7 +238,7 @@ static void print_image_attrs(struct si_screen *sscreen, struct si_texture *tex)
{
const char *mode;
- if (sscreen->info.chip_class >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX9) {
static const char *modes[32] = {
[ADDR_SW_LINEAR] = "LINEAR",
[ADDR_SW_4KB_S_X] = "4KB_S_X",
diff --git a/src/gallium/drivers/radeonsi/si_texture.c b/src/gallium/drivers/radeonsi/si_texture.c
index 7b110eeae80..d3bf3ad96fa 100644
--- a/src/gallium/drivers/radeonsi/si_texture.c
+++ b/src/gallium/drivers/radeonsi/si_texture.c
@@ -122,7 +122,7 @@ static unsigned si_texture_get_offset(struct si_screen *sscreen, struct si_textu
unsigned level, const struct pipe_box *box, unsigned *stride,
unsigned *layer_stride)
{
- if (sscreen->info.chip_class >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX9) {
unsigned pitch;
if (tex->surface.is_linear) {
pitch = tex->surface.u.gfx9.pitch[level];
@@ -188,13 +188,13 @@ static int si_init_surface(struct si_screen *sscreen, struct radeon_surf *surfac
(ptex->bind & PIPE_BIND_SHARED) || is_imported) {
flags |= RADEON_SURF_NO_HTILE;
} else if (tc_compatible_htile &&
- (sscreen->info.chip_class >= GFX9 || array_mode == RADEON_SURF_MODE_2D)) {
+ (sscreen->info.gfx_level >= GFX9 || array_mode == RADEON_SURF_MODE_2D)) {
/* TC-compatible HTILE only supports Z32_FLOAT.
* GFX9 also supports Z16_UNORM.
* On GFX8, promote Z16 to Z32. DB->CB copies will convert
* the format for transfers.
*/
- if (sscreen->info.chip_class == GFX8)
+ if (sscreen->info.gfx_level == GFX8)
bpe = 4;
flags |= RADEON_SURF_TC_COMPATIBLE_HTILE;
@@ -205,7 +205,7 @@ static int si_init_surface(struct si_screen *sscreen, struct radeon_surf *surfac
}
/* Disable DCC? (it can't be disabled if modifiers are used) */
- if (sscreen->info.chip_class >= GFX8 && modifier == DRM_FORMAT_MOD_INVALID && !is_imported) {
+ if (sscreen->info.gfx_level >= GFX8 && modifier == DRM_FORMAT_MOD_INVALID && !is_imported) {
/* Global options that disable DCC. */
if (ptex->flags & SI_RESOURCE_FLAG_DISABLE_DCC)
flags |= RADEON_SURF_DISABLE_DCC;
@@ -222,11 +222,11 @@ static int si_init_surface(struct si_screen *sscreen, struct radeon_surf *surfac
flags |= RADEON_SURF_DISABLE_DCC;
/* R9G9B9E5 isn't supported for rendering by older generations. */
- if (sscreen->info.chip_class < GFX10_3 &&
+ if (sscreen->info.gfx_level < GFX10_3 &&
ptex->format == PIPE_FORMAT_R9G9B9E5_FLOAT)
flags |= RADEON_SURF_DISABLE_DCC;
- switch (sscreen->info.chip_class) {
+ switch (sscreen->info.gfx_level) {
case GFX8:
/* Stoney: 128bpp MSAA textures randomly fail piglit tests with DCC. */
if (sscreen->info.family == CHIP_STONEY && bpe == 16 && ptex->nr_samples >= 2)
@@ -276,7 +276,7 @@ static int si_init_surface(struct si_screen *sscreen, struct radeon_surf *surfac
if (sscreen->debug_flags & DBG(NO_FMASK))
flags |= RADEON_SURF_NO_FMASK;
- if (sscreen->info.chip_class == GFX9 && (ptex->flags & SI_RESOURCE_FLAG_FORCE_MICRO_TILE_MODE)) {
+ if (sscreen->info.gfx_level == GFX9 && (ptex->flags & SI_RESOURCE_FLAG_FORCE_MICRO_TILE_MODE)) {
flags |= RADEON_SURF_FORCE_MICRO_TILE_MODE;
surface->micro_tile_mode = SI_RESOURCE_FLAG_MICRO_TILE_MODE_GET(ptex->flags);
}
@@ -285,11 +285,11 @@ static int si_init_surface(struct si_screen *sscreen, struct radeon_surf *surfac
/* GFX11 shouldn't get here because the flag is only used by the CB MSAA resolving
* that GFX11 doesn't have.
*/
- assert(sscreen->info.chip_class <= GFX10_3);
+ assert(sscreen->info.gfx_level <= GFX10_3);
flags |= RADEON_SURF_FORCE_SWIZZLE_MODE;
- if (sscreen->info.chip_class >= GFX10)
+ if (sscreen->info.gfx_level >= GFX10)
surface->u.gfx9.swizzle_mode = ADDR_SW_64KB_R_X;
}
@@ -560,7 +560,7 @@ static bool si_displayable_dcc_needs_explicit_flush(struct si_texture *tex)
{
struct si_screen *sscreen = (struct si_screen *)tex->buffer.b.b.screen;
- if (sscreen->info.chip_class <= GFX8)
+ if (sscreen->info.gfx_level <= GFX8)
return false;
/* With modifiers and > 1 planes any applications will know that they
@@ -600,7 +600,7 @@ static bool si_resource_get_param(struct pipe_screen *screen, struct pipe_contex
if (resource->target == PIPE_BUFFER)
*value = 0;
else
- *value = ac_surface_get_plane_stride(sscreen->info.chip_class,
+ *value = ac_surface_get_plane_stride(sscreen->info.gfx_level,
&tex->surface, plane, level);
return true;
@@ -609,7 +609,7 @@ static bool si_resource_get_param(struct pipe_screen *screen, struct pipe_contex
*value = 0;
} else {
uint64_t level_offset = tex->surface.is_linear ? tex->surface.u.gfx9.offset[level] : 0;
- *value = ac_surface_get_plane_offset(sscreen->info.chip_class,
+ *value = ac_surface_get_plane_offset(sscreen->info.gfx_level,
&tex->surface, plane, layer) + level_offset;
}
return true;
@@ -692,9 +692,9 @@ static bool si_texture_get_handle(struct pipe_screen *screen, struct pipe_contex
return false;
if (plane) {
- whandle->offset = ac_surface_get_plane_offset(sscreen->info.chip_class,
+ whandle->offset = ac_surface_get_plane_offset(sscreen->info.gfx_level,
&tex->surface, plane, 0);
- whandle->stride = ac_surface_get_plane_stride(sscreen->info.chip_class,
+ whandle->stride = ac_surface_get_plane_stride(sscreen->info.gfx_level,
&tex->surface, plane, 0);
whandle->modifier = tex->surface.modifier;
return sscreen->ws->buffer_get_handle(sscreen->ws, res->buf, whandle);
@@ -749,7 +749,7 @@ static bool si_texture_get_handle(struct pipe_screen *screen, struct pipe_contex
if ((!res->b.is_shared || update_metadata) && whandle->offset == 0)
si_set_tex_bo_metadata(sscreen, tex);
- if (sscreen->info.chip_class >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX9) {
slice_size = tex->surface.u.gfx9.surf_slice_size;
} else {
slice_size = (uint64_t)tex->surface.u.legacy.level[0].slice_size_dw * 4;
@@ -846,7 +846,7 @@ void si_print_texture_info(struct si_screen *sscreen, struct si_texture *tex,
u_log_printf(log, "%s", surf_info);
free(surf_info);
- if (sscreen->info.chip_class >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX9) {
return;
}
@@ -947,10 +947,10 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
* GFX9 and later use the same tiling for both, so TC-compatible HTILE can be
* enabled on demand.
*/
- tex->tc_compatible_htile = (sscreen->info.chip_class == GFX8 &&
+ tex->tc_compatible_htile = (sscreen->info.gfx_level == GFX8 &&
tex->surface.flags & RADEON_SURF_TC_COMPATIBLE_HTILE) ||
/* Mipmapping always starts TC-compatible. */
- (sscreen->info.chip_class >= GFX8 &&
+ (sscreen->info.gfx_level >= GFX8 &&
tex->surface.flags & RADEON_SURF_TC_COMPATIBLE_HTILE &&
tex->buffer.b.b.last_level > 0);
@@ -958,7 +958,7 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
* - GFX8 only supports Z32_FLOAT.
* - GFX9 only supports Z32_FLOAT and Z16_UNORM. */
if (tex->surface.flags & RADEON_SURF_TC_COMPATIBLE_HTILE) {
- if (sscreen->info.chip_class >= GFX9 && base->format == PIPE_FORMAT_Z16_UNORM)
+ if (sscreen->info.gfx_level >= GFX9 && base->format == PIPE_FORMAT_Z16_UNORM)
tex->db_render_format = base->format;
else {
tex->db_render_format = PIPE_FORMAT_Z32_FLOAT;
@@ -980,13 +980,13 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
if (tex->is_depth) {
tex->htile_stencil_disabled = !tex->surface.has_stencil;
- if (sscreen->info.chip_class >= GFX9) {
+ if (sscreen->info.gfx_level >= GFX9) {
tex->can_sample_z = true;
tex->can_sample_s = true;
/* Stencil texturing with HTILE doesn't work
* with mipmapping on Navi10-14. */
- if (sscreen->info.chip_class == GFX10 && base->last_level > 0)
+ if (sscreen->info.gfx_level == GFX10 && base->last_level > 0)
tex->htile_stencil_disabled = true;
} else {
tex->can_sample_z = !tex->surface.u.legacy.depth_adjusted;
@@ -997,7 +997,7 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
* because we lose a little bit of Z precision in order to make space for
* stencil in HTILE.
*/
- if (sscreen->info.chip_class == GFX8 &&
+ if (sscreen->info.gfx_level == GFX8 &&
tex->surface.flags & RADEON_SURF_TC_COMPATIBLE_HTILE)
tex->htile_stencil_disabled = false;
}
@@ -1005,7 +1005,7 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
tex->db_compatible = surface->flags & RADEON_SURF_ZBUFFER;
} else {
if (tex->surface.cmask_offset) {
- assert(sscreen->info.chip_class < GFX11);
+ assert(sscreen->info.gfx_level < GFX11);
tex->cb_color_info |= S_028C70_FAST_CLEAR(1);
tex->cmask_buffer = &tex->buffer;
}
@@ -1057,7 +1057,7 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
if (tex->is_depth && tex->surface.meta_offset) {
uint32_t clear_value = 0;
- if (sscreen->info.chip_class >= GFX9 || tex->tc_compatible_htile)
+ if (sscreen->info.gfx_level >= GFX9 || tex->tc_compatible_htile)
clear_value = 0x0000030F;
assert(num_clears < ARRAY_SIZE(clears));
@@ -1078,7 +1078,7 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
assert(num_clears < ARRAY_SIZE(clears));
si_init_buffer_clear(&clears[num_clears++], &tex->buffer.b.b, tex->surface.meta_offset,
tex->surface.meta_size, DCC_CLEAR_0000);
- } else if (sscreen->info.chip_class >= GFX9) {
+ } else if (sscreen->info.gfx_level >= GFX9) {
/* Clear to uncompressed. Clearing this to black is complicated. */
assert(num_clears < ARRAY_SIZE(clears));
si_init_buffer_clear(&clears[num_clears++], &tex->buffer.b.b, tex->surface.meta_offset,
@@ -1125,7 +1125,7 @@ static struct si_texture *si_texture_create_object(struct pipe_screen *screen,
assert(num_clears < ARRAY_SIZE(clears));
si_init_buffer_clear(&clears[num_clears++], &tex->buffer.b.b, tex->surface.display_dcc_offset,
tex->surface.u.gfx9.color.display_dcc_size,
- sscreen->info.chip_class >= GFX11 ? GFX11_DCC_CLEAR_1111_UNORM
+ sscreen->info.gfx_level >= GFX11 ? GFX11_DCC_CLEAR_1111_UNORM
: GFX8_DCC_CLEAR_1111);
}
@@ -1187,7 +1187,7 @@ static enum radeon_surf_mode si_choose_tiling(struct si_screen *sscreen,
/* Avoid Z/S decompress blits by forcing TC-compatible HTILE on GFX8,
* which requires 2D tiling.
*/
- if (sscreen->info.chip_class == GFX8 && tc_compatible_htile)
+ if (sscreen->info.gfx_level == GFX8 && tc_compatible_htile)
return RADEON_SURF_MODE_2D;
/* Handle common candidates for the linear mode.
@@ -1255,7 +1255,7 @@ si_texture_create_with_modifier(struct pipe_screen *screen,
bool is_flushed_depth = templ->flags & SI_RESOURCE_FLAG_FLUSHED_DEPTH ||
templ->flags & SI_RESOURCE_FLAG_FORCE_LINEAR;
bool tc_compatible_htile =
- sscreen->info.chip_class >= GFX8 &&
+ sscreen->info.gfx_level >= GFX8 &&
/* There are issues with TC-compatible HTILE on Tonga (and
* Iceland is the same design), and documented bug workarounds
* don't help. For example, this fails:
@@ -1346,7 +1346,7 @@ bool si_texture_commit(struct si_context *ctx, struct si_resource *res, unsigned
unsigned blks = util_format_get_blocksize(format);
unsigned samples = MAX2(1, res->b.b.nr_samples);
- assert(ctx->chip_class >= GFX9);
+ assert(ctx->gfx_level >= GFX9);
unsigned row_pitch = surface->u.gfx9.prt_level_pitch[level] *
surface->prt_tile_height * surface->prt_tile_depth * blks * samples;
@@ -1599,9 +1599,9 @@ static struct pipe_resource *si_texture_from_winsys_buffer(struct si_screen *ssc
while (next_plane) {
struct si_auxiliary_texture *ptex = (struct si_auxiliary_texture *)next_plane;
if (plane >= nplanes || ptex->buffer != tex->buffer.buf ||
- ptex->offset != ac_surface_get_plane_offset(sscreen->info.chip_class,
+ ptex->offset != ac_surface_get_plane_offset(sscreen->info.gfx_level,
&tex->surface, plane, 0) ||
- ptex->stride != ac_surface_get_plane_stride(sscreen->info.chip_class,
+ ptex->stride != ac_surface_get_plane_stride(sscreen->info.gfx_level,
&tex->surface, plane, 0)) {
si_texture_reference(&tex, NULL);
return NULL;
@@ -1624,7 +1624,7 @@ static struct pipe_resource *si_texture_from_winsys_buffer(struct si_screen *ssc
return NULL;
}
- if (ac_surface_get_plane_offset(sscreen->info.chip_class, &tex->surface, 0, 0) +
+ if (ac_surface_get_plane_offset(sscreen->info.gfx_level, &tex->surface, 0, 0) +
tex->surface.total_size > buf->size ||
buf->alignment_log2 < tex->surface.alignment_log2) {
si_texture_reference(&tex, NULL);
@@ -2003,7 +2003,7 @@ bool vi_dcc_formats_compatible(struct si_screen *sscreen, enum pipe_format forma
const struct util_format_description *desc1, *desc2;
/* All formats are compatible on GFX11. */
- if (sscreen->info.chip_class >= GFX11)
+ if (sscreen->info.gfx_level >= GFX11)
return true;
/* No format change - exit early. */
@@ -2137,7 +2137,7 @@ static void si_surface_destroy(struct pipe_context *pipe, struct pipe_surface *s
FREE(surface);
}
-unsigned si_translate_colorswap(enum chip_class chip_class, enum pipe_format format,
+unsigned si_translate_colorswap(enum amd_gfx_level gfx_level, enum pipe_format format,
bool do_endian_swap)
{
const struct util_format_description *desc = util_format_description(format);
@@ -2147,7 +2147,7 @@ unsigned si_translate_colorswap(enum chip_class chip_class, enum pipe_format for
if (format == PIPE_FORMAT_R11G11B10_FLOAT) /* isn't plain */
return V_028C70_SWAP_STD;
- if (chip_class >= GFX10_3 &&
+ if (gfx_level >= GFX10_3 &&
format == PIPE_FORMAT_R9G9B9E5_FLOAT) /* isn't plain */
return V_028C70_SWAP_STD;
@@ -2335,7 +2335,7 @@ static int si_get_sparse_texture_virtual_page_size(struct pipe_screen *screen,
   * ARB_sparse_texture2 needs MS texture support, but we relax it by just returning
   * no page size for GFX10+ to keep the shader query capability.
*/
- if (multi_sample && sscreen->info.chip_class != GFX9)
+ if (multi_sample && sscreen->info.gfx_level != GFX9)
return 0;
   /* Unsupported formats. */
@@ -2378,7 +2378,7 @@ void si_init_screen_texture_functions(struct si_screen *sscreen)
* which works around some applications using modifiers that are not
* allowed in combination with lack of error reporting in
* gbm_dri_surface_create */
- if (sscreen->info.chip_class >= GFX9 && sscreen->info.kernel_has_modifiers) {
+ if (sscreen->info.gfx_level >= GFX9 && sscreen->info.kernel_has_modifiers) {
sscreen->b.resource_create_with_modifiers = si_texture_create_with_modifiers;
sscreen->b.query_dmabuf_modifiers = si_query_dmabuf_modifiers;
sscreen->b.is_dmabuf_modifier_supported = si_is_dmabuf_modifier_supported;
diff --git a/src/gallium/drivers/radeonsi/si_uvd.c b/src/gallium/drivers/radeonsi/si_uvd.c
index 11436b07350..2a3ccf35dcf 100644
--- a/src/gallium/drivers/radeonsi/si_uvd.c
+++ b/src/gallium/drivers/radeonsi/si_uvd.c
@@ -93,7 +93,7 @@ static struct pb_buffer *si_uvd_set_dtb(struct ruvd_msg *msg, struct vl_video_bu
struct si_texture *luma = (struct si_texture *)buf->resources[0];
struct si_texture *chroma = (struct si_texture *)buf->resources[1];
enum ruvd_surface_type type =
- (sscreen->info.chip_class >= GFX9) ? RUVD_SURFACE_TYPE_GFX9 : RUVD_SURFACE_TYPE_LEGACY;
+ (sscreen->info.gfx_level >= GFX9) ? RUVD_SURFACE_TYPE_GFX9 : RUVD_SURFACE_TYPE_LEGACY;
msg->body.decode.dt_field_mode = buf->base.interlaced;