author     Jerome Glisse <jglisse@redhat.com>    2010-02-23 23:02:02 +0100
committer  Jerome Glisse <jglisse@redhat.com>    2010-02-23 23:02:02 +0100
commit     d18e6b648bd5535d8d02063173284d6f16d18633 (patch)
tree       2ca4e9da9813e8020d14d094cc7e67487147847e
parent     f246c3cd76c42b77aff80485dfe6b57e6c7b3db7 (diff)
batches -> scheduler
-rw-r--r--  r600_batch.c   108
-rw-r--r--  r600_winsys.h    4
-rw-r--r--  r700_atom.c      4
3 files changed, 58 insertions(+), 58 deletions(-)
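The change is a mechanical rename: the r600_batches container and every helper operating on it become r600_scheduler. The struct definition itself is not part of this diff; below is a minimal sketch reconstructed from the fields the patch touches, with exact types, ordering, and array bounds as assumptions:

/*
 * Sketch only -- not from the patch. Field set inferred from the uses of
 * scheduler->ib, ->nbatch, ->npkts, ->batch[] and ->last_id[] in the diff
 * below; types and layout are assumptions.
 */
struct r600_scheduler {
	struct radeon_ib	*ib;		/* indirect buffer packets are built into */
	unsigned		nbatch;		/* batches queued since last flush */
	unsigned		npkts;		/* dwords queued, incl. default state */
	unsigned		last_id[R600_BATCH_NATOMS];	/* id of last queued atom per slot */
	struct r600_batch	batch[R600_MAX_BATCH];		/* pending draw batches */
};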
diff --git a/r600_batch.c b/r600_batch.c
index b5b5ba4..cadf5c5 100644
--- a/r600_batch.c
+++ b/r600_batch.c
@@ -68,74 +68,74 @@ void r600_atom_flush_add(struct r600_batch *batch, struct radeon_bo *bo, u32 fla
}
/*
- * r600_batches
+ * r600_scheduler
*/
-static void r600_batches_clear_locked(struct r600_winsys *rw, struct r600_batches *batches)
+static void r600_scheduler_clear_locked(struct r600_winsys *rw, struct r600_scheduler *scheduler)
{
struct r600_batch *batch, *n;
int i, j;
- for (i = 0; i < batches->nbatch; i++) {
+ for (i = 0; i < scheduler->nbatch; i++) {
for (j = 0; j < R600_BATCH_NATOMS; j++) {
- r600_atom_destroy(batches->batch[i].atoms[j]);
+ r600_atom_destroy(scheduler->batch[i].atoms[j]);
}
}
- batches->ib->cpkts = 0;
- batches->nbatch = 0;
- r700_batches_states_default(rw, batches);
- batches->npkts = batches->ib->cpkts;
+ scheduler->ib->cpkts = 0;
+ scheduler->nbatch = 0;
+ r700_scheduler_states_default(rw, scheduler);
+ scheduler->npkts = scheduler->ib->cpkts;
}
-int r600_batches_flush(struct r600_winsys *rw)
+int r600_scheduler_flush(struct r600_winsys *rw)
{
- struct r600_batches *batches = &rw->batches;
+ struct r600_scheduler *scheduler = &rw->scheduler;
int r, i, j;
- for (i = 0; i < batches->nbatch; i++) {
- for (j = 0; j < batches->batch[i].nflush; j++) {
- r600_emit_flush(rw, batches->ib,
- batches->batch[i].flush[j].bo,
- batches->batch[i].flush[j].flags);
+ for (i = 0; i < scheduler->nbatch; i++) {
+ for (j = 0; j < scheduler->batch[i].nflush; j++) {
+ r600_emit_flush(rw, scheduler->ib,
+ scheduler->batch[i].flush[j].bo,
+ scheduler->batch[i].flush[j].flags);
}
- for (j = 0; j < batches->batch[i].nemit_atoms; j++) {
- r = batches->batch[i].emit_atoms[j]->emit(rw,
- batches->batch[i].emit_atoms[j],
- &batches->batch[i],
- batches->ib);
+ for (j = 0; j < scheduler->batch[i].nemit_atoms; j++) {
+ r = scheduler->batch[i].emit_atoms[j]->emit(rw,
+ scheduler->batch[i].emit_atoms[j],
+ &scheduler->batch[i],
+ scheduler->ib);
if (r)
goto out_err;
}
- r = r600_draw_cmd_emit(batches->ib, &batches->batch[i].drm);
+ r = r600_draw_cmd_emit(scheduler->ib, &scheduler->batch[i].drm);
/* flush + wait until */
- batches->ib->ptr[batches->ib->cpkts++] = PKT3(PKT3_EVENT_WRITE, 0);
- batches->ib->ptr[batches->ib->cpkts++] = 0x00000016;
- batches->ib->ptr[batches->ib->cpkts++] = PKT3(PKT3_SET_CONFIG_REG, 1);
- batches->ib->ptr[batches->ib->cpkts++] = 0x00000010;
- batches->ib->ptr[batches->ib->cpkts++] = 0x00028000;
+ scheduler->ib->ptr[scheduler->ib->cpkts++] = PKT3(PKT3_EVENT_WRITE, 0);
+ scheduler->ib->ptr[scheduler->ib->cpkts++] = 0x00000016;
+ scheduler->ib->ptr[scheduler->ib->cpkts++] = PKT3(PKT3_SET_CONFIG_REG, 1);
+ scheduler->ib->ptr[scheduler->ib->cpkts++] = 0x00000010;
+ scheduler->ib->ptr[scheduler->ib->cpkts++] = 0x00028000;
}
- printf("ib %d dw\n", batches->ib->cpkts);
- r = radeon_ib_schedule(rw, batches->ib);
+ printf("ib %d dw\n", scheduler->ib->cpkts);
+ r = radeon_ib_schedule(rw, scheduler->ib);
out_err:
/* FIXME helper function */
- batches->ib->cpkts = 0;
- batches->ib->nrelocs = 0;
- r600_batches_clear_locked(rw, batches);
+ scheduler->ib->cpkts = 0;
+ scheduler->ib->nrelocs = 0;
+ r600_scheduler_clear_locked(rw, scheduler);
return r;
}
-int r600_batches_queue(struct r600_winsys *rw, struct r600_request *rq)
+int r600_scheduler_queue(struct r600_winsys *rw, struct r600_request *rq)
{
struct drm_r600_batch *batch = rq->data;
struct r600_batch *rbatch;
- struct r600_batches *batches = &rw->batches;
+ struct r600_scheduler *scheduler = &rw->scheduler;
int r, i, j;
- if (batches->nbatch >= R600_MAX_BATCH) {
- r = r600_batches_flush(rw);
+ if (scheduler->nbatch >= R600_MAX_BATCH) {
+ r = r600_scheduler_flush(rw);
if (r)
return r;
}
- rbatch = &batches->batch[batches->nbatch];
+ rbatch = &scheduler->batch[scheduler->nbatch];
memset(rbatch, 0, sizeof(struct r600_batch));
i = 0;
if (batch->blend == NULL || batch->cb_cntl == NULL ||
@@ -183,14 +183,14 @@ reprocess:
/* if batch is bigger than ib size it's an invalid one, this should
* not happen
*/
- if (rbatch->npkts > batches->ib->length_dw) {
+ if (rbatch->npkts > scheduler->ib->length_dw) {
fprintf(stderr, "single batch too big (%d) to fit into ib (%d)\n",
- rbatch->npkts, batches->ib->length_dw);
+ rbatch->npkts, scheduler->ib->length_dw);
goto out_err;
}
/* flush or not ? */
- if (batches->npkts + rbatch->npkts > batches->ib->length_dw) {
- r = r600_batches_flush(rw);
+ if (scheduler->npkts + rbatch->npkts > scheduler->ib->length_dw) {
+ r = r600_scheduler_flush(rw);
if (r)
goto out_err;
goto reprocess;
@@ -198,12 +198,12 @@ reprocess:
/* batch is queued */
for (i = 0; i < R600_BATCH_NATOMS; i++) {
if (rbatch->atoms[i]) {
- batches->last_id[i] = rbatch->atoms[i]->id;
+ scheduler->last_id[i] = rbatch->atoms[i]->id;
}
}
- printf("batch %d dw batches with %d dw\n", rbatch->npkts, batches->npkts);
- batches->npkts += rbatch->npkts;
- batches->nbatch++;
+ printf("batch %d dw scheduler with %d dw\n", rbatch->npkts, scheduler->npkts);
+ scheduler->npkts += rbatch->npkts;
+ scheduler->nbatch++;
return 0;
out_err:
for (i = 0; i < R600_BATCH_NATOMS; i++) {
@@ -214,23 +214,23 @@ out_err:
return r;
}
-static int r600_batches_init(struct r600_winsys *rw, struct r600_batches *batches)
+static int r600_scheduler_init(struct r600_winsys *rw, struct r600_scheduler *scheduler)
{
int r;
- memset(batches, 0 , sizeof(struct r600_batches));
- r = radeon_ib_get(rw, &batches->ib);
+ memset(scheduler, 0 , sizeof(struct r600_scheduler));
+ r = radeon_ib_get(rw, &scheduler->ib);
if (r)
return r;
- r600_batches_clear_locked(rw, batches);
+ r600_scheduler_clear_locked(rw, scheduler);
return 0;
}
-static void r600_batches_cleanup_locked(struct r600_winsys *rw, struct r600_batches *batches)
+static void r600_scheduler_cleanup_locked(struct r600_winsys *rw, struct r600_scheduler *scheduler)
{
- r600_batches_clear_locked(rw, batches);
- radeon_ib_free(batches->ib);
- batches->ib = NULL;
+ r600_scheduler_clear_locked(rw, scheduler);
+ radeon_ib_free(scheduler->ib);
+ scheduler->ib = NULL;
}
int r600_atoms_init(struct r600_winsys *rdev)
@@ -238,10 +238,10 @@ int r600_atoms_init(struct r600_winsys *rdev)
rdev->npipes = 2;
rdev->nbanks = 4;
rdev->group_bytes = 256;
- return r600_batches_init(rdev, &rdev->batches);
+ return r600_scheduler_init(rdev, &rdev->scheduler);
}
void r600_atoms_release(struct r600_winsys *rdev)
{
- r600_batches_cleanup_locked(rdev, &rdev->batches);
+ r600_scheduler_cleanup_locked(rdev, &rdev->scheduler);
}
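In the flush path above, each draw is followed by a five-dword fence (the "flush + wait until" comment) before the next batch is emitted. An annotated reading of those dwords; the raw values are verbatim from the patch, while the names in the comments are assumptions based on common R600 PM4/register conventions, not confirmed by this commit:

/* Annotation sketch of the post-draw fence in r600_scheduler_flush(). */
ib->ptr[ib->cpkts++] = PKT3(PKT3_EVENT_WRITE, 0);
ib->ptr[ib->cpkts++] = 0x00000016;	/* assumed: CACHE_FLUSH_AND_INV_EVENT */
ib->ptr[ib->cpkts++] = PKT3(PKT3_SET_CONFIG_REG, 1);
ib->ptr[ib->cpkts++] = 0x00000010;	/* assumed: (WAIT_UNTIL 0x8040 - config base 0x8000) >> 2 */
ib->ptr[ib->cpkts++] = 0x00028000;	/* assumed: WAIT_3D_IDLE | WAIT_3D_IDLECLEAN */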
diff --git a/r600_winsys.h b/r600_winsys.h
index cb838c6..43fc25e 100644
--- a/r600_winsys.h
+++ b/r600_winsys.h
@@ -235,9 +235,9 @@ struct r600_request {
struct radeon_bo *bo[32];
};
+extern int r600_scheduler_queue(struct r600_winsys*, struct r600_request*);
+extern int r600_scheduler_flush(struct r600_winsys*);
extern void *r600_atom_state(struct r600_atom *atom);
-extern int r600_batches_queue(struct r600_winsys*, struct r600_request*);
-extern int r600_batches_flush(struct r600_winsys*);
extern struct r600_atom *r600_atom_create(struct r600_winsys*, struct r600_request*);
extern struct r600_atom *r600_atom_destroy(struct r600_atom *atom);
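For callers, the rename only changes the two entry points declared above: requests are still queued one at a time and submitted either implicitly (when R600_MAX_BATCH or the ib size is hit in r600_scheduler_queue) or explicitly via r600_scheduler_flush. A minimal usage sketch, assuming a hypothetical build_request() helper that fills in the atoms and draw command:

/* Usage sketch only; build_request() is hypothetical, not part of this API. */
static int submit_one(struct r600_winsys *rw, struct r600_request *rq)
{
	int r;

	build_request(rq);			/* hypothetical: set atoms + draw cmd on rq */
	r = r600_scheduler_queue(rw, rq);	/* may flush internally when the queue fills */
	if (r)
		return r;
	return r600_scheduler_flush(rw);	/* force submission of anything still queued */
}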
diff --git a/r700_atom.c b/r700_atom.c
index 9bd2d44..c3b8afd 100644
--- a/r700_atom.c
+++ b/r700_atom.c
@@ -17,9 +17,9 @@
#include "r600.h"
#include "r600d.h"
-void r700_batches_states_default(struct r600_winsys *rdev, struct r600_batches *batches)
+void r700_scheduler_states_default(struct r600_winsys *rdev, struct r600_scheduler *scheduler)
{
- struct radeon_ib *ib = batches->ib;
+ struct radeon_ib *ib = scheduler->ib;
#if 0
ib->ptr[ib->cpkts++] = PKT3(PKT3_START_3D_CMDBUF, 0);