summaryrefslogtreecommitdiff
path: root/.gitlab-ci/lava.yml.jinja2
blob: e9735d6b3af25f750bb6cc2273c367ceadd54664 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
# LAVA job definition for a Mesa CI test run. All {{ ... }} placeholders are
# filled in by the submitting script at job-generation time.
job_name: mesa-{{ test_suite }}-{{ deqp_version }}-{{ gpu_version }} {{ pipeline_info }}
device_type: {{ device_type }}
context:
  # rootwait: block until the NFS root device is available; init=/init matches
  # the layout of the lava-rootfs image deployed below.
  extra_nfsroot_args: " init=/init rootwait"
timeouts:
  job:
    # NOTE(review): the per-action timeouts later in this file (10 + 25 + 30
    # minutes) sum to more than this job timeout — confirm which limit LAVA
    # is expected to enforce here.
    minutes: 30
priority: 75
# Job visibility is restricted to this LAVA group (normalized to the file's
# 2-space indent convention; previously this stanza used 4 spaces).
visibility:
  group:
    - "Collabora+fdo"
{% if tags %}
{% set lavatags = tags.split(',') %}
# Optional device tags, passed in as a comma-separated string.
tags:
{% for tag in lavatags %}
  - {{ tag }}
{% endfor %}
{% endif %}
# LAVA runs these actions in order: deploy the artifacts, boot the device,
# then execute the test definition.
actions:
- deploy:
    timeout:
      minutes: 10
    # Serve kernel + rootfs to the device over TFTP/NFS.
    to: tftp
    kernel:
      url: {{ base_artifacts_url }}/{{ kernel_image_name }}
{% if kernel_image_type %}
      # NOTE(review): kernel_image_type is expanded verbatim at this indent
      # level, so it is presumably a complete "type: <image-type>" mapping
      # entry supplied by the submitter — confirm against the generator script.
      {{ kernel_image_type }}
{% endif %}
    nfsrootfs:
      url: {{ base_artifacts_url }}/lava-rootfs.tgz
      compression: gz
{% if dtb %}
    # Device tree blob, only for boards that need one.
    dtb:
      url: {{ base_artifacts_url }}/{{ dtb }}.dtb
{% endif %}
    os: oe
- boot:
    timeout:
      minutes: 25
    method: {{ boot_method }}
{% if boot_method == "fastboot" %}
{#
   For fastboot, LAVA doesn't know how to unpack the rootfs/apply overlay/repack,
   so we transfer the overlay over the network after boot.
#}
    transfer_overlay:
      download_command: wget -S --progress=dot:giga
      unpack_command: tar -C / -xzf
{% else %}
    # Non-fastboot devices boot with the device type's standard NFS commands.
    commands: nfs
{% endif %}
    prompts:
      # Boot is considered complete when the rootfs's shell prompt appears.
      - 'lava-shell:'
- test:
    timeout:
      minutes: 30
    failure_retry: 1
    definitions:
    - repository:
        metadata:
          format: Lava-Test Test Definition 1.0
          name: mesa
          description: "Mesa test plan"
          os:
          - oe
          scope:
          - functional
        run:
          # Each step is a shell command executed on the device; LAVA matches
          # the final "mesa: <result>" line against the parse pattern below.
          steps:
          # Minimal pseudo-filesystem setup — the rootfs starts with nothing
          # mounted.
          - mount -t proc none /proc
          - mount -t sysfs none /sys
          - mount -t devtmpfs none /dev || echo possibly already mounted
          - mkdir -p /dev/pts
          - mount -t devpts devpts /dev/pts
          - mkdir -p /dev/shm
          - mount -t tmpfs tmpfs /dev/shm
          - mount -t tmpfs tmpfs /tmp

          - echo "nameserver 8.8.8.8" > /etc/resolv.conf
          - echo "$NFS_SERVER_IP caching-proxy" >> /etc/hosts
          # Set the clock via SNTP; retried a few times for flaky networks.
          - for i in 1 2 3; do sntp -sS pool.ntp.org && break || sleep 2; done

          - modprobe amdgpu || true

          # Disable GPU frequency scaling
          - DEVFREQ_GOVERNOR=`find /sys/devices -name governor | grep gpu || true`
          - echo performance > $DEVFREQ_GOVERNOR || true

          # Disable CPU frequency scaling
          - echo performance | tee -a /sys/devices/system/cpu/cpufreq/policy*/scaling_governor || true

          # Disable GPU runtime PM
          - GPU_AUTOSUSPEND=`find /sys/devices -name autosuspend_delay_ms | grep gpu | head -1`
          - echo -1 > $GPU_AUTOSUSPEND || true

{% if env_vars %}
          - export {{ env_vars }}
{% endif %}

          # runner script assumes some stuff is in pwd
          - cd /

          - wget -S --progress=dot:giga -O- {{ mesa_url }} | tar -xz
          - mkdir -p $CI_PROJECT_DIR
          - ln -sf /install $CI_PROJECT_DIR/install

          - export DEQP_NO_SAVE_RESULTS=1
          - export GPU_VERSION={{ gpu_version }}
          - export DEQP_VER={{ deqp_version }}

          # Store Mesa's disk cache under /tmp, rather than sending it out over NFS.
          - export XDG_CACHE_HOME=/tmp

          - export PYTHONPATH=$(python3 -c "import sys;print(\":\".join(sys.path))")
          - export PIGLIT_REPLAY_EXTRA_ARGS="--keep-image"
          - export PIGLIT_REPLAY_REFERENCE_IMAGES_BASE_URL="/mesa-tracie-results/${CI_PROJECT_PATH}"
          - export PIGLIT_REPLAY_ARTIFACTS_BASE_URL="/artifacts/${CI_PROJECT_PATH}/${CI_PIPELINE_ID}/${CI_JOB_ID}"
          - export MINIO_ARTIFACTS_PATH="minio://${MINIO_HOST}/${PIGLIT_REPLAY_ARTIFACTS_BASE_URL}/"
          - export PIGLIT_REPLAY_DESCRIPTION_FILE="/install/traces-${DRIVER_NAME}.yml"
          - export PIGLIT_REPLAY_DEVICE_NAME=gl-{{ gpu_version }}
          - export PIGLIT_RESULTS={{ gpu_version }}-${PIGLIT_PROFILES}

          - export LIBGL_DRIVERS_PATH=/install/lib/dri

          # If we want Xorg to be running for the test, then we start it up before the
          # LAVA_TEST_SCRIPT because we need to use xinit to start X (otherwise
          # without using -displayfd you can race with Xorg's startup), but xinit will eat
          # your client's return code
          # FIX: $LAVA_START_XORG must be quoted — unquoted, an unset variable
          # collapses the test to the one-argument form "[ -n ]", which is
          # always true, so Xorg was started unconditionally.
          - "if [ -n \"$LAVA_START_XORG\" ]; then
               echo 'touch /xorg-started; sleep 100000' > /xorg-script;
               env LD_LIBRARY_PATH=/install/lib/ xinit /bin/sh /xorg-script -- /usr/bin/Xorg -noreset -dpms -logfile /Xorg.0.log &
               for i in 1 2 3 4 5; do
                 if [ -e /xorg-started ]; then
                   break;
                 fi;
                 sleep 5;
               done;
               export DISPLAY=:0;
             fi"

          # Record pass/fail rather than aborting, so artifacts still upload.
          - "if sh $LAVA_TEST_SCRIPT; then
                  export RESULT=pass;
             else
                  export RESULT=fail;
             fi"

          - "if [ -d results ]; then
                tar -czf results.tar.gz results/;
                ci-fairy minio login $CI_JOB_JWT;
                ci-fairy minio cp results.tar.gz $MINIO_ARTIFACTS_PATH/results.tar.gz;
             fi"

          - "echo mesa: $RESULT"
        parse:
          pattern: '(?P<test_case_id>\S*):\s+(?P<result>(pass|fail))'
      from: inline
      name: mesa
      path: inline/mesa.yaml