Tests: speed up render tests by running multiple in the same process
[blender.git] / tests / python / modules / render_report.py
1 # Apache License, Version 2.0
2 #
3 # Compare renders or screenshots against reference versions and generate
4 # a HTML report showing the differences, for regression testing.
5
6 import glob
7 import os
8 import pathlib
9 import shutil
10 import subprocess
11 import sys
12 import time
13
14 from . import global_report
15
16
class COLORS_ANSI:
    """ANSI terminal escape sequences used to colorize test status output."""
    RED = '\033[00;31m'
    GREEN = '\033[00;32m'
    ENDC = '\033[0m'
21
22
class COLORS_DUMMY:
    """No-op color scheme: same attributes as COLORS_ANSI, all empty strings."""
    RED = ''
    GREEN = ''
    ENDC = ''
27
28
29 COLORS = COLORS_DUMMY
30
31
def print_message(message, type=None, status=''):
    """Print a gtest-style status line and flush stdout.

    :param message: Text printed after the status tag.
    :param type: 'SUCCESS' or 'FAILURE' selects green/red coloring
        (via the module-level COLORS scheme); anything else is uncolored.
    :param status: One of the known tags ('RUN', 'OK', 'PASSED', 'FAILED'),
        which are padded gtest-style, or an arbitrary string printed as-is.
    """
    if type == 'SUCCESS':
        print(COLORS.GREEN, end="")
    elif type == 'FAILURE':
        print(COLORS.RED, end="")
    # Fix: the original assigned `status_text = ...` (the Ellipsis object)
    # before an if/elif/else chain that always overwrote it; the dead
    # assignment is dropped and the chain replaced by a dict lookup that
    # falls back to the raw status string.
    status_text = {
        'RUN': " RUN      ",
        'OK': "       OK ",
        'PASSED': "  PASSED  ",
        'FAILED': "  FAILED  ",
    }.get(status, status)
    if status_text:
        print("[{}]" . format(status_text), end="")
    print(COLORS.ENDC, end="")
    print(" {}" . format(message))
    # Flush so interleaved subprocess output keeps ordering.
    sys.stdout.flush()
53
54
def blend_list(dirpath):
    """Yield the full path of every .blend file found under *dirpath*.

    The directory tree is walked recursively; the extension check is
    case-insensitive.
    """
    for root, _dirs, files in os.walk(dirpath):
        for name in files:
            if name.lower().endswith(".blend"):
                yield os.path.join(root, name)
61
62
63 def test_get_name(filepath):
64     filename = os.path.basename(filepath)
65     return os.path.splitext(filename)[0]
66
67
68 def test_get_images(output_dir, filepath, reference_dir):
69     testname = test_get_name(filepath)
70     dirpath = os.path.dirname(filepath)
71
72     old_dirpath = os.path.join(dirpath, reference_dir)
73     old_img = os.path.join(old_dirpath, testname + ".png")
74
75     ref_dirpath = os.path.join(output_dir, os.path.basename(dirpath), "ref")
76     ref_img = os.path.join(ref_dirpath, testname + ".png")
77     os.makedirs(ref_dirpath, exist_ok=True)
78     if os.path.exists(old_img):
79         shutil.copy(old_img, ref_img)
80
81     new_dirpath = os.path.join(output_dir, os.path.basename(dirpath))
82     os.makedirs(new_dirpath, exist_ok=True)
83     new_img = os.path.join(new_dirpath, testname + ".png")
84
85     diff_dirpath = os.path.join(output_dir, os.path.basename(dirpath), "diff")
86     os.makedirs(diff_dirpath, exist_ok=True)
87     diff_img = os.path.join(diff_dirpath, testname + ".diff.png")
88
89     return old_img, ref_img, new_img, diff_img
90
91
class Report:
    """Render regression test report.

    Runs a batch of .blend render tests, compares the resulting images
    against reference renders using OpenImageIO's ``idiff`` tool, and
    writes an HTML report (plus an optional cross-engine comparison
    report and an entry in the global report).
    """

    __slots__ = (
        'title',
        'output_dir',
        'reference_dir',
        'idiff',
        'pixelated',
        'verbose',
        'update',
        'failed_tests',
        'passed_tests',
        'compare_tests',
        'compare_engines'
    )

    def __init__(self, title, output_dir, idiff):
        """
        :param title: Human-readable report title.
        :param output_dir: Directory where images, data and HTML are written.
        :param idiff: Path to the OpenImageIO idiff executable.
        """
        self.title = title
        self.output_dir = output_dir
        self.reference_dir = 'reference_renders'
        self.idiff = idiff
        self.compare_engines = None

        self.pixelated = False
        self.verbose = os.environ.get("BLENDER_VERBOSE") is not None
        # BLENDER_TEST_UPDATE makes failing tests overwrite their reference images.
        self.update = os.getenv('BLENDER_TEST_UPDATE') is not None

        # Opt-in ANSI colors for terminal output.
        if os.environ.get("BLENDER_TEST_COLOR") is not None:
            global COLORS, COLORS_ANSI
            COLORS = COLORS_ANSI

        # Accumulated HTML table rows for the report.
        self.failed_tests = ""
        self.passed_tests = ""
        self.compare_tests = ""

        os.makedirs(output_dir, exist_ok=True)

    def set_pixelated(self, pixelated):
        # Use nearest-neighbor ('pixelated') image scaling in the HTML
        # report; useful for low resolution renders.
        self.pixelated = pixelated

    def set_reference_dir(self, reference_dir):
        # Name of the directory next to the .blend files that holds the
        # checked-in reference images.
        self.reference_dir = reference_dir

    def set_compare_engines(self, engine, other_engine):
        # Enable a second report that compares renders of two engines.
        self.compare_engines = (engine, other_engine)

    def run(self, dirpath, render_cb):
        """Run all tests found under *dirpath* and write the reports.

        :param render_cb: Callback rendering a list of .blend files to a
            list of temporary output files; returns per-file error codes.
        :return: True when every test passed.
        """
        dirname = os.path.basename(dirpath)
        ok = self._run_all_tests(dirname, dirpath, render_cb)
        self._write_data(dirname)
        self._write_html()
        if self.compare_engines:
            self._write_html(comparison=True)
        return ok

    def _write_data(self, dirname):
        # Write intermediate data for single test, so reports of multiple
        # test runs in the same output directory can be merged later.
        outdir = os.path.join(self.output_dir, dirname)
        os.makedirs(outdir, exist_ok=True)

        filepath = os.path.join(outdir, "failed.data")
        pathlib.Path(filepath).write_text(self.failed_tests)

        filepath = os.path.join(outdir, "passed.data")
        pathlib.Path(filepath).write_text(self.passed_tests)

        if self.compare_engines:
            filepath = os.path.join(outdir, "compare.data")
            pathlib.Path(filepath).write_text(self.compare_tests)

    def _write_html(self, comparison=False):
        """Merge all intermediate .data files into one HTML report.

        :param comparison: Write the engine-comparison report instead of
            the regular pass/fail report.
        """
        # Gather intermediate data for all tests.
        if comparison:
            failed_data = []
            passed_data = sorted(glob.glob(os.path.join(self.output_dir, "*/compare.data")))
        else:
            failed_data = sorted(glob.glob(os.path.join(self.output_dir, "*/failed.data")))
            passed_data = sorted(glob.glob(os.path.join(self.output_dir, "*/passed.data")))

        failed_tests = ""
        passed_tests = ""

        for filename in failed_data:
            filepath = os.path.join(self.output_dir, filename)
            failed_tests += pathlib.Path(filepath).read_text()
        for filename in passed_data:
            filepath = os.path.join(self.output_dir, filename)
            passed_tests += pathlib.Path(filepath).read_text()

        # Failed tests are listed first so they are immediately visible.
        tests_html = failed_tests + passed_tests

        # Write html for all tests.
        if self.pixelated:
            image_rendering = 'pixelated'
        else:
            image_rendering = 'auto'

        failed = len(failed_tests) > 0
        if failed:
            message = "<p>Run <tt>BLENDER_TEST_UPDATE=1 ctest</tt> to create or update reference images for failed tests.</p>"
        else:
            message = ""

        if comparison:
            title = "Render Test Compare"
            columns_html = "<tr><th>Name</th><th>%s</th><th>%s</th>" % self.compare_engines
        else:
            title = self.title
            columns_html = "<tr><th>Name</th><th>New</th><th>Reference</th><th>Diff</th>"

        html = """
<html>
<head>
    <title>{title}</title>
    <style>
        img {{ image-rendering: {image_rendering}; width: 256px; background-color: #000; }}
        img.render {{
            background-color: #fff;
            background-image:
              -moz-linear-gradient(45deg, #eee 25%, transparent 25%),
              -moz-linear-gradient(-45deg, #eee 25%, transparent 25%),
              -moz-linear-gradient(45deg, transparent 75%, #eee 75%),
              -moz-linear-gradient(-45deg, transparent 75%, #eee 75%);
            background-image:
              -webkit-gradient(linear, 0 100%, 100% 0, color-stop(.25, #eee), color-stop(.25, transparent)),
              -webkit-gradient(linear, 0 0, 100% 100%, color-stop(.25, #eee), color-stop(.25, transparent)),
              -webkit-gradient(linear, 0 100%, 100% 0, color-stop(.75, transparent), color-stop(.75, #eee)),
              -webkit-gradient(linear, 0 0, 100% 100%, color-stop(.75, transparent), color-stop(.75, #eee));

            -moz-background-size:50px 50px;
            background-size:50px 50px;
            -webkit-background-size:50px 51px; /* override value for shitty webkit */

            background-position:0 0, 25px 0, 25px -25px, 0px 25px;
        }}
        table td:first-child {{ width: 256px; }}
    </style>
    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-alpha.6/css/bootstrap.min.css">
</head>
<body>
    <div class="container">
        <br/>
        <h1>{title}</h1>
        {message}
        <br/>
        <table class="table table-striped">
            <thead class="thead-default">
                {columns_html}
            </thead>
            {tests_html}
        </table>
        <br/>
    </div>
</body>
</html>
            """ . format(title=title,
                         message=message,
                         image_rendering=image_rendering,
                         tests_html=tests_html,
                         columns_html=columns_html)

        filename = "report.html" if not comparison else "compare.html"
        filepath = os.path.join(self.output_dir, filename)
        pathlib.Path(filepath).write_text(html)

        print_message("Report saved to: " + pathlib.Path(filepath).as_uri())

        # Update global report.
        link_name = "Renders" if not comparison else "Comparison"
        global_output_dir = os.path.dirname(self.output_dir)
        global_failed = failed if not comparison else None
        global_report.add(global_output_dir, self.title, link_name, filepath, global_failed)

    def _relative_url(self, filepath):
        # URLs in the report are relative to the output directory, using
        # forward slashes so they work on all platforms.
        relpath = os.path.relpath(filepath, self.output_dir)
        return pathlib.Path(relpath).as_posix()

    def _write_test_html(self, testname, filepath, error):
        """Append the HTML table row(s) for one test to the accumulators.

        :param testname: Name of the test group (directory name).
        :param filepath: Path of the tested .blend file.
        :param error: Error code string, or falsy when the test passed.
        """
        name = test_get_name(filepath)
        name = name.replace('_', ' ')

        old_img, ref_img, new_img, diff_img = test_get_images(self.output_dir, filepath, self.reference_dir)

        status = error if error else ""
        tr_style = """ style="background-color: #f99;" """ if error else ""

        new_url = self._relative_url(new_img)
        ref_url = self._relative_url(ref_img)
        diff_url = self._relative_url(diff_img)

        # New and reference images swap on mouse hover for easy comparison.
        test_html = """
            <tr{tr_style}>
                <td><b>{name}</b><br/>{testname}<br/>{status}</td>
                <td><img src="{new_url}" onmouseover="this.src='{ref_url}';" onmouseout="this.src='{new_url}';" class="render"></td>
                <td><img src="{ref_url}" onmouseover="this.src='{new_url}';" onmouseout="this.src='{ref_url}';" class="render"></td>
                <td><img src="{diff_url}"></td>
            </tr>""" . format(tr_style=tr_style,
                              name=name,
                              testname=testname,
                              status=status,
                              new_url=new_url,
                              ref_url=ref_url,
                              diff_url=diff_url)

        if error:
            self.failed_tests += test_html
        else:
            self.passed_tests += test_html

        if self.compare_engines:
            # Reference the same image rendered by the other engine,
            # assumed to live in a sibling output directory.
            ref_url = os.path.join("..", self.compare_engines[1], new_url)

            test_html = """
                <tr{tr_style}>
                    <td><b>{name}</b><br/>{testname}<br/>{status}</td>
                    <td><img src="{new_url}" onmouseover="this.src='{ref_url}';" onmouseout="this.src='{new_url}';" class="render"></td>
                    <td><img src="{ref_url}" onmouseover="this.src='{new_url}';" onmouseout="this.src='{ref_url}';" class="render"></td>
                </tr>""" . format(tr_style=tr_style,
                                  name=name,
                                  testname=testname,
                                  status=status,
                                  new_url=new_url,
                                  ref_url=ref_url)

            self.compare_tests += test_html

    def _diff_output(self, filepath, tmp_filepath):
        """Compare a temporary render against its reference image.

        Copies the temporary render into the report, runs idiff with the
        failure threshold, optionally updates the reference images, and
        generates a visual diff image.

        :return: True when the render matches the reference (or the
            reference was just updated).
        """
        old_img, ref_img, new_img, diff_img = test_get_images(self.output_dir, filepath, self.reference_dir)

        # Create reference render directory.
        old_dirpath = os.path.dirname(old_img)
        os.makedirs(old_dirpath, exist_ok=True)

        # Copy temporary to new image.
        if os.path.exists(new_img):
            os.remove(new_img)
        if os.path.exists(tmp_filepath):
            shutil.copy(tmp_filepath, new_img)

        if os.path.exists(ref_img):
            # Diff images test with threshold.
            command = (
                self.idiff,
                "-fail", "0.016",
                "-failpercent", "1",
                ref_img,
                tmp_filepath,
            )
            try:
                subprocess.check_output(command)
                failed = False
            except subprocess.CalledProcessError as e:
                if self.verbose:
                    print_message(e.output.decode("utf-8"))
                # idiff exits with 1 for differences below the failure
                # threshold; only higher exit codes count as a failure.
                failed = e.returncode != 1
        else:
            # No reference image yet: the test can only pass by creating
            # one in update mode.
            if not self.update:
                return False

            failed = True

        if failed and self.update:
            # Update reference image if requested.
            shutil.copy(new_img, ref_img)
            shutil.copy(new_img, old_img)
            failed = False

        # Generate diff image.
        command = (
            self.idiff,
            "-o", diff_img,
            "-abs", "-scale", "16",
            ref_img,
            tmp_filepath
        )

        try:
            subprocess.check_output(command)
        except subprocess.CalledProcessError as e:
            # Diff generation is best-effort; a failure only affects the
            # report, not the test result.
            if self.verbose:
                print_message(e.output.decode("utf-8"))

        return not failed

    def _run_tests(self, filepaths, render_cb):
        # Run all tests together for performance, since Blender
        # startup time is a significant factor.
        tmp_filepaths = []
        for filepath in filepaths:
            testname = test_get_name(filepath)
            print_message(testname, 'SUCCESS', 'RUN')
            tmp_filepaths.append(os.path.join(self.output_dir, "tmp_" + testname))

        run_errors = render_cb(filepaths, tmp_filepaths)
        errors = []

        for error, filepath, tmp_filepath in zip(run_errors, filepaths, tmp_filepaths):
            if not error:
                # Guard against a missing output file as well as an empty
                # one; os.path.getsize raises if the file does not exist.
                if not os.path.exists(tmp_filepath) or os.path.getsize(tmp_filepath) == 0:
                    error = "VERIFY"
                elif not self._diff_output(filepath, tmp_filepath):
                    error = "VERIFY"

            if os.path.exists(tmp_filepath):
                os.remove(tmp_filepath)

            errors.append(error)

            testname = test_get_name(filepath)
            if not error:
                print_message(testname, 'SUCCESS', 'OK')
            else:
                if error == "SKIPPED":
                    print_message("Skipped after previous render caused error")
                elif error == "NO_ENGINE":
                    print_message("Can't perform tests because the render engine failed to load!")
                elif error == "NO_START":
                    # Fix: the second string was previously passed as
                    # print_message's `type` argument and never printed.
                    print_message('Can not perform tests because blender fails to start. '
                                  'Make sure INSTALL target was run.')
                elif error == 'VERIFY':
                    print_message("Rendered result is different from reference image")
                else:
                    print_message("Unknown error %r" % error)
                print_message(testname, 'FAILURE', 'FAILED')

        return errors

    def _run_all_tests(self, dirname, dirpath, render_cb):
        """Run every .blend test under *dirpath* and print a summary.

        :return: True when all tests passed; False on any failure or when
            the engine/Blender could not start at all.
        """
        passed_tests = []
        failed_tests = []
        all_files = list(blend_list(dirpath))
        all_files.sort()
        print_message("Running {} tests from 1 test case." .
                      format(len(all_files)),
                      'SUCCESS', "==========")
        time_start = time.time()
        errors = self._run_tests(all_files, render_cb)
        for filepath, error in zip(all_files, errors):
            testname = test_get_name(filepath)
            if error:
                # Fatal environment problems abort immediately without
                # writing per-test HTML.
                if error == "NO_ENGINE":
                    return False
                elif error == "NO_START":
                    return False
                failed_tests.append(testname)
            else:
                passed_tests.append(testname)
            self._write_test_html(dirname, filepath, error)
        time_end = time.time()
        elapsed_ms = int((time_end - time_start) * 1000)
        print_message("")
        print_message("{} tests from 1 test case ran. ({} ms total)" .
                      format(len(all_files), elapsed_ms),
                      'SUCCESS', "==========")
        print_message("{} tests." .
                      format(len(passed_tests)),
                      'SUCCESS', 'PASSED')
        if failed_tests:
            print_message("{} tests, listed below:" .
                          format(len(failed_tests)),
                          'FAILURE', 'FAILED')
            failed_tests.sort()
            for test in failed_tests:
                print_message("{}" . format(test), 'FAILURE', "FAILED")

        return not bool(failed_tests)