import qm.common
import qm.fields
from qm.test.base import *
from qm.test.result import *
from qm.test.file_result_stream import FileResultStream


class TextResultStream(FileResultStream):
    """A 'TextResultStream' displays test results textually.

    A 'TextResultStream' displays information textually, in human
    readable form.  This 'ResultStream' is used when QMTest is run
    without a graphical user interface."""
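
    # Hypothetical usage sketch, for illustration only (QMTest normally
    # constructs result streams itself; the "filename" field is assumed
    # to be inherited from 'FileResultStream'):
    #
    #   stream = TextResultStream({ "format" : "brief",
    #                               "filename" : "results.txt" })
    #   for result in results:
    #       stream.WriteResult(result)
    #   stream.Summarize()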

    arguments = [
        qm.fields.EnumerationField(
            name = "format",
            title = "Format",
            description = """The output format used by this result stream.

            There are three sections to the output:

            (S) Summary statistics.

            (I) Individual test-by-test results.

            (U) Individual test-by-test results for tests with
                unexpected outcomes.

            For each of the sections of individual test-by-test
            results, the results can be shown in one of three modes:

            (A) Show all annotations.

            (N) Show no annotations.

            (U) Show annotations only if the test has an unexpected
                outcome.

            In the "brief" format, results for all tests are shown as
            they execute, with unexpected results displayed in full
            detail, followed by a list of all the tests with
            unexpected results in full detail, followed by the summary
            information.  This format is useful for interactive use:
            the user can see the tests running, can attempt to fix
            failures while the remaining tests run, and can easily
            find the summary of the results later if the window in
            which the run is occurring is left unattended.

            In the "batch" format, statistics are displayed first,
            followed by full results for tests with unexpected
            outcomes.  The batch format is useful when QMTest is run
            in batch mode, such as from an overnight job.  The first
            few lines of the results (often sent by email) give an
            overview of the results; the remainder of the file gives
            details about any tests with unexpected outcomes.

            The "full" format is like "brief", except that all
            annotations are shown for tests as they are run.

            In the "stats" format, only the summary statistics are
            displayed.""",
            enumerals = ["brief", "batch", "full", "stats"]),
        qm.fields.TextField(
            name = "statistics_format",
            title = "Statistics Format",
            verbatim = "true",
            multiline = "true",
            description = """The format string used to display statistics.

            The format string is an ordinary Python format string.
            The following fill-ins are available:

            'TOTAL' -- The total number of tests.

            'EXPECTED' -- The total number of tests that had an
            expected outcome.

            'EXPECTED_PERCENT' -- The percentage of tests with
            expected outcomes.

            'UNEXPECTED' -- The total number of tests that had an
            unexpected outcome.

            For each outcome 'O', there are additional fill-ins:

            'O' -- The total number of tests with outcome 'O'.

            'O_PERCENT' -- The percentage of tests with outcome 'O'
            to total tests, as a floating point value.

            'O_UNEXPECTED' -- The total number of tests with an
            unexpected outcome of 'O'.

            'O_UNEXPECTED_PERCENT' -- The percentage of tests with an
            unexpected outcome of 'O' to total tests, as a floating
            point value."""),
        ]
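
    # For illustration only: a hypothetical "statistics_format" value
    # built from the fill-ins documented above (this string is not a
    # QMTest default):
    #
    #     %(TOTAL)6d tests total
    #     %(EXPECTED)6d (%(EXPECTED_PERCENT)3.0f%%) tests as expected
    #     %(UNEXPECTED)6d tests unexpected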

    def __init__(self, arguments = None, **args):
        """Construct a 'TextResultStream'.

        'args' -- As for 'Extension.__init__'."""

        super(TextResultStream, self).__init__(arguments, **args)

        # Pick a default format if none was specified: "brief" when
        # writing to a terminal, "batch" otherwise.
        if not self.format:
            self.format = "batch"
            try:
                if self.file.isatty():
                    self.format = "brief"
            except:
                # Not all file-like objects support 'isatty'.
                pass

        # True until the first test result has been written.
        self.__first_test = 1
        # Results with unexpected outcomes, in the order received.
        self.__unexpected_test_results = []
        self.__unexpected_resource_results = []
        # The total number of tests whose results have been seen.
        self.__num_tests = 0
        # Map from each outcome to the number of tests with that
        # outcome.
        self.__outcome_counts = {}
        for o in Result.outcomes:
            self.__outcome_counts[o] = 0
        # Map from each outcome to the number of tests for which that
        # outcome was unexpected.
        self.__unexpected_outcome_counts = {}
        for o in Result.outcomes:
            self.__unexpected_outcome_counts[o] = 0


    def WriteResult(self, result):
        """Output a test or resource result.

        'result' -- A 'Result'."""

        if result.GetKind() == Result.TEST:
            self.__num_tests += 1
            # Update the outcome counts.
            outcome = result.GetOutcome()
            self.__outcome_counts[outcome] += 1
            # Remember results with unexpected outcomes so that they
            # can be summarized when the run completes.
            test_id = result.GetId()
            expected_outcome = self._GetExpectedOutcome(test_id)
            if self.format != "stats" and outcome != expected_outcome:
                self.__unexpected_outcome_counts[outcome] += 1
                self.__unexpected_test_results.append(result)
        else:
            # Resource results are only recorded if they did not pass.
            if (self.format != "stats"
                and result.GetOutcome() != Result.PASS):
                self.__unexpected_resource_results.append(result)

        # In the "batch" and "stats" formats, individual results are
        # not displayed as the tests run.
        if self.format == "batch" or self.format == "stats":
            return

        # Display a heading before the first result.
        if self.__first_test:
            self._DisplayHeading("TEST RESULTS")
            self.__first_test = 0

        # Display the result.
        self._DisplayResult(result, self.format)

        # In the "full" format, annotations are always shown; in the
        # "brief" format, only for results that did not pass.
        if (self.format == "full"
            or (self.format == "brief"
                and result.GetOutcome() != Result.PASS)):
            self._DisplayAnnotations(result)


    def Summarize(self):
        """Output summary information about the results.

        When this method is called, the test run is complete.  Summary
        information should be displayed for the user, if appropriate.
        Any finalization, such as the closing of open files, should
        also be performed at this point."""

        if self.format == "batch":
            self._DisplayStatistics()

        # Show results for tests with unexpected outcomes.
        if self.format in ("full", "brief", "batch"):
            compare_ids = lambda r1, r2: cmp(r1.GetId(), r2.GetId())

            # Sort test results by test ID, for stable output.
            self.__unexpected_test_results.sort(compare_ids)
            if self.expected_outcomes:
                self._DisplayHeading("TESTS WITH UNEXPECTED OUTCOMES")
            else:
                self._DisplayHeading("TESTS THAT DID NOT PASS")
            self._SummarizeResults(self.__unexpected_test_results)

            if self.__unexpected_resource_results:
                # Sort resource results by resource ID.
                self.__unexpected_resource_results.sort(compare_ids)
                self._DisplayHeading("RESOURCES THAT DID NOT PASS")
                self._SummarizeResults(self.__unexpected_resource_results)

        # In formats other than "batch", statistics come last.
        if self.format != "batch":
            self._DisplayStatistics()

        super(TextResultStream, self).Summarize()

237 """Write out statistical information about the results.
238
239 Write out statistical information about the results."""
240
241
242 if self.statistics_format:
243 self._FormatStatistics(self.statistics_format)
244 elif self.expected_outcomes:
245 self._SummarizeRelativeTestStats()
246 else:
247 self._SummarizeTestStats()
248

    def _SummarizeTestStats(self):
        """Generate statistics about the overall results."""

        self.file.write("\n")
        self._DisplayHeading("STATISTICS")

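        # Build a format string for '_FormatStatistics'.  '%'
        # characters that must survive this first interpolation are
        # doubled; for the PASS outcome, for example, the loop below
        # appends " %(PASS)6d (%(PASS_PERCENT)3.0f%%) tests PASS\n".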
        if self.__num_tests != 0:
            format = " %(TOTAL)6d tests total\n"
            for o in Result.outcomes:
                # Report only outcomes that actually occurred.
                if self.__outcome_counts[o] != 0:
                    format += (" %%(%s)6d (%%(%s)3.0f%%%%) tests %s\n"
                               % (o, o + "_PERCENT", o))
            format += "\n"
        else:
            format = ""

        self._FormatStatistics(format)


    def _SummarizeRelativeTestStats(self):
        """Generate statistics showing results relative to expectations."""

        self.file.write("\n")
        self._DisplayHeading("STATISTICS")

        # Build the format string, using the same '%%' escaping as in
        # '_SummarizeTestStats'.
        if self.__num_tests != 0:
            format = (" %(EXPECTED)6d (%(EXPECTED_PERCENT)3.0f%%) "
                      "tests as expected\n")
            for o in Result.outcomes:
                # Report only unexpected outcomes that actually
                # occurred.
                if self.__unexpected_outcome_counts[o] != 0:
                    format += (" %%(%s)6d (%%(%s)3.0f%%%%) tests "
                               "unexpected %s\n"
                               % (o + "_UNEXPECTED",
                                  o + "_UNEXPECTED_PERCENT",
                                  o))
            format += "\n"
        else:
            format = ""

        self._FormatStatistics(format)


    def _FormatStatistics(self, format):
        """Output statistical information.

        'format' -- A format string with (optional) fill-ins
        corresponding to statistical information.

        The formatted string is written to the result file."""

        # Compute the values for all of the documented fill-ins, even
        # those that 'format' may not use.
        num_tests = self.__num_tests
        unexpected = len(self.__unexpected_test_results)
        expected = num_tests - unexpected
        values = { "TOTAL" : num_tests,
                   "EXPECTED" : expected,
                   "UNEXPECTED" : unexpected }
        if num_tests:
            values["EXPECTED_PERCENT"] = (100. * expected) / num_tests
        else:
            values["EXPECTED_PERCENT"] = 0.0
        for o in Result.outcomes:
            count = self.__outcome_counts[o]
            values[o] = count
            if num_tests:
                values[o + "_PERCENT"] = (100. * count) / num_tests
            else:
                values[o + "_PERCENT"] = 0.0
            count = self.__unexpected_outcome_counts[o]
            values[o + "_UNEXPECTED"] = count
            if num_tests:
                values[o + "_UNEXPECTED_PERCENT"] = (100. * count) / num_tests
            else:
                values[o + "_UNEXPECTED_PERCENT"] = 0.0

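        # Interpolate the computed values into 'format'.  For example,
        # a (made-up) format of "%(TOTAL)d tests run\n" would write
        # "10 tests run" after a run of ten tests.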
        self.file.write(format % values)


    def _SummarizeResults(self, results):
        """Summarize each of the results.

        'results' -- The sequence of 'Result' objects to summarize."""

        if len(results) == 0:
            self.file.write(" None.\n\n")
            return

        # Display each result, with annotations in the "batch" format.
        for result in results:
            self._DisplayResult(result, self.format)
            if self.format == "batch":
                self._DisplayAnnotations(result)


    def _DisplayResult(self, result, format):
        """Display 'result'.

        'result' -- The 'Result' of a test or resource execution.

        'format' -- The format to use when displaying results."""

        id_ = result.GetId()
        kind = result.GetKind()
        outcome = result.GetOutcome()

        if self.expected_outcomes:
            # If expectations are available, describe the result
            # relative to them: a PASS where a FAIL was expected is
            # reported as "XPASS", and an expected FAIL as "XFAIL".
            expected_outcome = \
                self.expected_outcomes.get(id_, Result.PASS)
            if (outcome == Result.PASS
                and expected_outcome == Result.FAIL):
                self._WriteOutcome(id_, kind, "XPASS")
            elif (outcome == Result.FAIL
                  and expected_outcome == Result.FAIL):
                self._WriteOutcome(id_, kind, "XFAIL")
            elif outcome != expected_outcome:
                self._WriteOutcome(id_, kind, outcome, expected_outcome)
            else:
                self._WriteOutcome(id_, kind, outcome)
        else:
            self._WriteOutcome(id_, kind, outcome)

        # Display the cause of the failure, if any, converted from
        # HTML to plain text.
        cause = result.GetCause()
        if cause:
            cause = qm.common.html_to_text(cause)
            for l in cause.splitlines():
                self.file.write(" " + l + "\n")

        self.file.write('\n')


    def _DisplayAnnotations(self, result):
        """Display the annotations associated with 'result'.

        'result' -- The 'Result' to display."""

        keys = result.keys()
        keys.sort()
        for name in keys:
            # The CAUSE annotation is already displayed by
            # '_DisplayResult'.
            if name == Result.CAUSE:
                continue

            self.file.write(" %s:\n" % name)

            # Convert the annotation from HTML to plain text.
            text = qm.common.html_to_text(result[name])

            # Write the annotation text, indented beneath its name.
            for l in text.splitlines():
                self.file.write(" " + l + "\n")
            self.file.write("\n")


    def _WriteOutcome(self, name, kind, outcome, expected_outcome = None):
        """Write a line indicating the outcome of a test or resource.

        'name' -- The name of the test or resource.

        'kind' -- The kind of result being displayed.

        'outcome' -- A string giving the outcome.

        'expected_outcome' -- If not 'None', the expected outcome."""

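        # The fixed field widths below yield aligned two-column
        # output; for a hypothetical test the line looks roughly like:
        #
        #   mytests.suite1.case1             : FAIL    , expected PASS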
        if kind == Result.RESOURCE_SETUP:
            name = "Setup " + name
        elif kind == Result.RESOURCE_CLEANUP:
            name = "Cleanup " + name

        if expected_outcome:
            self.file.write(" %-46s: %-8s, expected %-8s\n"
                            % (name, outcome, expected_outcome))
        else:
            self.file.write(" %-46s: %-8s\n" % (name, outcome))


    def _DisplayHeading(self, heading):
        """Display 'heading'.

        'heading' -- The string to use as a heading for the next
        section of the report."""

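        # Headings are padded with dashes to a fixed overall width of
        # 78 columns, e.g. "--- TEST RESULTS ------------...----".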
        self.file.write("--- %s %s\n\n" %
                        (heading, "-" * (73 - len(heading))))
448