
Source Code for Module qm.test.report

########################################################################
#
# File:   report.py
# Author: Stefan Seefeld
# Date:   2005-02-13
#
# Contents:
#   QMTest ReportGenerator class.
#
# Copyright (c) 2005 by CodeSourcery, LLC.  All rights reserved.
#
# For license terms see the file COPYING.
#
########################################################################

########################################################################
# Imports
########################################################################

import qm
import qm.xmlutil
from qm.common import PythonException
from qm.test import base
from qm.test.result import Result
from qm.test.reader_test_run import ReaderTestRun
import xml.sax
import sys

########################################################################
# Classes
########################################################################

class ReportGenerator:
    """A 'ReportGenerator' generates a test report from one or more
    result files."""

    def __init__(self, output, database=None):

        if output and output != '-':
            self.output = open(output, 'w+')
        else:
            self.output = sys.stdout
        self.database = database
        self.__document = qm.xmlutil.create_dom_document(
            public_id="QMTest/Report",
            document_element_tag="report")

    def GenerateReport(self, flat, arguments):
        """Generate a report file from results collected from a set of
        result files.

        'flat' -- True to indicate a flat result listing, False if tests
        should be reported according to the database directory structure.

        'arguments' -- command arguments of the form [result [-e expectation]]+

        returns -- None."""

        # Construct a list of (result / expectation file) tuples.
        # As the expectation file is optional, see whether there
        # is an '-e' option, and then adjust the remainder accordingly.
        input = []
        while arguments:
            if len(arguments) >= 3 and arguments[1] == '-e':
                input.append((arguments[0], arguments[2]))
                arguments = arguments[3:]
            else:
                input.append((arguments[0], None))
                arguments = arguments[1:]

        # Write out the prologue.
        self.output.write("<?xml version='1.0' encoding='ISO-8859-1'?>\n")
        self.output.write("<report>\n")

        test_runs = self._LoadTestRuns(input)

        self.output.write(" <runs>\n")
        for test_run, expectations in test_runs:
            self.output.write(" <run>\n")

            annotations = test_run.GetAnnotations()
            for key, value in annotations.iteritems():

                element = self.__document.createElement("annotation")
                element.setAttribute("key", key)
                text = self.__document.createTextNode(value)
                element.appendChild(text)
                element.writexml(self.output, addindent = " ", newl = "\n")
            self.output.write(" </run>\n")
        self.output.write(" </runs>\n")

        if flat:
            self._ReportFlat(test_runs)
        else:
            self._Report(test_runs)

        self.output.write("</report>\n")

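The 'arguments' list interleaves result files with optional '-e expectation-file' pairs, as the docstring above describes. A minimal standalone sketch of just that pairing step; the file names are made up for illustration and are not part of QMTest:

# Sketch of GenerateReport's argument pairing, outside of QMTest.
# 'nightly.qmr', 'expected.qmr' and 'smoke.qmr' are hypothetical names.
arguments = ['nightly.qmr', '-e', 'expected.qmr', 'smoke.qmr']
input = []
while arguments:
    if len(arguments) >= 3 and arguments[1] == '-e':
        input.append((arguments[0], arguments[2]))
        arguments = arguments[3:]
    else:
        input.append((arguments[0], None))
        arguments = arguments[1:]
# input == [('nightly.qmr', 'expected.qmr'), ('smoke.qmr', None)]
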
    def _LoadTestRuns(self, input):
        """Load test runs from the provided input.

        'input' -- A list of pairs of file names referring to result files /
        expectation files.  The expectation file member may be None.

        returns -- A list of pairs of TestRun objects."""

        runs = []
        for result_file, exp_file in input:
            results = None
            expectations = None

            try:
                file = result_file
                reader = base.load_results(file, self.database)
                results = ReaderTestRun(reader)
                if exp_file:
                    file = exp_file
                    reader = base.load_results(file, self.database)
                    expectations = ReaderTestRun(reader)
            except IOError, e:
                raise PythonException("Error reading '%s'"%file, IOError, e)
            except xml.sax.SAXException, e:
                raise PythonException("Error loading '%s'"%file,
                                      xml.sax.SAXException, e)
            runs.append((results, expectations))
        return runs

    def _GetIds(self, test_runs):
        """Return the ids to report results for.
        These are obtained from the database if it is present,
        or else by taking the union of all items reported in the
        test runs.

        'test_runs' -- A list of result / expectation table pairs.

        returns -- The tuple of test-ids, resource-setup-ids,
        and resource-cleanup-ids."""

        test_ids = []
        resource_setup_ids = []
        resource_cleanup_ids = []
        if self.database:
            test_ids = self.database.GetTestIds()
            resource_setup_ids = self.database.GetResourceIds()
            resource_cleanup_ids = resource_setup_ids
        else:
            for results, e in test_runs:
                for result in results.GetAllResults("", Result.TEST):
                    if not result.GetId() in test_ids:
                        test_ids.append(result.GetId())
                for result in results.GetAllResults("", Result.RESOURCE_SETUP):
                    if not result.GetId() in resource_setup_ids:
                        resource_setup_ids.append(result.GetId())
                for result in results.GetAllResults("", Result.RESOURCE_CLEANUP):
                    if not result.GetId() in resource_cleanup_ids:
                        resource_cleanup_ids.append(result.GetId())
        return test_ids, resource_setup_ids, resource_cleanup_ids

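When no database is given, _GetIds falls back to the union of the ids reported by the runs themselves, keeping first-seen order. A simplified stand-in for that branch, using plain lists of ids in place of ReaderTestRun results; the ids are hypothetical:

# Simplified sketch of the no-database branch of _GetIds.
runs_test_ids = [['dir1.test_a', 'dir1.test_b'],
                 ['dir1.test_b', 'dir2.test_c']]
test_ids = []
for run_ids in runs_test_ids:
    for id in run_ids:
        if id not in test_ids:
            test_ids.append(id)
# test_ids == ['dir1.test_a', 'dir1.test_b', 'dir2.test_c']
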
    def _ReportFlat(self, test_runs):
        """Generate a test report from the given set of test runs.
        The report will contain a flat list of item ids.

        'test_runs' -- List of pairs of TestRun objects."""

        ids = self._GetIds(test_runs)
        kinds = [Result.TEST, Result.RESOURCE_SETUP, Result.RESOURCE_CLEANUP]

        element = self.__document.createElement('results')
        # Report all items, sorted by their kind.
        for k in [0, 1, 2]:
            for id in ids[k]:
                self._ReportItem(kinds[k], id, id, test_runs, element)

        element.writexml(self.output, indent = " ", addindent = " ",
                         newl = "\n")

    def _Report(self, test_runs):
        """Generate a test report from the given set of test runs.
        The report will contain a tree structure with items appearing in their
        respective subdirectories.

        'test_runs' -- List of pairs of TestRun objects."""

        element = self.__document.createElement('results')
        root = self._ReportSubdirectory('', test_runs, element)
        root.writexml(self.output, indent=" ", addindent=" ", newl="\n")

    def _ReportSubdirectory(self, directory, test_runs, element=None):
        """Generate a DOM node for the given directory containing its results.

        'directory' -- The directory for which to generate the report node.

        'test_runs' -- The list of TestRuns.

        'element' -- DOM element to store results into.
        If this is None, an element will be created.

        returns -- DOM element node containing the xmlified results."""

        if not element:
            element = self.__document.createElement('subdirectory')
            element.setAttribute('name', self.database.SplitLabel(directory)[1])

        # Start with the subdirectories.
        for dir in self.database.GetSubdirectories(directory):
            child = self._ReportSubdirectory(self.database.JoinLabels(directory, dir),
                                             test_runs)
            element.appendChild(child)

        # Report all items, sorted by kind.
        for id in self.database.GetIds('test', directory, False):
            self._ReportItem('test', id, self.database.SplitLabel(id)[1],
                             test_runs, element)
        for id in self.database.GetIds('resource', directory, False):
            self._ReportItem('resource_setup', id, self.database.SplitLabel(id)[1],
                             test_runs, element)
            self._ReportItem('resource_cleanup', id, self.database.SplitLabel(id)[1],
                             test_runs, element)
        return element

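Taken together with _ReportItem below, the tree report written by GenerateReport has roughly the shape sketched here. This outline is an assumption pieced together from the write() and createElement() calls in this module; the element nested inside <item> comes from Result.MakeDomNode, so its tag and attributes are assumed, as are the example names and values:

# Assumed outline of the generated report; not actual QMTest output.
# All names, keys and the 'result' element are illustrative assumptions.
report_shape = """\
<?xml version='1.0' encoding='ISO-8859-1'?>
<report>
 <runs>
  <run>
   <annotation key="...">...</annotation>
  </run>
 </runs>
 <results>
  <subdirectory name="subdir">
   <item id="test_a" qid="subdir.test_a" kind="test">
    <result outcome="PASS">...</result>
   </item>
  </subdirectory>
 </results>
</report>
"""
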
    def _ReportItem(self, kind, item_id, name, test_runs, parent):
        """Report a single item.

        'kind' -- The kind of item to report.

        'item_id' -- The item id to report.

        'name' -- The item's name (usually either the absolute or relative id).

        'test_runs' -- The list of test runs.

        'parent' -- An XML element to insert new nodes into."""

        # Create one item node per id...
        item = self.__document.createElement('item')
        item.setAttribute('id', name)
        item.setAttribute('qid', item_id)
        item.setAttribute('kind', kind)
        parent.appendChild(item)

        # ...and fill it with one result per test run.
        for results, expectations in test_runs:
            result = results.GetResult(item_id, kind)
            if not result:
                result = Result(kind, item_id, Result.UNTESTED)
            # Inject two new annotations containing the expectation values.
            if expectations:
                exp = expectations.GetResult(item_id, kind)
                if exp:
                    result['qmtest.expected_outcome'] = exp.GetOutcome()
                    cause = exp.get('qmtest.cause')
                    if cause:
                        result['qmtest.expected_cause'] = cause

            child = result.MakeDomNode(self.__document)
            # Remove redundant attributes
            child.removeAttribute('id')
            child.removeAttribute('kind')
            item.appendChild(child)

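A minimal sketch of how this class might be driven directly, assuming a QMTest installation and an existing result file. The file names are hypothetical, and the qmtest command-line front end normally does this wiring itself:

# Hedged usage sketch; 'results.qmr' and 'expected.qmr' are made-up names.
from qm.test.report import ReportGenerator

generator = ReportGenerator('report.xml')
# Without a database only the flat report (flat=True) is usable, since the
# tree report walks the database's directory structure.
generator.GenerateReport(True, ['results.qmr', '-e', 'expected.qmr'])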