
def qm::test::cmdline::QMTest::__ExecuteSummarize ( self ) [private]

Read in test run results and summarize.

Definition at line 1152 of file cmdline.py.
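This method implements QMTest's summarize command: it loads a results file produced by an earlier test run, optionally restricts the report to the named tests and suites, and exits with a nonzero status when any outcome differs from the expected one. A minimal invocation, assuming the driver is installed as qmtest (the results file defaults to results.qmr when omitted, as the code below shows):

    qmtest summarize results.qmr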

    def __ExecuteSummarize(self):
        """Read in test run results and summarize."""

        # If no results file is specified, use a default value.
        if len(self.__arguments) == 0:
            results_path = "results.qmr"
        else:
            results_path = self.__arguments[0]

        # The remaining arguments, if any, are test and suite IDs.
        id_arguments = self.__arguments[1:]
        # Are there any?
        if len(id_arguments) > 0:
            filter = 1
            # Expand arguments into test IDs.
            try:
                test_ids, suite_ids \
                          = self.GetDatabase().ExpandIds(id_arguments)
            except (qm.test.database.NoSuchTestError,
                    qm.test.database.NoSuchSuiteError), exception:
                raise qm.cmdline.CommandError, \
                      qm.error("no such ID", id=str(exception))
            except ValueError, exception:
                raise qm.cmdline.CommandError, \
                      qm.error("no such ID", id=str(exception))
        else:
            # No IDs specified.  Show all test and resource results.
            # Don't show any results by test suite though.
            filter = 0
            suite_ids = []

        # Get an iterator over the results.
        try:
            results = base.load_results(open(results_path, "rb"),
                                        self.GetDatabase())
        except (IOError, xml.sax.SAXException), exception:
            raise QMException, \
                  qm.error("invalid results file",
                           path=results_path,
                           problem=str(exception))

        any_unexpected_outcomes = 0

        # Compute the list of result streams to which output should be
        # written.
        streams = self.__GetResultStreams()
        # Send the annotations through.
        for s in streams:
            s.WriteAllAnnotations(results.GetAnnotations())

        # Get the expected outcomes.
        outcomes = self.__GetExpectedOutcomes()

        # Our filtering function.  Should use itertools.ifilter, once
        # we can depend on having Python 2.3.
        def good(r):
            return r.GetKind() == Result.TEST \
                   and r.GetId() in test_ids

        # Simulate the events that would have occurred during an
        # actual test run.
        for r in results:
            if not filter or good(r):
                for s in streams:
                    s.WriteResult(r)
                if (r.GetOutcome()
                    != outcomes.get(r.GetId(), Result.PASS)):
                    any_unexpected_outcomes = 1
        for s in streams:
            s.Summarize()

        if any_unexpected_outcomes:
            return 1
        return 0

    def __ExecuteRemote(self):
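
The comment above good() anticipates replacing the hand-rolled filtering with itertools.ifilter once Python 2.3 can be assumed. A sketch only of that variant, reusing the names results, streams, filter, good, outcomes, Result, and any_unexpected_outcomes from the listing above:

    # Sketch: the same selection step via itertools.ifilter
    # (available from Python 2.3 on).
    import itertools

    if filter:
        # Keep only test results whose IDs were requested.
        selected = itertools.ifilter(good, results)
    else:
        # No IDs given: replay every result.
        selected = results
    for r in selected:
        for s in streams:
            s.WriteResult(r)
        if r.GetOutcome() != outcomes.get(r.GetId(), Result.PASS):
            any_unexpected_outcomes = 1

This changes only how candidate results are selected; the summary written by each stream's Summarize() call is unaffected.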
