def isExpectedResult(self, checkitem, result, extra_info):
    """Return True when (checkitem, result) matches one of the known
    expected-failure patterns, honouring any per-pattern argument
    constraints; False otherwise."""
    for rule in self._expected_failure_patterns:
        expected = rule["results"]
        if checkitem not in expected:
            continue
        if str(result) not in expected[checkitem]:
            # The rule mentions this checkitem but not this result value;
            # log why it didn't apply.
            if "None" in expected[checkitem]:
                debug("This rule only matches skipped checks.")
            elif "0" in expected[checkitem]:
                debug("This rule only matches failed checks.")
            else:
                warning("This rule doesn't match any checks: %s", rule)
            continue
        constraints = rule.get("arguments")
        if constraints is None:
            # No argument constraints: the result match alone is enough.
            return True
        # Every constrained argument must be present and hold an allowed value.
        if all(key in self.arguments and self.arguments[key] in allowed
               for key, allowed in constraints.items()):
            return True
    return False
def _start(self, glob):
    """Set up the output file(s) for the monitored test.

    Depending on the 'desc' argument, stdout and stderr are captured into
    separate temporary files, or both streams go to a single combined file.

    Returns a (files, paths) tuple on success, or False when 'desc' names
    neither stdout nor stderr.
    """
    desc = self.arguments.get("desc")
    basename = self.arguments.get("outputfile-basename")
    category = self.arguments.get("category")
    files = []
    paths = {}
    if desc:
        # BUGFIX: message said "stding" instead of "stderr".
        info("No path set, trying to use stdout and stderr specific files")
        if 'stderr' not in desc and 'stdout' not in desc:
            # BUGFIX: the two literals used to be juxtaposed with no
            # separator, logging "...specifiedCan not use the monitor".
            warning("Neither of stdout-path, stderr-path and path specified. "
                    "Can not use the monitor")
            return False
        if 'stderr' in desc:
            stderr_file, stderr_path = self._getTempFiles(basename, "stderr",
                                                          glob, category)
            self.test.setStderr(stderr_file)
            files.append(stderr_file)
            paths["stderr-file"] = stderr_path
        if 'stdout' in desc:
            stdout_file, stdout_path = self._getTempFiles(basename, "stdout",
                                                          glob, category)
            self.test.setStdout(stdout_file)
            files.append(stdout_file)
            paths["stdout-file"] = stdout_path
    else:
        # No description given: capture both streams into one file.
        _file, path = self._getTempFiles(basename, "stdoutanderr",
                                         glob, category)
        self.test.setStdOutAndErr(path)
        files.append(_file)
        paths["stdout-and-stderr-file"] = path
    return files, paths
def remoteTearDown(self):
    """
    Tear down the remote GStreamer test: cancel the pending callback,
    put the pipeline to NULL, and report collected errors, tags and
    elements as extra-info.

    Returns True on success, False when the base-class teardown failed.
    """
    if not PythonDBusTest.remoteTearDown(self):
        return False
    gst.log("Tearing Down")
    # unref pipeline and so forth
    if self._waitcb:
        # cancel the still-pending glib callback source
        gobject.source_remove(self._waitcb)
        self._waitcb = None
    if self.pipeline:
        self.pipeline.set_state(gst.STATE_NULL)
    self.validateStep("no-errors-seen", self._errors == [])
    if not self._errors == []:
        self.extraInfo("errors", self._errors)
    if not self._tags == {}:
        debug("Got tags %r", self._tags)
        # Only values are rewritten here, no keys are added/removed, so
        # mutating while iterating iteritems() is safe in py2.
        for key, val in self._tags.iteritems():
            if isinstance(val, int):
                # make sure that only values < 2**31 (MAX_INT32) are ints
                # TODO : this is gonna screw up MASSIVELY with values > 2**63
                if val >= 2**31:
                    self._tags[key] = long(val)
        # FIXME : if the value is a list, the dbus python bindings screw up
        #
        # For the time being we remove the values of type list, but this is REALLY
        # bad.
        listval = [x for x in self._tags.keys() if type(self._tags[x]) == list]
        if listval:
            warning("Removing this from the taglist since they're list:%r", listval)
            for val in listval:
                del self._tags[val]
        # NOTE(review): int->long presumably steers dbus-python away from
        # int32 marshalling for large values — confirm against the
        # dbus-python variant marshalling rules.
        self.extraInfo("tags", dbus.Dictionary(self._tags, signature="sv"))
    if not self._elements == []:
        self.extraInfo("elements-used", self._elements)
    return True
def setUp(self):
    """Wrap the monitored test's command in valgrind/memcheck and relax
    its timeouts to account for the slowdown."""
    Monitor.setUp(self)
    self._logfile, self._logfilepath = self.testrun.get_temp_file(
        nameid="valgrind-memcheck")
    # Valgrind invocation that will be prepended to the test's own args.
    prefix = ["valgrind",
              "--tool=memcheck",
              "--leak-check=full",
              "--trace-children=yes",
              "--leak-resolution=med",
              "--num-callers=20",
              "--log-file=%s" % self._logfilepath]
    # Suppression files may arrive as a list or a comma-separated string.
    sups = self.arguments.get("suppression-files")
    if sups:
        suplist = sups if isinstance(sups, list) else sups.split(',')
        prefix.extend("--suppressions=%s" % sup for sup in suplist)
    prefix.extend(self.test._preargs)
    self.test._preargs = prefix
    # glib allocator setting commonly used so memcheck sees individual
    # allocations rather than pooled slices.
    self.test._environ["G_SLICE"] = "always-malloc"
    # Running under valgrind is slow: quadruple both timeouts.
    if not self.test.setTimeout(self.test.getTimeout() * 4):
        warning("Couldn't change the timeout !")
        return False
    if not self.test.setAsyncSetupTimeout(self.test.getAsyncSetupTimeout() * 4):
        warning("Couldn't change the asynchronous setup timeout !")
        return False
    return True
def _runNext(self):
    """
    Run next testrun if available.

    Always returns False (every exit path does), so the method can be
    scheduled repeatedly by the caller.
    """
    if not self._running:
        warning("Not running")
        return False
    if self._current:
        warning("Already running a TestRun [%s]" % self._current)
        return False
    if not self._testruns:
        debug("No more TestRun(s) available")
        if self._singlerun:
            debug("Single-Run mode, now exiting")
            self.quit()
        return False
    nextrun = self._testruns.pop(0)
    self._current = nextrun
    debug("Current testrun is now %s" % nextrun)
    # forward the testrun lifecycle signals to our handlers
    nextrun.connect("start", self._currentStartCb)
    nextrun.connect("done", self._currentDoneCb)
    nextrun.connect("aborted", self._currentAbortedCb)
    # give access to the data storage object
    nextrun.setStorage(self._storage)
    nextrun._clientid = self._clientid
    # and run it!
    nextrun.run()
    return False
def createPipeline(self):
    """
    Build the encoding pipeline: test source(s) -> converter(s) ->
    encoder(s) -> queue(s) -> muxer -> filesink.

    Returns the gst.Pipeline on success, or None when the configuration
    is unusable (no tracks, two tracks without a muxer, or the muxer
    cannot accept the encoders' output).
    """
    if self._encodeVideo == False and self._encodeAudio == False:
        warning("NO audio and NO video ??")
        return None
    if (self._encodeVideo and self._encodeAudio) and not self._muxerFact:
        warning("NO muxer but we have two tracks ??")
        return None
    p = gst.Pipeline()
    # muxer and filesink
    if self._muxerFact:
        self._muxer = gst.element_factory_make(self._muxerFact, "muxer")
    else:
        # single-track, no muxer requested: identity passes data through
        self._muxer = gst.element_factory_make("identity", "muxer")
    filesink = gst.element_factory_make("filesink")
    filesink.props.location = self._outPath
    p.add(self._muxer, filesink)
    self._muxer.link(filesink)
    if self._encodeAudio:
        self._audioSource = make_audio_test_source(duration=self._mediaDuration)
        enc = gst.element_factory_make(self._audioFact)
        # NOTE(review): despite the name, _audioEncoder holds the
        # audioresample element; the actual encoder is 'enc'.
        self._audioEncoder = gst.element_factory_make("audioresample")
        aconv = gst.element_factory_make("audioconvert")
        vq = gst.element_factory_make("queue", "audioqueue")
        p.add(self._audioSource, self._audioEncoder, aconv, enc, vq)
        gst.element_link_many(self._audioEncoder, aconv, enc, vq)
        # ask the muxer for a sink pad compatible with the encoder output
        cptpad = self._muxer.get_compatible_pad(vq.get_pad("src"), enc.get_pad("src").get_caps())
        if cptpad == None:
            self.validateStep("muxer-can-use-encoders", False)
            return None
        gst.debug("Using pad %r for audio encoder" % cptpad)
        vq.get_pad("src").link(cptpad)
        # the test source exposes its pad dynamically
        self._audioSource.connect("pad-added", self._audioSourcePadAdded)
    if self._encodeVideo:
        self._videoSource = make_video_test_source(duration=self._mediaDuration)
        enc = gst.element_factory_make(self._videoFact)
        # NOTE(review): _videoEncoder holds the colorspace converter,
        # not the encoder ('enc').
        self._videoEncoder = gst.element_factory_make("ffmpegcolorspace")
        vq = gst.element_factory_make("queue", "videoqueue")
        p.add(self._videoSource, self._videoEncoder, enc, vq)
        gst.element_link_many(self._videoEncoder, enc, vq)
        cptpad = self._muxer.get_compatible_pad(vq.get_pad("src"), enc.get_pad("src").get_caps())
        if cptpad == None:
            self.validateStep("muxer-can-use-encoders", False)
            return None
        gst.debug("Using pad %r for video encoder" % cptpad)
        vq.get_pad("src").link(cptpad)
        self._videoSource.connect("pad-added", self._videoSourcePadAdded)
    self.validateStep("muxer-can-use-encoders")
    return p
def stopAllThreads(self):
    """Join every worker thread, logging any thread that cannot be joined.

    BUGFIX: the previous implementation looped `while joinedthreads <
    len(self.threads)` and only incremented the counter when join()
    succeeded. A join() that raises (e.g. joining a thread that was never
    started, or the current thread) raises every time, so the loop could
    spin forever. One pass over the threads is sufficient: join() blocks
    until the thread finishes.
    """
    debug("stopping all threads")
    for thread in self.threads:
        debug("Trying to stop thread %r" % thread)
        try:
            thread.join()
        except Exception:
            # narrowed from a bare 'except:' which also caught
            # KeyboardInterrupt/SystemExit
            warning("what happened ??")
def remoteSetUp(self):
    """Initialize seek/caps parameters from the test arguments, then run
    the generic GStreamer remote setup."""
    self._fakesink = None
    self._gotFirstBuffer = False
    self._gotNewSegment = False
    args = self.arguments
    self._start = args.get("start", 0)
    self._duration = args.get("duration", gst.SECOND)
    self._mstart = args.get("media-start", 5 * gst.SECOND)
    self._mduration = args.get("media-duration", self._duration)
    capsstr = args.get("caps-string", "audio/x-raw-int;audio/x-raw-float")
    warning("Got caps-string:%r", capsstr)
    self._caps = gst.Caps(str(capsstr))
    GStreamerTest.remoteSetUp(self)
def remoteTest(self):
    """Kick the pipeline into its configured initial state and stop the
    test immediately if the state change fails outright."""
    # kickstart pipeline to initial state
    PythonDBusTest.remoteTest(self)
    target = self.__pipeline_initial_state__
    debug("Setting pipeline to initial state %r", target)
    gst.log("Setting pipeline to initial state %r" % target)
    res = self.pipeline.set_state(target)
    debug("set_state returned %r", res)
    gst.log("set_state() returned %r" % res)
    failed = res == gst.STATE_CHANGE_FAILURE
    self.validateStep("pipeline-change-state", not failed)
    if failed:
        warning("Setting pipeline to initial state failed, stopping test")
        gst.warning("State change failed, stopping")
        self.stop()
def _analyzeDecodebin(self):
    """Record caps and duration for every raw stream found by decodebin.

    Non-raw streams are skipped; a failed duration query records -1.
    """
    debug("Querying length")
    for stream in self._streams:
        debug("pad %r / raw:%r", stream.pad, stream.raw)
        if not stream.raw:
            continue
        # Prefer the caps actually negotiated on the pad; fall back to
        # the pad's possible caps when negotiation hasn't happened yet.
        stream.caps = stream.pad.get_negotiated_caps()
        if not stream.caps:
            stream.caps = stream.pad.get_caps()
        try:
            # BUGFIX: 'format' shadowed the builtin; renamed to fmt.
            length, fmt = stream.pad.query_duration(gst.FORMAT_TIME)
        except Exception:
            # narrowed from a bare 'except:' (which also swallowed
            # KeyboardInterrupt/SystemExit)
            warning("duration query failed")
            length = -1
        stream.length = length
        debug("stream length %s", gst.TIME_ARGS(stream.length))
def addMonitor(self, monitor, monitorargs=None):
    """
    Add a monitor to this test instance.

    Checks will be done to ensure that the monitor can be applied
    on this instance.

    Returns True if the monitor was applied succesfully.
    """
    debug("monitor:%r, args:%r", monitor, monitorargs)
    # check if monitor is valid
    if not isinstance(self, monitor.__applies_on__):
        warning("The given monitor cannot be applied on this test")
        return False
    self._monitors.append((monitor, monitorargs))
    # BUGFIX: the docstring promises True on success, but the function
    # used to fall off the end and return None (falsy), so callers
    # checking the return value saw every success as a failure.
    return True
def _getStreamsDuration(self):
    """Return (duration, issimilar) computed over the raw streams.

    issimilar is False when any stream's duration differs from the first
    stream's by more than 20%; duration is -1 when there is no raw stream.
    """
    rawstreams = [stream for stream in self._streams if stream.raw]
    if not rawstreams:
        return (-1, False)
    reference = rawstreams[0].length
    if len(rawstreams) == 1:
        return (reference, True)
    for other in rawstreams[1:]:
        debug("length:%s", gst.TIME_ARGS(other.length))
        if abs(other.length - reference) > (reference / 5):
            warning("length different by more than 20%%")
            return (reference, False)
    return (reference, True)
def setUp(self):
    """Redirect the test's stderr into a temporary GST_DEBUG log file."""
    Monitor.setUp(self)
    if self.test._stderr:
        warning("stderr is already being used, can't setUp monitor")
        return False
    # apply the requested debug level (default: everything at level 2)
    loglevel = self.arguments.get("debug-level", "*:2")
    self.test._environ["GST_DEBUG"] = loglevel
    if loglevel.endswith("5"):
        # level-5 logging is very verbose and slows the test down,
        # so double the timeout to compensate
        if not self.test.setTimeout(self.test.getTimeout() * 2):
            warning("Couldn't change the timeout !")
            return False
    # get file for redirection
    self._logfile, self._logfilepath = self.testrun.get_temp_file(
        nameid="gst-debug-log")
    debug("Got temporary file %s", self._logfilepath)
    self.test._stderr = self._logfile
    return True
def stop(self):
    """
    Stop the test

    Can be called by both the test itself AND external elements
    """
    if self._stopping:
        warning("we were already stopping !!!")
        return
    info("STOPPING %r" % self)
    self._stopping = True
    stoptime = time.time()
    # if we still have the timeoutid, we didn't timeout
    notimeout = False
    if self._testtimeoutid:
        # BUGFIX(consistency): the sibling stop() implementation removes
        # the pending timeout source here; without this the timeout
        # callback could still fire after the test has stopped.
        gobject.source_remove(self._testtimeoutid)
        self._testtimeoutid = 0
        notimeout = True
    self.validateStep("no-timeout", notimeout)
    self.tearDown()
    if self._teststarttime:
        debug("stoptime:%r , teststarttime:%r", stoptime, self._teststarttime)
        self.extraInfo("test-total-duration", stoptime - self._teststarttime)
    for instance in self._monitorinstances:
        instance.tearDown()
    self.emit("done")
def _createTestInstanceCallBack(self, retval):
    """Handle the remote process's answer to the create-test request.

    On success, fetch the remote DBus object, hook up its signals and
    start the remote setup; on any failure, stop the test.
    """
    debug("%s retval:%r", self.uuid, retval)
    if not retval:
        # remote instance creation failed
        self.stop()
        return
    delay = time.time() - self._subprocessconnecttime
    rname = "net.gstreamer.Insanity.Test.Test%s" % self.uuid
    rpath = "/net/gstreamer/Insanity/Test/Test%s" % self.uuid
    # remote instance was successfully created, let's get it
    try:
        remoteobj = self._bus.get_object(rname, rpath)
    except Exception:
        # narrowed from a bare 'except:' which also caught
        # KeyboardInterrupt/SystemExit
        warning("Couldn't get the remote instance for test %r", self.uuid)
        self.stop()
        return
    self.extraInfo("remote-instance-creation-delay", delay)
    self.validateStep("remote-instance-created")
    self._remoteinstance = dbus.Interface(remoteobj,
                                          "net.gstreamer.Insanity.Test")
    # forward the remote test's lifecycle signals to our local handlers
    self._remoteinstance.connect_to_signal("remoteReadySignal",
                                           self._remoteReadyCb)
    self._remoteinstance.connect_to_signal("remoteStopSignal",
                                           self._remoteStopCb)
    self._remoteinstance.connect_to_signal("remoteValidateStepSignal",
                                           self._remoteValidateStepCb)
    self._remoteinstance.connect_to_signal("remoteExtraInfoSignal",
                                           self._remoteExtraInfoCb)
    self.callRemoteSetUp()
def getIterationCheckList(self, iteration, warn=True):
    """
    Returns the instance checklist as a list of tuples of:
    * checkitem name
    * a value indicating the outcome of that checklist item

    That value can be one of: SKIPPED, SUCCESS, FAILURE, EXPECTED_FAILURE
    """
    # every checkitem the test declares; items missing from this
    # iteration's results are treated as skipped below
    allk = self.getFullCheckList().keys()
    unexpected_failures = []

    def to_enum(key, val):
        # map a raw boolean result to the outcome enum, recording
        # genuinely-unexpected failures as a side effect
        if val:
            return self.SUCCESS
        elif self._expected_failures.get(key, False):
            return self.EXPECTED_FAILURE
        else:
            unexpected_failures.append(key)
            return self.FAILURE
    d = dict((k, to_enum(k, v)) for k, v in self.iteration_checklist[iteration])
    # assume success until an unexpected failure or skip is found
    d["no-unexpected-failures"] = 1
    for k in allk:
        if k not in d:
            # item never validated this iteration: expected skip or not?
            if self.isExpectedResult(k, self.SKIPPED, self.iteration_extrainfo[iteration]):
                d[k] = self.EXPECTED_FAILURE
            else:
                unexpected_failures.append(k)
                d[k] = self.SKIPPED
    if unexpected_failures:
        if warn:
            warning("The following tests failed unexpectedly: %s", unexpected_failures)
        d["no-unexpected-failures"] = 0
    return d.items()
def stop(self):
    """
    Stop the test

    Can be called by both the test itself AND external elements
    """
    if self._stopping:
        warning("we were already stopping !!!")
        return
    info("STOPPING %r" % self)
    self._stopping = True
    # A still-pending timeout source means the test did NOT time out;
    # cancel it and remember that fact.
    notimeout = bool(self._testtimeoutid)
    if notimeout:
        gobject.source_remove(self._testtimeoutid)
        self._testtimeoutid = 0
    self.validateChecklistItem("no-timeout", notimeout)
    self._stopMonitors()
    self.emit("stop", self._iteration)
    # Snapshot this iteration's results.
    it = self._iteration
    self.iteration_checklist[it] = self._checklist
    self.iteration_extrainfo[it] = self._extrainfo
    self.iteration_outputfiles[it] = self._outputfiles
    self.iteration_success_percentage[it] = self.getSuccessPercentage()
def _runNext(self):
    """
    Run the next test+arg+monitor combination

    Returns False on every path so it can be scheduled as a glib idle
    callback (see the gobject.idle_add(self._runNext) below).
    """
    if len(self._runninginstances) >= self._maxnbtests:
        warning("We were already running the max number of tests")
        return False
    info("Getting next test arguments for this batch")
    try:
        # _currentarguments is an iterator of keyword-argument dicts
        # (py2 iterator protocol)
        kwargs = self._currentarguments.next()
    except StopIteration:
        if len(self._runninginstances):
            info("No more arguments, but still a test running")
            return False
        info("No more arguments, we're finished with this batch")
        self._runNextBatch()
        return False
    # grab the next arguments
    testclass = self._currenttest
    monitors = self._currentmonitors
    # create test with arguments
    debug("Creating test %r with arguments %r" % (testclass, kwargs))
    test = testclass(testrun=self, bus=self._bus,
                     bus_address=self._bus_address, **kwargs)
    if monitors:
        for monitor in monitors:
            test.addMonitor(*monitor)
    test.connect("start", self._singleTestStart)
    test.connect("done", self._singleTestDone)
    test.connect("check", self._singleTestCheck)
    # start test
    allok = test.run()
    if allok:
        # add instance to running tests
        self._runninginstances.append(test)
        # NOTE(review): warning-level is used here for what looks like
        # ordinary progress logging — consider debug/info instead
        warning("Just added a test %d/%d", len(self._runninginstances), self._maxnbtests)
        # if we can still create a new test, call ourself again
        if len(self._runninginstances) < self._maxnbtests:
            warning("still more test to run (current:%d/max:%d)", len(self._runninginstances), self._maxnbtests)
            gobject.idle_add(self._runNext)
    return False
def _voidRemoteErrBackHandler(self, exc, caller=None, fatal=True):
    """Generic DBus error-back handler: log the failure and, when it is
    fatal, abort the test by tearing everything down."""
    error("%r : %s", caller, exc)
    if not fatal:
        return
    warning("FATAL : aborting test")
    # a fatal error happened, DIVE DIVE DIVE !
    self.tearDown()