Example No. 1
class PerformanceMetric:
    def __init__(self):
        self.switches = {"VIDEO": Trace("seconds", "bps"), "AUDIO": Trace("seconds", "bps")}
        self.buffer_levels = {"VIDEO": BufferLevelMetric(), "AUDIO": BufferLevelMetric()}
        self.bps_history = Trace("seconds", "bps")

        self.bps_history.append(0, 0)
        self.buffer_levels["VIDEO"].append(0, 0)
        self.buffer_levels["AUDIO"].append(0, 0)
        self.switches["VIDEO"].append(0, 0)
        self.switches["AUDIO"].append(0, 0)

    @property
    def underrun_count(self):
        return self.buffer_levels["VIDEO"].underrun_count + self.buffer_levels["AUDIO"].underrun_count

    def min_buffer_level(self):
        return min(self.buffer_levels["VIDEO"].current_value, self.buffer_levels["AUDIO"].current_value)

    """ So far, use reciproc of underruns to give a score. 1.0 perfect score """

    def score(self):
        return 1.0 / (self.underrun_count + 1)

    def print_stats(self):
        print("Score: %.4f" % self.score())
        print("Underruns: %d" % self.underrun_count)
        print(self.switches["VIDEO"])
Example No. 2
class TestRunExecCounts(unittest.TestCase):
    """A simple sanity test of line-counting, via runctx (exec)"""
    def setUp(self):
        self.my_py_filename = fix_ext_py(__file__)
        self.addCleanup(sys.settrace, sys.gettrace())

    def test_exec_counts(self):
        self.tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
        code = r'''traced_func_loop(2, 5)'''
        code = compile(code, __file__, 'exec')
        self.tracer.runctx(code, globals(), vars())

        firstlineno = get_firstlineno(traced_func_loop)
        expected = {
            (self.my_py_filename, firstlineno + 1): 1,
            (self.my_py_filename, firstlineno + 2): 6,
            (self.my_py_filename, firstlineno + 3): 5,
            (self.my_py_filename, firstlineno + 4): 1,
        }

        # When used through 'run', some other spurious counts are produced, like
        # the settrace of threading, which we ignore; here we just make sure that
        # the counts for traced_func_loop are right.
        #
        for k in expected.keys():
            self.assertEqual(self.tracer.results().counts[k], expected[k])
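
For context, here is a minimal standalone sketch of the same standard-library pattern: trace.Trace with count=1 counts how many times each line runs when code is executed through runctx. The loop function below is only an illustration, not part of the test suite.

import trace

def loop(n):
    total = 0
    for i in range(n):
        total += i
    return total

tracer = trace.Trace(count=1, trace=0)
tracer.runctx('loop(5)', globals(), locals())

# results().counts maps (filename, lineno) -> number of executions
for (filename, lineno), hits in sorted(tracer.results().counts.items()):
    print('%s:%d executed %d time(s)' % (filename, lineno, hits))
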
Example No. 3
class TestCallers(unittest.TestCase):
    """White-box testing of callers tracing"""
    def setUp(self):
        self.addCleanup(sys.settrace, sys.gettrace())
        self.tracer = Trace(count=0, trace=0, countcallers=1)
        self.filemod = my_file_and_modname()

    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'pre-existing trace function throws off measurements')
    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)

        expected = {
            ((os.path.splitext(trace.__file__)[0] + '.py', 'trace', 'Trace.runfunc'),
                (self.filemod + ('traced_func_importing_caller',))): 1,
            ((self.filemod + ('traced_func_simple_caller',)),
                (self.filemod + ('traced_func_linear',))): 1,
            ((self.filemod + ('traced_func_importing_caller',)),
                (self.filemod + ('traced_func_simple_caller',))): 1,
            ((self.filemod + ('traced_func_importing_caller',)),
                (self.filemod + ('traced_func_importing',))): 1,
            ((self.filemod + ('traced_func_importing',)),
                (fix_ext_py(testmod.__file__), 'testmod', 'func')): 1,
        }
        self.assertEqual(self.tracer.results().callers, expected)
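
As a rough standalone sketch of what countcallers=1 records (the helper functions here are made up for illustration; note that Trace.runfunc itself also shows up as a caller, which is exactly what the expected dictionary above accounts for):

import trace

def callee():
    return 1

def caller():
    return callee()

tracer = trace.Trace(count=0, trace=0, countcallers=1)
tracer.runfunc(caller)

# results().callers maps ((caller_file, caller_module, caller_func),
#                         (callee_file, callee_module, callee_func)) -> call count
for (parent, child), count in tracer.results().callers.items():
    print('%s -> %s: %d' % (parent[2], child[2], count))
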
Example No. 4
    def test_linear_methods(self):
        # XXX todo: later add 'static_method_linear' and 'class_method_linear'
        # here, once issue1764286 is resolved
        #
        for methname in ["inst_method_linear"]:
            tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
            traced_obj = TracedClass(25)
            method = getattr(traced_obj, methname)
            tracer.runfunc(method, 20)

            firstlineno = get_firstlineno(method)
            expected = {(self.my_py_filename, firstlineno + 1): 1}
            self.assertEqual(tracer.results().counts, expected)
Example No. 5
    def get_next_trace(self):
        new_trace = Trace([400], self.screen, noise=True)
        for i, (current_peak, next_peak) in enumerate(zip(self.current_trace.peaks, new_trace.peaks)):
            new_pos = self.get_new_peak_position(current_peak)
            while new_pos <= self.bottom_pad or new_pos >= self.dim-self.top_pad:
                new_pos = self.get_new_peak_position(current_peak)
            new_trace.peaks[i] = new_pos
            #print new_trace.peaks

        if not self.artificial:
            new_trace.data = self.survey.data[self.current_real_trace, :]
            self.current_real_trace += 1

        return new_trace
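
The while-loop above is a simple rejection step: keep redrawing until the candidate position falls inside the padded band. A generic sketch of the same idea, with made-up bounds and a made-up draw function:

import random

def draw_in_bounds(draw, low, high):
    # Redraw until the candidate value lies strictly inside (low, high).
    value = draw()
    while value <= low or value >= high:
        value = draw()
    return value

pos = draw_in_bounds(lambda: random.gauss(200, 50), low=20, high=380)
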
Example No. 6
 def init_traces(self):
     with open('1.tra', 'r') as f:
         trace_number = int(f.readline())
         for trace_counter in range(trace_number):
             t = Trace()
             point_number = int(f.readline())
             points = []
             for point_counter in range(point_number):
                 point_s = f.readline()[:-1]
                 point = Point([int(x) for x in point_s.split(' ')])
                 points.append(point)
             t.points = points
             self.traces.append(t)
             self.current_temp_places.append(None)
Example No. 7
def mh_query2(model, pred, answer, samples_count, lag=1):
    """
    Metropolis-Hastings algorithm for sampling
    :param model: model to execute
    :param samples_count: how many samples we want
    :type samples_count: int
    :return: samples
    :rtype: list
    """
    MCMC_shared.mh_flag = True
    MCMC_shared.iteration = 0
    samples = []
    miss = True
    model()
    transitions = 0
    rejection = 0
    while len(samples) < samples_count:
        MCMC_shared.iteration += 1
        variables = MCMC_shared.trace.get_vector()
        vector_vals_drift = variables.values()
        vector = [val[0] for val in vector_vals_drift]
        drifts = [val[1] for val in vector_vals_drift]
        shifted_vector = numpy.random.multivariate_normal(vector, numpy.diag(drifts))
        new_trace = Trace(MCMC_shared.trace)
        new_trace.set_vector(dict(zip(variables.keys(), shifted_vector.tolist())), MCMC_shared.iteration)
        old_trace = MCMC_shared.trace
        MCMC_shared.trace = new_trace
        sample = model()
        while not miss and new_trace._likelihood == -float("inf"):
            new_trace.clean(MCMC_shared.iteration)
            new_trace._likelihood = 0
            for name, (chunk, iteration) in new_trace.mem.items():
                if chunk.erp.log_likelihood(chunk.x, *chunk.erp_parameters) == -float("inf"):
                    new_chunk = Chunk(chunk.erp,
                                      numpy.random.normal(old_trace.get(name).x, chunk.drift / 2),
                                      chunk.erp_parameters,
                                      drift=chunk.drift)
                    new_trace.store(name, new_chunk, iteration)
            sample = model()
        MCMC_shared.trace = old_trace
        probability = log(uniform())
        # r = erp.log_proposal_prob()
        if probability < new_trace._likelihood - old_trace._likelihood and (miss or pred(sample)):
            if miss and pred(sample):
                miss = False
                MCMC_shared.drift = 0.05
            transitions += 1
            if (transitions % lag) == 0:
                if not miss:
                    # print sample, rejection
                    samples.append(answer(sample))
            rejection = 0
            MCMC_shared.trace = new_trace
            MCMC_shared.trace.clean(MCMC_shared.iteration)
        else:
            rejection += 1

    return samples
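
Stripped of the project-specific Trace/MCMC_shared machinery, the accept/reject core of Metropolis-Hastings looks like the following self-contained sketch (NumPy only; the random-walk proposal and standard-normal target are just examples):

import numpy as np

def mh_sample(log_target, x0, samples_count, proposal_std=1.0, lag=1):
    samples, x, log_p = [], x0, log_target(x0)
    accepted = 0
    while len(samples) < samples_count:
        proposal = x + np.random.normal(scale=proposal_std)
        log_p_new = log_target(proposal)
        # Accept with probability min(1, p(new) / p(old)), computed in log space.
        if np.log(np.random.uniform()) < log_p_new - log_p:
            x, log_p = proposal, log_p_new
            accepted += 1
            if accepted % lag == 0:
                samples.append(x)
    return samples

# Example: draw 1000 samples from a standard normal distribution.
draws = mh_sample(lambda x: -0.5 * x * x, x0=0.0, samples_count=1000)
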
Example No. 8
	def upsert_bulk(self, _index, type_key, id_key, bulk):
		"Updates a bulk of documents in the same index."
		"The type and id of each document are encoded in the document. Keys are provided to retrieve them"
		"The type and id fields are removed in the document after inserted as _type and _id"
		"Returns the number of documents upserted."
		docs_upserted = 0
		for document in bulk:
			_type = document[type_key]
			_id = document[id_key]
			del document[type_key]
			del document[id_key]
			upserted = self.upsert_document(_index, _type, _id, document)
			Trace.info("upserted: " + json.dumps(upserted))
			if (upserted["_id"] == _id):
				docs_upserted += 1
		return docs_upserted
Example No. 9
    def trim_lat_df(self, start, lat_df):
        if lat_df.empty:
            return lat_df

        lat_df = Trace.squash_df(lat_df, start, lat_df.index[-1], "t_delta")
        # squash_df only updates t_delta, remove t_start to make sure it's not used
        return lat_df.drop('t_start', axis=1)
Example No. 10
 def _run_with_trace(self):
     from trace import Trace
     trace = Trace(ignoredirs=[sys.prefix, sys.exec_prefix], trace=False,
                   count=True)
     try:
         trace.runfunc(self._run_tests)
     finally:
         results = trace.results()
         real_stdout = sys.stdout
         sys.stdout = open(self.coverage_summary, 'w')
         try:
             results.write_results(show_missing=True, summary=True,
                                   coverdir=self.coverage_dir)
         finally:
             sys.stdout.close()
             sys.stdout = real_stdout
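
A shorter standard-library-only sketch of the same coverage-report idea, without the stdout redirection (the work function and the 'coverage' directory name are placeholders):

import sys
import trace

def work():
    return sum(range(10))

tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix],
                     trace=False, count=True)
tracer.runfunc(work)

# Writes annotated *.cover files into ./coverage and prints a summary table.
tracer.results().write_results(show_missing=True, summary=True,
                               coverdir='coverage')
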
Example No. 11
class BufferLevelMetric(Trace):
    def __init__(self):
        Trace.__init__(self, "seconds", "seconds")
        self._level = 0.0 # in seconds
        self._underruns = Trace("seconds", "underrun duration (seconds)")

    @property
    def underrun_count(self):
        return self._underruns.count

    def increase_by(self, absolute_time, level_increase):
        val = self.current_value
        if val is None:
            val = 0.0
        self.append(absolute_time, val + level_increase)

    def decrease_by(self, absolute_time, level_decrease):
        val = self.current_value
        if val is None:
            val = 0.0
        val -= level_decrease
        if val <= 0.0:
            # record an underrun [time, duration_of_underrun]
            self._underruns.append(absolute_time, abs(val))
            # clamp level to 0.0
            val = 0.0
        self.append(absolute_time, val)

    @property
    def level(self):
        """
        Alias for current_value or current_y_value
        :return: current_y_value
        """
        return self.current_value

    def __unicode__(self):
        val = self.current_value
        if val is not None:
            return "BufferLevel(t=%.2fs): %.2fs" % (self.current_x_value, self.current_value)
        else:
            return "BufferLevel(t=0): 0"

    def __str__(self):
        return self.__unicode__()
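
A hypothetical usage sketch, assuming the custom Trace base class used throughout these examples behaves as shown (times and levels in seconds):

buffer_metric = BufferLevelMetric()

buffer_metric.increase_by(absolute_time=0.0, level_increase=4.0)   # 4 s buffered
buffer_metric.decrease_by(absolute_time=2.0, level_decrease=2.0)   # 2 s played back
buffer_metric.decrease_by(absolute_time=5.0, level_decrease=3.0)   # drains past zero -> underrun

print(buffer_metric.level)            # 0.0 (clamped)
print(buffer_metric.underrun_count)   # 1
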
Example No. 12
    def __init__(self, *args, **kwargs):
        super(TestTrace, self).__init__(*args, **kwargs)

        self.test_trace = os.path.join(self.traces_dir, 'test_trace.txt')

        self.platform = self._get_platform()

        self.trace_path = os.path.join(self.traces_dir, 'trace.txt')
        self.trace = Trace(self.platform, self.trace_path, self.events)
Example No. 13
    def __init__(self):
        self.switches = {"VIDEO": Trace("seconds", "bps"), "AUDIO": Trace("seconds", "bps")}
        self.buffer_levels = {"VIDEO": BufferLevelMetric(), "AUDIO": BufferLevelMetric()}
        self.bps_history = Trace("seconds", "bps")

        self.bps_history.append(0, 0)
        self.buffer_levels["VIDEO"].append(0, 0)
        self.buffer_levels["AUDIO"].append(0, 0)
        self.switches["VIDEO"].append(0, 0)
        self.switches["AUDIO"].append(0, 0)
Example No. 14
  def ReviewUrl(self, userEmail):
    if self._review_url is None:
      if self.review is None:
        return None

      u = self.review
      if u.startswith('persistent-'):
        u = u[len('persistent-'):]
      if u.split(':')[0] not in ('http', 'https', 'sso', 'ssh'):
        u = 'http://%s' % u
      if u.endswith('/Gerrit'):
        u = u[:len(u) - len('/Gerrit')]
      if u.endswith('/ssh_info'):
        u = u[:len(u) - len('/ssh_info')]
      if not u.endswith('/'):
        u += '/'
      http_url = u

      if u in REVIEW_CACHE:
        self._review_url = REVIEW_CACHE[u]
      elif 'REPO_HOST_PORT_INFO' in os.environ:
        host, port = os.environ['REPO_HOST_PORT_INFO'].split()
        self._review_url = self._SshReviewUrl(userEmail, host, port)
        REVIEW_CACHE[u] = self._review_url
      elif u.startswith('sso:') or u.startswith('ssh:'):
        self._review_url = u  # Assume it's right
        REVIEW_CACHE[u] = self._review_url
      else:
        try:
          info_url = u + 'ssh_info'
          from trace import Trace
          Trace("urlopen %s" % info_url)
          try:
            info = urllib.request.urlopen(info_url).read()
          except Exception:
            info = 'NOT_AVAILABLE'

          if info == 'NOT_AVAILABLE' or '<' in info:
            # If `info` contains '<', we assume the server gave us some sort
            # of HTML response back, like maybe a login page.
            #
            # Assume HTTP if SSH is not enabled or ssh_info doesn't look right.
            self._review_url = http_url
          else:
            host, port = info.split()
            self._review_url = self._SshReviewUrl(userEmail, host, port)
        except urllib.error.HTTPError as e:
          raise UploadError('%s: %s' % (self.review, str(e)))
        except urllib.error.URLError as e:
          raise UploadError('%s: %s' % (self.review, str(e)))
        except HTTPException as e:
          raise UploadError('%s: %s' % (self.review, e.__class__.__name__))

        REVIEW_CACHE[u] = self._review_url
    return self._review_url + self.projectname
Example No. 15
def plotdir(run_dir, platform):
    global args
    tasks = None
    pa = None

    # Load RTApp performance data
    try:
        pa = PerfAnalysis(run_dir)

        # Get the list of RTApp tasks
        tasks = pa.tasks()
        logging.info('Tasks: %s', tasks)
    except ValueError:
        pa = None
        logging.info('No performance data found')

    # Load Trace Analysis modules
    trace = Trace(run_dir, platform=platform)

    # Define time ranges for all the temporal plots
    trace.setXTimeRange(args.tmin, args.tmax)

    # Tasks plots
    if 'tasks' in args.plots:
        trace.analysis.tasks.plotTasks(tasks)
        if pa:
            for task in tasks:
                pa.plotPerf(task)

    # Cluster and CPUs plots
    if 'clusters' in args.plots:
        trace.analysis.frequency.plotClusterFrequencies()
    if 'cpus' in args.plots:
        trace.analysis.cpus.plotCPU()

    # SchedTune plots
    if 'stune' in args.plots:
        trace.analysis.eas.plotSchedTuneConf()
    if 'ediff' in args.plots:
        trace.analysis.eas.plotEDiffTime()
    if 'edspace' in args.plots:
        trace.analysis.eas.plotEDiffSpace()
Example No. 16
def plotdir(run_dir, platform):
    global args
    tasks = None
    pa = None

    # Load RTApp performance data
    try:
        pa = PerfAnalysis(run_dir)

        # Get the list of RTApp tasks
        tasks = pa.tasks()
        logging.info('Tasks: %s', tasks)
    except ValueError:
        pa = None
        logging.info('No performance data found')

    # Load Trace Analysis modules
    trace = Trace(platform, run_dir)

    # Define time ranges for all the temporal plots
    trace.setXTimeRange(args.tmin, args.tmax)

    # Tasks plots
    if 'tasks' in args.plots:
        trace.analysis.tasks.plotTasks(tasks)
        if pa:
            for task in tasks:
                pa.plotPerf(task)

    # Cluster and CPUs plots
    if 'clusters' in args.plots:
        trace.analysis.frequency.plotClusterFrequencies()
    if 'cpus' in args.plots:
        trace.analysis.cpus.plotCPU()

    # SchedTune plots
    if 'stune' in args.plots:
        trace.analysis.eas.plotSchedTuneConf()
    if 'ediff' in args.plots:
        trace.analysis.eas.plotEDiffTime()
    if 'edspace' in args.plots:
        trace.analysis.eas.plotEDiffSpace()
Example No. 17
def average_traces(traces):
    one_curve = traces[0]
    mean_SA = np.mean([trace.SA for trace in traces], axis=0)
    std_err = np.std([trace.SA for trace in traces], axis=0)
    prop_err = np.sqrt(np.sum([trace.sigSA**2 for trace in traces],
                              axis=0)) / (len(traces) - 1)
    tot_err = np.sqrt(std_err**2 + prop_err**2)
    averaged_vector = Trace(one_curve.q, np.empty_like(one_curve.q),
                            np.empty_like(one_curve.q), tot_err, mean_SA,
                            np.empty_like(one_curve.q))
    return averaged_vector
Example No. 18
class TestRunExecCounts(unittest.TestCase):
    """A simple sanity test of line-counting, via runctx (exec)"""
    def setUp(self):
        self.my_py_filename = fix_ext_py(__file__)
        self.addCleanup(sys.settrace, sys.gettrace())

    def test_exec_counts(self):
        self.tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
        code = 'traced_func_loop(2, 5)'
        code = compile(code, __file__, 'exec')
        self.tracer.runctx(code, globals(), vars())
        firstlineno = get_firstlineno(traced_func_loop)
        expected = {
            (self.my_py_filename, firstlineno + 1): 1,
            (self.my_py_filename, firstlineno + 2): 6,
            (self.my_py_filename, firstlineno + 3): 5,
            (self.my_py_filename, firstlineno + 4): 1
        }
        for k in expected.keys():
            self.assertEqual(self.tracer.results().counts[k], expected[k])
Example No. 19
    def load_v01(self, path, desc):
        """Load version 0.1 sample from the filesystem."""
        tnames = desc["traces"]
        for tname in tnames:
            fname = os.path.join(path, tname + ".trace")
            tr = Trace().load(fname)
            self.add_trace(tname, tr)

        for ename in desc["extra"]:
            data = desc["extra"][ename]
            self.add_extra(ename, data)
Example No. 20
 def create_generator_from_logs(self, tmid_name: str, log_file: TextIO,
                                generate_only_faulty_method_traces: bool):
     '''
     :param tmid_name: identifier for the test method, example: 'BigFractionFormatTest.testDenominatorFormat'
     :param log_file: file handler for the log file
     :param generate_only_faulty_method_traces: will only generate traces for faulty methods
     :return:
     '''
     for log in log_file:
         trace = Trace(tmid_name, log)
         if generate_only_faulty_method_traces and trace.mid_name not in self.actual_faults_method_fullname_set:
             continue
         trace.tmid_guid = self.map_from_methodfullname_to_guid[
             trace.tmid_name]
         if trace.mid_name not in self.map_from_methodfullname_to_guid:
             self.map_from_methodfullname_to_guid[
                 trace.mid_name] = self.gen_method_id()
         trace.mid_guid = self.map_from_methodfullname_to_guid[
             trace.mid_name]
         yield (trace.tmid_guid, trace.mid_guid, trace.vector)
Example No. 21
    def __init__(self, bitrates):
        Adaptation.__init__(self, bitrates)
        self.max_seconds = 50.0
        self.level_low_seconds = 10.0  # critical buffer level. Fill as fast as possible until level_high_seconds reached if below this value
        self.level_high_seconds = 30.0  # stable buffer level. Try to maintain current bitrate or improve it
        self.bps_history = Trace("time", "bps")
        self.bitrate_selections = {"AUDIO": Trace("time", "Audio-Bitrate selections"),
                                   "VIDEO": Trace("time", "Video-Bitrate selections")}

        self.sim_state = None
        # self.bitrate_selections["VIDEO"].append(-1, 0)
        # self.bitrate_selections["AUDIO"].append(-1, 0)

        self.my_bps = Trace("seconds", "bps")

        self.ma4_filter = alg.IterativeMovingAverage(4)
        self.ma10_filter = alg.IterativeMovingAverage(10)
        self.ma50_filter = alg.IterativeMovingAverage(80)

        self.last_index = 0
Example No. 22
    def make_trace(self, in_data):
        """
        Get a trace from an embedded string of textual trace data
        """
        with open(self.test_trace, "w") as fout:
            fout.write(in_data)

        return Trace(self.test_trace,
                     self.events,
                     self.platform,
                     normalize_time=False)
Example No. 23
    def test_exec_counts(self):
        self.tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
        code = r'''traced_func_loop(2, 5)'''
        code = compile(code, __file__, 'exec')
        self.tracer.runctx(code, globals(), vars())

        firstlineno = get_firstlineno(traced_func_loop)
        expected = {
            (self.my_py_filename, firstlineno + 1): 1,
            (self.my_py_filename, firstlineno + 2): 6,
            (self.my_py_filename, firstlineno + 3): 5,
            (self.my_py_filename, firstlineno + 4): 1,
        }

        # When used through 'run', some other spurious counts are produced, like
        # the settrace of threading, which we ignore; here we just make sure that
        # the counts for traced_func_loop are right.
        #
        for k in expected.keys():
            self.assertEqual(self.tracer.results().counts[k], expected[k])
Example No. 24
class TestFuncs(unittest.TestCase):
    """White-box testing of funcs tracing"""
    def setUp(self):
        self.addCleanup(sys.settrace, sys.gettrace())
        self.tracer = Trace(count=0, trace=0, countfuncs=1)
        self.filemod = my_file_and_modname()
        self._saved_tracefunc = sys.gettrace()

    def tearDown(self):
        if self._saved_tracefunc is not None:
            sys.settrace(self._saved_tracefunc)

    def test_simple_caller(self):
        self.tracer.runfunc(traced_func_simple_caller, 1)

        expected = {
            self.filemod + ('traced_func_simple_caller', ): 1,
            self.filemod + ('traced_func_linear', ): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)

    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)

        expected = {
            self.filemod + ('traced_func_simple_caller', ): 1,
            self.filemod + ('traced_func_linear', ): 1,
            self.filemod + ('traced_func_importing_caller', ): 1,
            self.filemod + ('traced_func_importing', ): 1,
            (fix_ext_py(testmod.__file__), 'testmod', 'func'): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)

    @unittest.skipIf(
        hasattr(sys, 'gettrace') and sys.gettrace(),
        'pre-existing trace function throws off measurements')
    def test_inst_method_calling(self):
        obj = TracedClass(20)
        self.tracer.runfunc(obj.inst_method_calling, 1)

        expected = {
            self.filemod + ('TracedClass.inst_method_calling', ): 1,
            self.filemod + ('TracedClass.inst_method_linear', ): 1,
            self.filemod + ('traced_func_linear', ): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)
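
For reference, a self-contained sketch of countfuncs=1 with throwaway functions (nothing here comes from the test module):

import trace

def helper():
    return 42

def entry():
    return helper()

tracer = trace.Trace(count=0, trace=0, countfuncs=1)
tracer.runfunc(entry)

# results().calledfuncs maps (filename, modulename, funcname) -> 1 for each
# function entered while tracing.
for filename, modulename, funcname in sorted(tracer.results().calledfuncs):
    print(modulename, funcname)
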
Example No. 25
    def test_time_range(self):
        """
        TestTrace: time_range is the duration of the trace
        """
        expected_duration = 6.676497

        trace = Trace(self.trace_path,
                      self.events,
                      self.platform,
                      normalize_time=False)

        self.assertAlmostEqual(trace.time_range, expected_duration, places=6)
Example No. 26
class TestFuncs(unittest.TestCase):
    """White-box testing of funcs tracing"""
    def setUp(self):
        self.addCleanup(sys.settrace, sys.gettrace())
        self.tracer = Trace(count=0, trace=0, countfuncs=1)
        self.filemod = my_file_and_modname()
        self._saved_tracefunc = sys.gettrace()

    def tearDown(self):
        if self._saved_tracefunc is not None:
            sys.settrace(self._saved_tracefunc)

    def test_simple_caller(self):
        self.tracer.runfunc(traced_func_simple_caller, 1)

        expected = {
            self.filemod + ('traced_func_simple_caller',): 1,
            self.filemod + ('traced_func_linear',): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)

    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)

        expected = {
            self.filemod + ('traced_func_simple_caller',): 1,
            self.filemod + ('traced_func_linear',): 1,
            self.filemod + ('traced_func_importing_caller',): 1,
            self.filemod + ('traced_func_importing',): 1,
            (fix_ext_py(testmod.__file__), 'testmod', 'func'): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)

    @unittest.skipIf(hasattr(sys, 'gettrace') and sys.gettrace(),
                     'pre-existing trace function throws off measurements')
    def test_inst_method_calling(self):
        obj = TracedClass(20)
        self.tracer.runfunc(obj.inst_method_calling, 1)

        expected = {
            self.filemod + ('TracedClass.inst_method_calling',): 1,
            self.filemod + ('TracedClass.inst_method_linear',): 1,
            self.filemod + ('traced_func_linear',): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)
Example No. 27
 def generate_sequential_function_calls(self):
     """generate sequential function calls
     for tracing source code and plotting sequence diagram.
     """
     # generating sequence diagram for a use-case
     # _ = GenerateSequenceDiagram(
     #     self.driver_path, self.driver_name, self.source_code_path[0])
     spec = importlib.util.spec_from_file_location(self.driver_name,
                                                   self.driver_path)
     global foo
     foo = self.foo
     foo = importlib.util.module_from_spec(spec)
     spec.loader.exec_module(foo)
     tracer = Trace(countfuncs=1, countcallers=1, timing=1)
     tracer.run('foo.{}()'.format(self.driver_function))
     results = tracer.results()
     caller_functions = results.callers
     function_sequence = []  # consists of all functions called in sequence
     for caller, callee in caller_functions:
         _, caller_module, caller_function = caller
         _, callee_module, callee_function = callee
         if caller_module not in self.source_code_modules or callee_module not in self.source_code_modules:
             logging.debug(
                 "Following modules are not in source code and thus to be ignored:"
             )
             logging.debug(caller_module)
             continue
         function_sequence.append([(caller_module, caller_function),
                                   (callee_module, callee_function)])
     logging.debug("Function sequence is: ")
     for sequence in function_sequence:
         logging.debug(sequence)
     self.df = self.write_in_excel.integrate_sequence_diagram_in_df(
         self.df, function_sequence, self.use_case, self.driver_function,
         self.skip_cols)
     self.write_in_excel.write_df_to_excel(self.df, 'sheet_one',
                                           self.skip_cols,
                                           self.classes_covered,
                                           self.use_case)
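
The importlib portion of the method above follows the standard "load a module from an explicit file path" recipe; a minimal sketch (the 'mymod.py' path is hypothetical):

import importlib.util

spec = importlib.util.spec_from_file_location('mymod', 'mymod.py')
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)

# The loaded module object can now be used like a regular import.
print(module.__name__)
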
Example No. 28
class TestCallers(unittest.TestCase):
    """White-box testing of callers tracing"""
    def setUp(self):
        self.tracer = Trace(count=0, trace=0, countcallers=1)
        self.filemod = my_file_and_modname()

    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)

        expected = {
            ((os.path.splitext(trace.__file__)[0] + '.py', 'trace', 'Trace.runfunc'), (self.filemod + ('traced_func_importing_caller', ))):
            1,
            ((self.filemod + ('traced_func_simple_caller', )), (self.filemod + ('traced_func_linear', ))):
            1,
            ((self.filemod + ('traced_func_importing_caller', )), (self.filemod + ('traced_func_simple_caller', ))):
            1,
            ((self.filemod + ('traced_func_importing_caller', )), (self.filemod + ('traced_func_importing', ))):
            1,
            ((self.filemod + ('traced_func_importing', )), (fix_ext_py(testmod.__file__), 'testmod', 'func')):
            1,
        }
        self.assertEqual(self.tracer.results().callers, expected)
Example No. 29
    def test_time_range_window(self):
        """
        TestTrace: time_range is the duration of the trace in the given window
        """
        expected_duration = 4.0

        trace = Trace(self.trace_path,
                      self.events,
                      self.platform,
                      normalize_time=False,
                      window=(76.402065, 80.402065))

        self.assertAlmostEqual(trace.time_range, expected_duration, places=6)
Example No. 30
class TestCallers(unittest.TestCase):
    """White-box testing of callers tracing"""
    def setUp(self):
        self.tracer = Trace(count=0, trace=0, countcallers=1)
        self.filemod = my_file_and_modname()

    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)

        expected = {
            ((os.path.splitext(trace.__file__)[0] + '.py', 'trace', 'Trace.runfunc'),
                (self.filemod + ('traced_func_importing_caller',))): 1,
            ((self.filemod + ('traced_func_simple_caller',)),
                (self.filemod + ('traced_func_linear',))): 1,
            ((self.filemod + ('traced_func_importing_caller',)),
                (self.filemod + ('traced_func_simple_caller',))): 1,
            ((self.filemod + ('traced_func_importing_caller',)),
                (self.filemod + ('traced_func_importing',))): 1,
            ((self.filemod + ('traced_func_importing',)),
                (fix_ext_py(testmod.__file__), 'testmod', 'func')): 1,
        }
        self.assertEqual(self.tracer.results().callers, expected)
Example No. 31
def get_windows_symlink(file_path):
    if os.path.isdir(file_path):
        root = os.path.abspath(os.path.join(file_path, os.pardir))
        file_object = os.path.split(file_path)[1]
        if not file_object:
            file_object = os.path.split(os.path.split(file_object)[0])[1]
    else:
        root = os.path.dirname(file_path)
        file_object = os.path.split(file_path)[1]

    cmd = ['cmd', '/c', 'dir', '/AL', root]
    try:
        Trace(' '.join(cmd))
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except:
        return None

    lines = [s.strip() for s in out.split('\n')]
    if len(lines) < 6:
        return None

    pattern = re.compile(r'.*<(.*)>\s*(.*) \[(.*)\]$')
    for line in lines[5:]:
        result = pattern.match(line)
        if result:
            ftype = result.group(1)
            fname = result.group(2)
            flink = result.group(3)
            if file_object == fname:
                if ftype == 'SYMLINK' or ftype == 'SYMLINKD':
                    new_path = os.path.realpath(
                        os.path.join(os.path.dirname(file_path), flink))
                    Trace("Relative link found: %s -> %s -> %s", fname, flink,
                          new_path)
                else:
                    new_path = flink
                    Trace("Absolute link found: %s -> %s", fname, flink)
                return new_path
    return None
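
To make the parsing step easier to follow, here is the SYMLINK regex applied to a single made-up `dir /AL` output line (the timestamps, link name, and target path are invented):

import re

pattern = re.compile(r'.*<(.*)>\s*(.*) \[(.*)\]$')
line = '01/01/2024  10:00 AM    <SYMLINKD>     mylink [C:\\target\\dir]'
match = pattern.match(line)
if match:
    ftype, fname, flink = match.group(1), match.group(2), match.group(3)
    print(ftype, fname, flink)   # SYMLINKD mylink C:\target\dir
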
Example No. 32
    def post(self):
        self.logger.info('POST= %s' % self.request.body)

        traceReq = jsonpickle.decode(self.request.body)
        trace = Trace(traceReq['traceseq'], traceReq['customerseq'],
                      traceReq['title'], traceReq['mainclass'],
                      traceReq['subclass'])

        traceAddSql = "INSERT INTO tb_e2e_trace_list (trace_seq, customer_seq, trace_title, create_dttm) \
                        VALUES (       %d,           %d,    '%s',       now())" % (
            trace.traceseq, trace.customerseq, trace.title)
        self.logger.info("traceAddSql= %s" % traceAddSql)
        self.dbengine.execute(traceAddSql)
Example No. 33
 def __parse_trace_dirs(self):
     ## assume directory struct of <....>\<app_package>\trace_<ID>
     # so each item should just be trace_<ID>
     for item in os.listdir(self.app_dir):
         # check just in case something else is in the directory that is not
         # a trace directory
         item_info = item.split("_")
         if ((item_info[0]) == "trace"):
             #print ("Trace: "+item_info[1])
             trace_dir = self.app_dir + "\\" + item
             # should be <ID>
             #print("app: "+str(self.id))
             self.traces[item_info[1]] = Trace(trace_dir, self.id)
Example No. 34
def trace(func, **kwargs) -> TraceResult:
    # find the file where the original function is defined
    original_func = inspect.unwrap(func)
    file_name = original_func.__code__.co_filename

    t = Trace(trace=False)
    # Ignore everything except the file where the function is defined.
    # It makes the tracing much faster.
    t.ignore = Only(file_name)  # type: ignore

    old_trace = sys.gettrace()
    try:
        func_result: Any = t.runfunc(func, **kwargs)  # type: ignore
    finally:
        # restore previous tracer
        sys.settrace(old_trace)     # pragma: no cover
    return _collect_trace_results(  # pragma: no cover
        t=t,
        func=original_func,
        file_name=file_name,
        func_result=func_result,
    )
Example No. 35
def parse_dat(filename):
    data = read_table(filename,
                      delimiter="    ",
                      engine='python',
                      skiprows=1,
                      names=['q', 'I', 'sigI'])
    q = data.q
    SA = data.I
    sigSA = data.sigI
    S = np.empty_like(data.q)
    sigS = np.empty_like(data.q)
    Nj = np.empty_like(data.q)
    return Trace(q, sigS, S, sigSA, SA, Nj)
Example No. 36
def redirect_all(executable):
    old_sysin = sys.stdin
    old_sysout = sys.stdout
    old_syserr = sys.stderr
    Trace("redirecting to %s" % executable)
    p = subprocess.Popen([executable],
                         stdin=subprocess.PIPE,
                         stdout=old_sysout,
                         stderr=old_syserr)
    sys.stdout = p.stdin
    sys.stderr = p.stdin
    old_sysout.close()
    global child_process
    child_process = p
Example No. 37
    def __init__(self, bitrates):
        Adaptation.__init__(self, bitrates)
        self.max_seconds = 50.0
        self.level_critical_seconds = 20.0  # critical buffer level. Fill as fast as possible until level_high_seconds reached if below this value
        self.level_low_seconds = 50.0  # low buffer level. Fill as fast as possible until level_high_seconds reached if below this value
        self.level_high_seconds = 100.0  # stable buffer level. Try to maintain current bitrate or improve it
        self.bitrate_selections = {
            "AUDIO": Trace("time", "Audio-Bitrate selections"),
            "VIDEO": Trace("time", "Video-Bitrate selections")
        }

        self.sim_state = None
        self.segment_choices = []

        self.buffer_ma4 = alg.IterativeMovingAverage(4)
        self.buffer_ma10 = alg.IterativeMovingAverage(10)

        self.ma4_filter = alg.IterativeMovingAverage(4)
        self.ma10_filter = alg.IterativeMovingAverage(10)
        self.ma50_filter = alg.IterativeMovingAverage(80)

        self.last_index = {"VIDEO": 0, "AUDIO": 0}

        self.fix_bps = False

        self.state_vars = StateVars()
        self.state_vars.bps_history = Trace("time", "bps")
        self.state_vars.ma4_history = Trace("time", "bps ma4")
        self.state_vars.ma10_history = Trace("time", "bps ma10")
        self.state_vars.ma50_history = Trace("time", "bps ma50")
        self.state_vars.buffer_history = Trace("sec", "sec")
        self.state_vars.buffer_ma4_history = Trace("sec", "sec")
        self.state_vars.buffer_ma10_history = Trace("sec", "sec")
        self.state_vars.name = Trace("sec", "sec")
        self.selected_bps = {"AUDIO": 0, "VIDEO": 0}

        self.hold_bps = 0
Example No. 38
def windows_symlink(src, dst):
    globalConfig = git_config.GitConfig.ForUser()

    src = to_windows_path(src)
    dst = to_windows_path(dst)
    is_dir = True if os.path.isdir(
        os.path.realpath(os.path.join(os.path.dirname(dst), src))) else False

    no_symlinks = globalConfig.GetBoolean("portable.windowsNoSymlinks")
    if no_symlinks is None or no_symlinks == False:
        symlink_options_dir = '/D'
        symlink_options_file = ''
    else:
        src = os.path.abspath(os.path.join(os.path.dirname(dst), src))
        Trace("Using no symlinks for %s from %s to %s",
              "dir" if is_dir else "file", src, dst)
        symlink_options_dir = '/J'
        symlink_options_file = '/H'

    if is_dir:
        cmd = ['cmd', '/c', 'mklink', symlink_options_dir, dst, src]
        cmd = list(filter(len, cmd))
        Trace(' '.join(cmd))
        try:
            subprocess.Popen(cmd, stdout=subprocess.PIPE).wait()
        except Exception as e:
            Trace("failed to create dir symlink: %s", e.strerror)
            pass
    else:
        cmd = ['cmd', '/c', 'mklink', symlink_options_file, dst, src]
        cmd = list(filter(len, cmd))
        Trace(' '.join(cmd))
        try:
            subprocess.Popen(cmd, stdout=subprocess.PIPE).wait()
        except Exception as e:
            Trace("failed to create file symlink: %s", e.strerror)
            pass
Example No. 39
    def get_trace(self, experiment):
        if not hasattr(self, "__traces"):
            self.__traces = {}
        if experiment.out_dir in self.__traces:
            return self.__traces[experiment.out_dir]

        if ('ftrace' not in experiment.conf['flags']
                or 'ftrace' not in self.test_conf):
            raise ValueError(
                'Tracing not enabled. If this test needs a trace, add "ftrace" '
                'to your test/experiment configuration flags')

        events = self.test_conf['ftrace']['events']
        trace = Trace(experiment.out_dir, events, self.te.platform)

        self.__traces[experiment.out_dir] = trace
        return trace
Example No. 40
class TestFuncs(unittest.TestCase):
    """White-box testing of funcs tracing"""
    def setUp(self):
        self.tracer = Trace(count=0, trace=0, countfuncs=1)
        self.filemod = my_file_and_modname()

    def test_simple_caller(self):
        self.tracer.runfunc(traced_func_simple_caller, 1)

        expected = {
            self.filemod + ('traced_func_simple_caller',): 1,
            self.filemod + ('traced_func_linear',): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)

    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)

        expected = {
            self.filemod + ('traced_func_simple_caller',): 1,
            self.filemod + ('traced_func_linear',): 1,
            self.filemod + ('traced_func_importing_caller',): 1,
            self.filemod + ('traced_func_importing',): 1,
            (fix_ext_py(testmod.__file__), 'testmod', 'func'): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)

    def test_inst_method_calling(self):
        obj = TracedClass(20)
        self.tracer.runfunc(obj.inst_method_calling, 1)

        expected = {
            self.filemod + ('TracedClass.inst_method_calling',): 1,
            self.filemod + ('TracedClass.inst_method_linear',): 1,
            self.filemod + ('traced_func_linear',): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)
Example No. 41
class TestFuncs(unittest.TestCase):
    """White-box testing of funcs tracing"""

    def setUp(self):
        self.addCleanup(sys.settrace, sys.gettrace())
        self.tracer = Trace(count=0, trace=0, countfuncs=1)
        self.filemod = my_file_and_modname()

    def test_simple_caller(self):
        self.tracer.runfunc(traced_func_simple_caller, 1)

        expected = {self.filemod + ("traced_func_simple_caller",): 1, self.filemod + ("traced_func_linear",): 1}
        self.assertEqual(self.tracer.results().calledfuncs, expected)

    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)

        expected = {
            self.filemod + ("traced_func_simple_caller",): 1,
            self.filemod + ("traced_func_linear",): 1,
            self.filemod + ("traced_func_importing_caller",): 1,
            self.filemod + ("traced_func_importing",): 1,
            (fix_ext_py(testmod.__file__), "testmod", "func"): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)

    @unittest.skipIf(hasattr(sys, "gettrace") and sys.gettrace(), "pre-existing trace function throws off measurements")
    def test_inst_method_calling(self):
        obj = TracedClass(20)
        self.tracer.runfunc(obj.inst_method_calling, 1)

        expected = {
            self.filemod + ("TracedClass.inst_method_calling",): 1,
            self.filemod + ("TracedClass.inst_method_linear",): 1,
            self.filemod + ("traced_func_linear",): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)
Example No. 42
class TestFuncs(unittest.TestCase):
    """White-box testing of funcs tracing"""
    def setUp(self):
        self.tracer = Trace(count=0, trace=0, countfuncs=1)
        self.filemod = my_file_and_modname()

    def test_simple_caller(self):
        self.tracer.runfunc(traced_func_simple_caller, 1)

        expected = {
            self.filemod + ('traced_func_simple_caller', ): 1,
            self.filemod + ('traced_func_linear', ): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)

    def test_loop_caller_importing(self):
        self.tracer.runfunc(traced_func_importing_caller, 1)

        expected = {
            self.filemod + ('traced_func_simple_caller', ): 1,
            self.filemod + ('traced_func_linear', ): 1,
            self.filemod + ('traced_func_importing_caller', ): 1,
            self.filemod + ('traced_func_importing', ): 1,
            (fix_ext_py(testmod.__file__), 'testmod', 'func'): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)

    def test_inst_method_calling(self):
        obj = TracedClass(20)
        self.tracer.runfunc(obj.inst_method_calling, 1)

        expected = {
            self.filemod + ('TracedClass.inst_method_calling', ): 1,
            self.filemod + ('TracedClass.inst_method_linear', ): 1,
            self.filemod + ('traced_func_linear', ): 1,
        }
        self.assertEqual(self.tracer.results().calledfuncs, expected)
Example No. 43
def parse_tpkl_2(filename):
    """
    A function to parse custom recarray objects.
    
    Parameters:
    filename (str): path of file to be analyzed
    
    Returns:
    Trace: custom object built to hold a single scattering curve and
    associated values
    """
    TPKL_HEADER_BYTES = 279 ### this value could vary...original value
    # TPKL_HEADER_BYTES = 290 ### march 2018
    with open(filename, "rb") as f:
        f.seek(TPKL_HEADER_BYTES)
        data = np.fromfile(f, dtype=dt)
        d2 = DataFrame.from_records(data)
    return Trace(d2.q, d2.sigS, d2.S, d2.sigSA, d2.SA, d2.Nj)
Example No. 44
 def _ReadJson(self):
     try:
         if os.path.getmtime(self._json) \
         <= os.path.getmtime(self.file):
             os.remove(self._json)
             return None
     except OSError:
         return None
     try:
         Trace(': parsing %s', self.file)
         fd = open(self._json)
         try:
             return json.load(fd)
         finally:
             fd.close()
     except (IOError, ValueError):
         os.remove(self._json)
         return None
Example No. 45
    def __init__(self, args, name, required_options=None, **options):
        self.name = name
        if Device.__trace is None:
            Device.__trace = Trace.get_tracer()

        if required_options is not None:
            for optname in required_options:
                if optname not in options:
                    raise RuntimeError(
                        f'{self.name} missing required option {optname}')

        # handle some common options
        self.address = options.get('address')
        self.interrupt = options.get('interrupt')
        self.size = None
        self.debug = self.name in args.debug_device
        self._asserted_ipl = 0
Example No. 46
def create_trace(prefix, num_examples):
    """
    Generates addition data with the given string prefix (i.e. 'train', 'test') and the specified
    number of examples.

    :param prefix: String prefix for saving the file ('train', 'test')
    :param num_examples: Number of examples to generate.
    """
    data = []
    for j in range(num_examples):
        rand_int = np.random.randint(len(COUNTRY_REGION))
        c_find = COUNTRY_REGION[rand_int][0]
        trace = Trace(c_find).trace
        data.append((c_find, trace))

    with open('{}.pik'.format(prefix), 'wb') as f:
        pickle.dump(data, f)

    print('Data Generated.')
Example No. 47
def average_traces(traces):
    """
    Calculates the average scattering across a series of experimental replicates.
    
    Parameters:
    traces (list of Trace objects): list to be filtered
    
    Returns:
    averaged_vector (Trace object)
    """
    one_curve = traces[0]
    mean_SA = np.mean([trace.SA for trace in traces], axis=0)
    std_err = np.std([trace.SA for trace in traces], axis=0)
    prop_err = np.sqrt(np.sum([trace.sigSA**2 for trace in traces],
                              axis=0)) / (len(traces) - 1)
    tot_err = np.sqrt(std_err**2 + prop_err**2)
    averaged_vector = Trace(one_curve.q, np.empty_like(one_curve.q),
                            np.empty_like(one_curve.q), tot_err, mean_SA,
                            np.empty_like(one_curve.q))
    return averaged_vector
Example No. 48
def parse_tpkl_depreciated(filename):
    """
    A function to parse custom recarray objects.
    This variation is dependent upon table.py from the Anfinrud Lab
    
    Parameters:
    filename (str): path of file to be analyzed
    
    Returns:
    Trace: custom object built to hold a single scattering curve and
    associated values
    """
    data = load(filename)
    q = data.q
    sigS = data.sigS
    S = data.S
    sigSA = data.sigSA
    SA = data.SA
    Nj = data.Nj
    return Trace(q, sigS, S, sigSA, SA, Nj)
Example No. 49
    def test_exec_counts(self):
        self.tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
        code = r'''traced_func_loop(2, 5)'''
        code = compile(code, __file__, 'exec')
        self.tracer.runctx(code, globals(), vars())

        firstlineno = get_firstlineno(traced_func_loop)
        expected = {
            (self.my_py_filename, firstlineno + 1): 1,
            (self.my_py_filename, firstlineno + 2): 6,
            (self.my_py_filename, firstlineno + 3): 5,
            (self.my_py_filename, firstlineno + 4): 1,
        }

        # When used through 'run', some other spurious counts are produced, like
        # the settrace of threading, which we ignore; here we just make sure that
        # the counts for traced_func_loop are right.
        #
        for k in expected.keys():
            self.assertEqual(self.tracer.results().counts[k], expected[k])
Example No. 50
def static_traces(parent, samp, reps, temps, series, option=None):

    static_vectors = {i: {j: [] for j in series} for i in temps}

    for temp in temps:

        for dilution in series:
            static = []
            for n in reps:
                static_string = ("{0}/{1}_off{2}T{3}_{4}.tpkl".format(
                    parent, samp, dilution, temp, n))
                # print(static_string)

                try:
                    static_data = parse.parse(static_string)
                    # print("test1")
                    # static_data.scale(reference, qmin=QMIN, qmax=QMAX)
                    static_data.scale(reference,
                                      qmin=QMIN,
                                      qmax=QMAX,
                                      approach='algebraic')
                    # print("test2")
                    static_scaled = Trace(static_data.q,
                                          np.empty_like(static_data.q),
                                          np.empty_like(static_data.q),
                                          static_data.scaled_sigSA,
                                          static_data.scaled_SA,
                                          static_data.Nj)
                    static.append(static_scaled)
                except:
                    # print(buff_string+"\tfailed")
                    print("{} failed to parse or scale".format(static_string))
                    pass
            try:
                static_filtered = iterative_chi_filter(static)
                static_filt_avg = average_traces(static_filtered)
                static_vectors[temp][dilution].append(static_filt_avg)
            except:
                print("temp {}C failed for {}".format(temp, dilution))

    return static_vectors
Example No. 51
    def max_idle_time(self, trace, start, end, cpus):
        """
        :returns: The maximum idle time of 'cpus' in the [start, end] interval
        """
        idle_df = pd.DataFrame()
        max_time = 0
        max_cpu = 0

        for cpu in cpus:
            busy_df = self.get_active_df(trace, cpu)
            busy_df = Trace.squash_df(busy_df, start, end)
            busy_df = busy_df[busy_df.state == 0]

            if busy_df.empty:
                continue

            local_max = busy_df.delta.max()
            if local_max > max_time:
                max_time = local_max
                max_cpu = cpu

        return max_time, max_cpu
Example No. 52
class CastLabsAdaptation(Adaptation):
    MA_SIZE = 5

    def __init__(self, bitrates):
        Adaptation.__init__(self, bitrates)
        self.max_seconds = 50.0
        self.level_low_seconds = 10.0  # critical buffer level. Fill as fast as possible until level_high_seconds reached if below this value
        self.level_high_seconds = 30.0  # stable buffer level. Try to maintain current bitrate or improve it
        self.bps_history = Trace("time", "bps")
        self.bitrate_selections = {"AUDIO": Trace("time", "Audio-Bitrate selections"),
                                   "VIDEO": Trace("time", "Video-Bitrate selections")}

        self.sim_state = None
        # self.bitrate_selections["VIDEO"].append(-1, 0)
        # self.bitrate_selections["AUDIO"].append(-1, 0)

        self.my_bps = Trace("seconds", "bps")

        self.ma4_filter = alg.IterativeMovingAverage(4)
        self.ma10_filter = alg.IterativeMovingAverage(10)
        self.ma50_filter = alg.IterativeMovingAverage(80)

        self.last_index = 0

    def current_buffer_level(self, type_str):
        return self.simulator.buffer_level(type_str)

    def min_buffer_level(self):
        return self.sim_state.metric.min_buffer_level()

    def clamp(self, val, range):
        return min(max(range[0], val), range[1])

    def next_bitrate(self, type_str):
        index = 0
        avg = 0
        clamp_range = [0, len(self.bitrates[type_str]) - 1]
        if self.bps_history.length != 0:
            avg = self.ma50_filter(self.bps_history.current_value)
            index = self.clamp(bisect.bisect(self.bitrates[type_str], avg) - 1, clamp_range)
            self.last_index = index
            self.my_bps.append(self.sim_state.t, avg)
        bps = self.bitrates[type_str][index]
        return bps

    def is_buffering(self):
        if self.sim_state is None:
            return False
        else:
            return self.min_buffer_level() < 3.0

    def evaluate(self, next_segment_type, seg_choices, state):
        """
        Updates internal performance statistics
        :param state: current state of the player simulator including buffer levels and http statistics
        :type state: dict of states
        :param next_segment_type: Type of the next segment, "AUDIO" or "VIDEO"
        :type next_segment_type: string
        :return: Next segment that should be downloaded
        """
        self.sim_state = state
        if state.http is not None:
            self.bps_history.append(state.t, state.http.bps)

        bps = self.next_bitrate(next_segment_type)

        if bps != self.bitrate_selections[next_segment_type].current_value:
            if next_segment_type == "VIDEO" and self.my_bps.length > 0:
                print "SWITCH @ t=%.2f: %d -> %d [Avg: %.2f / idx: %d]" % (self.sim_state.t, self.bitrate_selections[next_segment_type].current_value, bps, self.my_bps.current_value, self.last_index)
                print self.bitrates[next_segment_type][self.last_index]
            self.bitrate_selections[next_segment_type].append(state.t, bps)
            print self.bitrate_selections[next_segment_type].x_data
            print self.bitrate_selections[next_segment_type].y_data
        return Segment.find_segment_for_bitrate(seg_choices, bps)
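
The selection in next_bitrate boils down to "highest bitrate not above the measured average throughput, clamped to a valid index". An illustrative standalone sketch (the bitrate ladder and throughput value are made up):

import bisect

def select_bitrate(bitrates, measured_bps):
    # bitrates must be sorted ascending; bisect gives the insertion point,
    # so the entry just below it is the highest bitrate <= measured_bps.
    index = bisect.bisect(bitrates, measured_bps) - 1
    index = min(max(0, index), len(bitrates) - 1)
    return bitrates[index]

print(select_bitrate([500000, 1000000, 3000000], 1600000))   # 1000000
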
Example No. 53
 def setUp(self):
     self.addCleanup(sys.settrace, sys.gettrace())
     self.tracer = Trace(count=0, trace=0, countcallers=1)
     self.filemod = my_file_and_modname()
Example No. 54
def mh_query(model, pred, answer, samples_count, lag=1):
    """
    Metropolis-Hastings algorithm for sampling
    :param model: model to execute
    :param samples_count: how many samples we want
    :type samples_count: int
    :return: samples
    :rtype: list
    """
    MCMC_shared.mh_flag = True
    MCMC_shared.iteration = 0
    samples = []
    model()
    prev_name_idx = 0
    transitions = 0
    rejected = 0
    burn_in = 100
    miss = True
    while len(samples) < samples_count:
        MCMC_shared.iteration += 1
        variables = MCMC_shared.trace.names()
        # index = random.randint(0, len(variables) - 1)
        # selected_name = variables[index]
        selected_name = variables[prev_name_idx % len(MCMC_shared.trace.names())]
        prev_name_idx += 1
        current = MCMC_shared.trace.get(selected_name)
        erp, erp_params = current.erp, current.erp_parameters
        new_value = erp.proposal_kernel(current.x, *erp_params)
        # print erp_params
        fwdProb = erp.log_proposal_prob(current.x, new_value, *erp_params)
        rvsProb = erp.log_proposal_prob(new_value, current.x, *erp_params)
        # r and f for flip == 0
        # l = erp.log_likelihood(new_value, *erp_params)
        # new_trace = deepcopy(trace)
        new_trace = Trace(MCMC_shared.trace)
        new_trace.store(selected_name, Chunk(erp, new_value, erp_params), MCMC_shared.iteration)
        old_trace = MCMC_shared.trace
        MCMC_shared.trace = new_trace
        sample = model()
        MCMC_shared.trace = old_trace
        probability = log(uniform())
        # print sample
        # print new_trace._likelihood, old_trace._likelihood
        if probability < new_trace._likelihood - old_trace._likelihood + rvsProb - fwdProb and \
                (miss or pred(sample)):
            if miss and pred(sample):
                miss = False
            transitions += 1
            if not miss:
                if burn_in:
                    burn_in -= 1
                elif (transitions % lag) == 0:
                    # print len(samples), sample, new_trace._likelihood, rejected
                    samples.append(answer(sample))
            rejected = 0
            MCMC_shared.trace = new_trace
            MCMC_shared.trace.clean(MCMC_shared.iteration)
        else:
            rejected += 1

    return samples
Example No. 55
 def setUp(self):
     self.addCleanup(sys.settrace, sys.gettrace())
     self.tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
     self.my_py_filename = fix_ext_py(__file__)
Example No. 56
class TestLineCounts(unittest.TestCase):
    """White-box testing of line-counting, via runfunc"""
    def setUp(self):
        self.addCleanup(sys.settrace, sys.gettrace())
        self.tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
        self.my_py_filename = fix_ext_py(__file__)

    def test_traced_func_linear(self):
        result = self.tracer.runfunc(traced_func_linear, 2, 5)
        self.assertEqual(result, 7)

        # all lines are executed once
        expected = {}
        firstlineno = get_firstlineno(traced_func_linear)
        for i in range(1, 5):
            expected[(self.my_py_filename, firstlineno + i)] = 1

        self.assertEqual(self.tracer.results().counts, expected)

    def test_traced_func_loop(self):
        self.tracer.runfunc(traced_func_loop, 2, 3)

        firstlineno = get_firstlineno(traced_func_loop)
        expected = {
            (self.my_py_filename, firstlineno + 1): 1,
            (self.my_py_filename, firstlineno + 2): 6,
            (self.my_py_filename, firstlineno + 3): 5,
            (self.my_py_filename, firstlineno + 4): 1,
        }
        self.assertEqual(self.tracer.results().counts, expected)

    def test_traced_func_importing(self):
        self.tracer.runfunc(traced_func_importing, 2, 5)

        firstlineno = get_firstlineno(traced_func_importing)
        expected = {
            (self.my_py_filename, firstlineno + 1): 1,
            (fix_ext_py(testmod.__file__), 2): 1,
            (fix_ext_py(testmod.__file__), 3): 1,
        }

        self.assertEqual(self.tracer.results().counts, expected)

    def test_trace_func_generator(self):
        self.tracer.runfunc(traced_func_calling_generator)

        firstlineno_calling = get_firstlineno(traced_func_calling_generator)
        firstlineno_gen = get_firstlineno(traced_func_generator)
        expected = {
            (self.my_py_filename, firstlineno_calling + 1): 1,
            (self.my_py_filename, firstlineno_calling + 2): 11,
            (self.my_py_filename, firstlineno_calling + 3): 10,
            (self.my_py_filename, firstlineno_gen + 1): 1,
            (self.my_py_filename, firstlineno_gen + 2): 11,
            (self.my_py_filename, firstlineno_gen + 3): 10,
        }
        self.assertEqual(self.tracer.results().counts, expected)

    def test_trace_list_comprehension(self):
        self.tracer.runfunc(traced_caller_list_comprehension)

        firstlineno_calling = get_firstlineno(traced_caller_list_comprehension)
        firstlineno_called = get_firstlineno(traced_doubler)
        expected = {
            (self.my_py_filename, firstlineno_calling + 1): 1,
            # List comprehensions work differently in 3.x, so the count
            # below changed compared to 2.x.
            (self.my_py_filename, firstlineno_calling + 2): 12,
            (self.my_py_filename, firstlineno_calling + 3): 1,
            (self.my_py_filename, firstlineno_called + 1): 10,
        }
        self.assertEqual(self.tracer.results().counts, expected)

    def test_linear_methods(self):
        # XXX todo: later add 'static_method_linear' and 'class_method_linear'
        # here, once issue1764286 is resolved
        #
        for methname in ['inst_method_linear',]:
            tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
            traced_obj = TracedClass(25)
            method = getattr(traced_obj, methname)
            tracer.runfunc(method, 20)

            firstlineno = get_firstlineno(method)
            expected = {
                (self.my_py_filename, firstlineno + 1): 1,
            }
            self.assertEqual(tracer.results().counts, expected)
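For orientation, a minimal standalone sketch of the line counting these tests exercise, using only the standard-library trace module; the doubler helper is made up for illustration and is not one of the traced_* functions from the test module:

import trace

def doubler(n):
    # Illustrative helper; any small pure-Python function works here.
    total = 0
    for _ in range(n):
        total += 2
    return total

tracer = trace.Trace(count=1, trace=0)
tracer.runfunc(doubler, 5)
# results().counts maps (filename, lineno) -> hit count, the same shape the
# `expected` dictionaries above are compared against.
for (filename, lineno), hits in sorted(tracer.results().counts.items()):
    print(filename, lineno, hits)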
Ejemplo n.º 57
0
 def setUp(self):
     self.tracer = Trace(count=0, trace=0, countcallers=1)
     self.filemod = my_file_and_modname()
Ejemplo n.º 58
0
class TestTrace(TestCase):
    """Smoke tests for LISA's Trace class"""

    traces_dir = os.path.join(os.path.dirname(__file__), 'traces')
    events = [
        'sched_switch',
        'sched_overutilized',
        'cpu_idle',
        'sched_load_avg_task',
        'sched_load_se'
    ]

    def __init__(self, *args, **kwargs):
        super(TestTrace, self).__init__(*args, **kwargs)

        self.test_trace = os.path.join(self.traces_dir, 'test_trace.txt')

        self.platform = self._get_platform()

        self.trace_path = os.path.join(self.traces_dir, 'trace.txt')
        self.trace = Trace(self.platform, self.trace_path, self.events)

    def make_trace(self, in_data):
        """
        Get a trace from an embedded string of textual trace data
        """
        with open(self.test_trace, "w") as fout:
            fout.write(in_data)

        return Trace(self.platform, self.test_trace, self.events,
                     normalize_time=False)

    def get_trace(self, trace_name):
        """
        Get a trace from a separate provided trace file
        """
        trace_dir = os.path.join(self.traces_dir, trace_name)

        trace_path = os.path.join(trace_dir, 'trace.dat')
        return Trace(self._get_platform(trace_name), trace_path, self.events)

    def _get_platform(self, trace_name=None):
        trace_dir = self.traces_dir
        if trace_name:
            trace_dir = os.path.join(trace_dir, trace_name)

        with open(os.path.join(trace_dir, 'platform.json')) as f:
            return json.load(f)

    def test_getTaskByName(self):
        """TestTrace: getTaskByName() returns the list of PIDs for all tasks with the specified name"""
        for name, pids in [('watchdog/0', [12]),
                           ('sh', [1642, 1702, 1717, 1718]),
                           ('NOT_A_TASK', [])]:
            self.assertEqual(self.trace.getTaskByName(name), pids)

    def test_getTaskByPid(self):
        """TestTrace: getTaskByPid() returns the name of the task with the specified PID"""
        for pid, names in [(15, 'watchdog/1'),
                           (1639, 'sshd'),
                           (987654321, None)]:
            self.assertEqual(self.trace.getTaskByPid(pid), names)

    def test_getTasks(self):
        """TestTrace: getTasks() returns a dictionary mapping PIDs to a single task name"""
        tasks_dict = self.trace.getTasks()
        for pid, name in [(1, 'init'),
                          (9, 'rcu_sched'),
                          (1383, 'jbd2/sda2-8')]:
            self.assertEqual(tasks_dict[pid], name)

    def test_setTaskName(self):
        """TestTrace: getTaskBy{Pid,Name}() properly track tasks renaming"""
        in_data = """
          father-1234  [002] 18765.018235: sched_switch:          prev_comm=father prev_pid=1234 prev_prio=120 prev_state=0 next_comm=father next_pid=5678 next_prio=120
           child-5678  [002] 18766.018236: sched_switch:          prev_comm=child prev_pid=5678 prev_prio=120 prev_state=1 next_comm=sh next_pid=3367 next_prio=120
        """
        trace = self.make_trace(in_data)

        self.assertEqual(trace.getTaskByPid(1234), 'father')
        self.assertEqual(trace.getTaskByPid(5678), 'child')
        self.assertEqual(trace.getTaskByName('father'), [1234])

        os.remove(self.test_trace)

    def test_time_range(self):
        """
        TestTrace: time_range is the duration of the trace
        """
        expected_duration = 6.676497

        trace = Trace(self.platform, self.trace_path,
                      self.events, normalize_time=False)

        self.assertAlmostEqual(trace.time_range, expected_duration, places=6)

    def test_time_range_window(self):
        """
        TestTrace: time_range is the duration of the trace in the given window
        """
        expected_duration = 4.0

        trace = Trace(self.platform, self.trace_path,
                      self.events, normalize_time=False,
                      window=(76.402065, 80.402065))

        self.assertAlmostEqual(trace.time_range, expected_duration, places=6)

    def test_overutilized_time(self):
        """
        TestTrace: overutilized_time is the total time spent while system was overutilized
        """
        events = [
            76.402065,
            80.402065,
            82.001337
        ]

        trace_end = self.trace.ftrace.basetime + self.trace.ftrace.get_duration()
        # Last event should be extended to the trace's end
        expected_time = (events[1] - events[0]) + (trace_end - events[2])

        self.assertAlmostEqual(self.trace.overutilized_time, expected_time, places=6)

    def test_plotCPUIdleStateResidency(self):
        """
        Test that plotCPUIdleStateResidency doesn't crash
        """
        in_data = """
            foo-1  [000] 0.01: cpu_idle: state=0 cpu_id=0
            foo-1  [000] 0.02: cpu_idle: state=-1 cpu_id=0
            bar-2  [000] 0.03: cpu_idle: state=0 cpu_id=1
            bar-2  [000] 0.04: cpu_idle: state=-1 cpu_id=1
            baz-3  [000] 0.05: cpu_idle: state=0 cpu_id=2
            baz-3  [000] 0.06: cpu_idle: state=-1 cpu_id=2
            bam-4  [000] 0.07: cpu_idle: state=0 cpu_id=3
            bam-4  [000] 0.08: cpu_idle: state=-1 cpu_id=3
            child-5678  [002] 18765.018235: sched_switch: prev_comm=child prev_pid=5678 prev_prio=120 prev_state=1 next_comm=father next_pid=5678 next_prio=120
        """
        trace = self.make_trace(in_data)

        trace.analysis.idle.plotCPUIdleStateResidency()

    def test_deriving_cpus_count(self):
        """Test that Trace derives cpus_count if it isn't provided"""
        if self.platform:
            del self.platform['cpus_count']

        in_data = """
            father-1234  [000] 18765.018235: sched_switch: prev_comm=father prev_pid=1234 prev_prio=120 prev_state=0 next_comm=father next_pid=5678 next_prio=120
             child-5678  [002] 18765.018235: sched_switch: prev_comm=child prev_pid=5678 prev_prio=120 prev_state=1 next_comm=father next_pid=5678 next_prio=120
        """

        trace = self.make_trace(in_data)

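        # The embedded trace touches CPUs 0 and 2, so the highest CPU index is 2
        # and the derived cpus_count should be 3.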
        self.assertEqual(trace.platform['cpus_count'], 3)

    def test_dfg_cpu_wakeups(self):
        """
        Test the cpu_wakeups DataFrame getter
        """
        trace = self.make_trace("""
          <idle>-0     [004]   519.021928: cpu_idle:             state=4294967295 cpu_id=4
          <idle>-0     [004]   519.022147: cpu_idle:             state=0 cpu_id=4
          <idle>-0     [004]   519.022641: cpu_idle:             state=4294967295 cpu_id=4
          <idle>-0     [001]   519.022642: cpu_idle:             state=4294967295 cpu_id=1
          <idle>-0     [002]   519.022643: cpu_idle:             state=4294967295 cpu_id=2
          <idle>-0     [001]   519.022788: cpu_idle:             state=0 cpu_id=1
          <idle>-0     [002]   519.022831: cpu_idle:             state=2 cpu_id=2
          <idle>-0     [003]   519.022867: cpu_idle:             state=4294967295 cpu_id=3
          <idle>-0     [003]   519.023045: cpu_idle:             state=2 cpu_id=3
          <idle>-0     [004]   519.023080: cpu_idle:             state=1 cpu_id=4
        """)

        df = trace.data_frame.cpu_wakeups()

        exp_index = [519.021928, 519.022641, 519.022642, 519.022643, 519.022867]
        exp_cpus  = [         4,          4,          1,          2,          3]
        self.assertListEqual(df.index.tolist(), exp_index)
        self.assertListEqual(df.cpu.tolist(), exp_cpus)

        df = trace.data_frame.cpu_wakeups([2])

        self.assertListEqual(df.index.tolist(), [519.022643])
        self.assertListEqual(df.cpu.tolist(), [2])

    def _test_tasks_dfs(self, trace_name):
        """Helper for smoke testing _dfg methods in tasks_analysis"""
        trace = self.get_trace(trace_name)

        lt_df = trace.data_frame.task_load_events()
        columns = ['comm', 'pid', 'load_avg', 'util_avg', 'cpu']
        if trace.has_big_little:
            columns += ['cluster']
            if 'nrg_model' in trace.platform:
                columns += ['min_cluster_cap']
        for column in columns:
            msg = 'Task signals parsed from {} missing {} column'.format(
                trace.data_dir, column)
            self.assertIn(column, lt_df, msg=msg)

        if trace.has_big_little:
            df = trace.data_frame.top_big_tasks(min_samples=1)
            for column in ['samples', 'comm']:
                msg = 'Big tasks parsed from {} missing {} column'.format(
                    trace.data_dir, column)
                self.assertIn(column, df, msg=msg)

        # Pick an arbitrary PID to try plotting signals for.
        pid = lt_df['pid'].unique()[0]
        # Call plotTasks - we don't check the plot's contents, only that the
        # call doesn't blow up.
        trace.analysis.tasks.plotTasks(tasks=[pid])

    def test_sched_load_signals(self):
        """Test parsing sched_load_se events from EAS upstream integration"""
        self._test_tasks_dfs('sched_load')

    def test_sched_load_avg_signals(self):
        """Test parsing sched_load_avg_task events from EAS1.2"""
        self._test_tasks_dfs('sched_load_avg')
Ejemplo n.º 59
0
 def setUp(self):
     self.tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
     self.my_py_filename = fix_ext_py(__file__)
Ejemplo n.º 60
0
 def setUp(self):
     self.addCleanup(sys.settrace, sys.gettrace())
     self.tracer = Trace(count=0, trace=0, countfuncs=1)
     self.filemod = my_file_and_modname()
     self._saved_tracefunc = sys.gettrace()