def generate_addition(prefix, data_dir, num_examples, debug=False, maximum=10000000000, debug_every=1000):
    """
    Generates addition data with the given string prefix (i.e. 'train', 'test') and the
    specified number of examples.

    :param prefix: String prefix for saving the file ('train', 'test')
    :param data_dir: Directory the JSON output files are written into.
    :param num_examples: Number of examples to generate.
    :param debug: When True, emit a debug trace every `debug_every` examples.
    :param maximum: Exclusive upper bound on the sum of the two addends.
    :param debug_every: Interval between debug traces.
    """
    inputs, traces = [], []
    for idx in range(num_examples):
        first = np.random.randint(maximum - 1)
        # Second addend is bounded so that first + second < maximum.
        second = np.random.randint(maximum - first)
        want_debug = debug and idx % debug_every == 0
        if want_debug:
            traces.append(Trace(first, second, True).trace)
        else:
            traces.append(Trace(first, second).trace)
        inputs.append((first, second))
    # Persist inputs and traces side by side as JSON.
    for suffix, payload in (("_int", inputs), ("_trace", traces)):
        with open(os.path.join(data_dir, '{}.json'.format(prefix + suffix)), 'w') as f:
            json.dump(payload, f)
def __init__(self):
    """Create per-media (VIDEO/AUDIO) switch traces and buffer-level
    metrics plus a global bitrate history, each seeded with a (0, 0)
    starting sample."""
    media_types = ("VIDEO", "AUDIO")
    self.switches = {media: Trace("seconds", "bps") for media in media_types}
    self.buffer_levels = {media: BufferLevelMetric() for media in media_types}
    self.bps_history = Trace("seconds", "bps")
    # Seed every series at time 0 with value 0.
    self.bps_history.append(0, 0)
    for media in media_types:
        self.buffer_levels[media].append(0, 0)
    for media in media_types:
        self.switches[media].append(0, 0)
def _check_closed_tour(self, position, count):
    """Handle the final move of a requested closed knight's tour.

    Only acts when all but one square has been visited and a closed tour
    was requested: either finishes the tour on `position` (when it can
    reach a closing square) or backtracks the knight.

    :param position: candidate position for the knight's final move
    :param count: current move counter, recorded in the Trace
    :return: True when the move was handled here (finished or retraced);
             None (falsy) otherwise, letting the caller proceed normally.
    """
    # NOTE(review): `board.size - 1` suggests size counts squares and the
    # start square is excluded from visited_positions -- confirm.
    if len(self.knight.visited_positions) == (self.board.size -1) and self.closed == True:
        if position in self.end_positions:
            t = Trace(count, position, retrace=False)
            self.knight.set_position(position)
            #final position of the closed tour has been reached
        else:
            # Cannot close the tour from here: undo the last move.
            previous_position = self.knight.retrace()
            t = Trace(count, previous_position, retrace=True)
        return True
def all_off_traces(parent, samp, reps, on_off_map, option=None, multitemp=None, iterations=None, temp=None):
    """Collect and scale every "off" trace for a sample.

    Builds a .tpkl filename for each (repetition, off-label) pair -- and
    additionally for each iteration/temperature when `multitemp` is set --
    parses it, scales it against the module-level `reference` curve over
    [QMIN, QMAX], and wraps the scaled data in a Trace.

    :param parent: directory containing the .tpkl files
    :param samp: sample name as recorded in the filenames
    :param reps: iterable of repetition labels
    :param on_off_map: mapping whose values are the "off" labels
    :param option: filename idiosyncrasy flag (selects the "_on" suffix)
    :param multitemp: when truthy, iterate over `iterations` with `temp`
    :param iterations: iteration labels (multitemp mode only)
    :param temp: temperature label (multitemp mode only)
    :return: list of scaled Trace objects; files that fail to parse or
             scale are reported to stdout and skipped.
    """
    off_vectors = []

    def _load(off_string):
        # Parse, scale and collect one file; failures are best-effort skips.
        try:
            off_data = parse.parse(off_string)
            off_data.scale(reference, qmin=QMIN, qmax=QMAX)
            off_scaled = Trace(off_data.q, np.empty_like(off_data.q),
                               np.empty_like(off_data.q), off_data.scaled_sigSA,
                               off_data.scaled_SA, off_data.Nj)
            off_vectors.append(off_scaled)
        except Exception:
            # Fix: was a bare `except:` that also swallowed SystemExit
            # and KeyboardInterrupt.
            print(off_string + "\tfailed")

    if multitemp:
        for iteration in iterations:
            for n in reps:
                for off in on_off_map.values():
                    _load("{0}/{1}_{2}_{3}_{4}_{5}.tpkl".format(
                        parent, samp, iteration, temp, n, off))
    else:
        for n in reps:
            for off in on_off_map.values():
                if option:
                    off_string = ("{0}/{1}_{2}_{3}_on.tpkl".format(
                        parent, samp, n, off))
                else:
                    off_string = ("{0}/{1}_{2}_{3}.tpkl".format(
                        parent, samp, n, off))
                _load(off_string)
    return off_vectors
def initial_trace(self):
    """Initialise the reference and current traces for a session.

    In non-artificial mode both traces are filled with consecutive rows
    of real survey data (advancing `current_real_trace` each time); the
    current trace additionally gets synthetic noise and random peak
    indices.
    """
    self.reference_trace = Trace([400], self.screen)
    if not self.artificial:
        self.reference_trace.data = self.survey.data[
            self.
            current_real_trace, :]  #/(max(self.survey.data[:, 0])-min(self.survey.data[:, 0]))
        self.current_real_trace += 1
    self.current_trace = Trace([400], self.screen, noise=True)
    if not self.artificial:
        self.current_trace.data = self.survey.data[
            self.current_real_trace, :]
        self.current_real_trace += 1
    # Random peak positions over the trace dimension.
    self.current_trace.peaks = randint(0, high=self.dim, size=self.peaks)
def _setup_ui(self):
    """Build the panel's widget stack: alarms, measurements, core-rope
    and erasable-memory simulators, then the trace widget, laid out
    top-to-bottom inside a styled, raised frame."""
    self.setFrameStyle(QFrame.StyledPanel | QFrame.Raised)
    layout = QVBoxLayout(self)
    self.setLayout(layout)
    layout.setAlignment(Qt.AlignTop)
    layout.setSpacing(2)
    layout.setMargin(2)  # NOTE(review): setMargin is Qt4-era API -- confirm the binding in use.
    layout.addSpacing(20)
    self._alarms = Alarms(self, self._usbif)
    layout.addWidget(self._alarms)
    self._meas = Measurements(self, self._usbif)
    layout.addWidget(self._meas)
    self._core_rope_sim = CoreRopeSim(self, self._usbif)
    layout.addWidget(self._core_rope_sim)
    layout.setAlignment(self._core_rope_sim, Qt.AlignTop)
    self._erasable_mem_sim = ErasableMemSim(self, self._usbif)
    layout.addWidget(self._erasable_mem_sim)
    layout.setAlignment(self._erasable_mem_sim, Qt.AlignTop)
    self._trace = Trace(self, self._usbif)
    layout.addWidget(self._trace)
def _open_ssh(host, port):
    """Start a background ssh ControlMaster for host:port (Python 2).

    Returns True when a master for this host:port is already cached;
    returns False when masters are disabled (an earlier failure, a
    GIT_SSH override, or a platform whose ssh lacks ControlMaster) or
    when spawning the master fails.
    """
    global _ssh_master

    key = '%s:%s' % (host, port)
    if key in _ssh_cache:
        return True

    if not _ssh_master \
            or 'GIT_SSH' in os.environ \
            or sys.platform in ('win32', 'cygwin'):
        # failed earlier, or cygwin ssh can't do this
        #
        return False

    # -M -N: become a master, run no remote command.
    command = ['ssh',
               '-o', 'ControlPath %s' % _ssh_sock(),
               '-p', str(port),
               '-M',
               '-N',
               host]
    try:
        Trace(': %s', ' '.join(command))
        p = subprocess.Popen(command)
    except Exception, e:
        # Disable further attempts for this process and warn the user.
        _ssh_master = False
        print >>sys.stderr, \
            '\nwarn: cannot enable ssh control master for %s:%s\n%s' \
            % (host,port, str(e))
        return False
def _eval_trace(self):
    """Factor the scalar out of the trace: Tr(c*M) -> c*Tr(M).

    Raises NotImplementedError when no scalar coefficient can be pulled
    out (coefficient is 1), i.e. nothing to simplify.
    """
    coeff, mat_product = self.as_coeff_mmul()
    if coeff == 1:
        raise NotImplementedError("Can't simplify any further")
    from trace import Trace
    return coeff * Trace(mat_product)
def get_called_functions(self, driver_function):
    """return the called functions in their called sequence

    Loads the driver module configured on this instance and traces the
    requested driver function.

    :param driver_function: name of the function in the driver module
        to execute under the tracer
    Returns:
        list(dict) -- called functions returned in a list(sorted)
        comprising of a dict keyed on filename, modulename and funcname.
    """
    # Fix: the previous version ignored `driver_function` and traced a
    # hard-coded absolute path/function from the author's machine; the
    # configured self.driver_module / self.driver_path are used instead.
    spec = importlib.util.spec_from_file_location(self.driver_module,
                                                  self.driver_path)
    driver = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(driver)
    tracer = Trace(countfuncs=1)
    # runfunc avoids the string-exec name-scoping issues of tracer.run().
    function_to_be_called = getattr(driver, driver_function)
    tracer.runfunc(function_to_be_called)
    results = tracer.results()
    return results.calledfuncs
def trace(trace=True, exclude=None, **kwargs):
    '''
    Decorator to trace line execution. Usage::

        @trace()
        def method(...):
            ...

    When ``method()`` is called, every line of execution is traced.

    :param trace: forwarded to ``trace.Trace`` -- print lines as executed
    :param exclude: path or list/tuple of paths whose modules are ignored
        (``sys.prefix`` is always ignored)
    :param kwargs: extra keyword arguments forwarded to ``trace.Trace``
    :raises TypeError: if ``exclude`` is not None, a string, a list or a tuple
    '''
    if exclude is None:
        ignoredirs = (sys.prefix, )
    elif isinstance(exclude, six.string_types):
        ignoredirs = (sys.prefix, os.path.abspath(exclude))
    elif isinstance(exclude, (list, tuple)):
        ignoredirs = [sys.prefix] + [os.path.abspath(path) for path in exclude]
    else:
        # Fix: previously an unsupported type fell through and raised
        # NameError on `ignoredirs` below; fail with a clear message.
        raise TypeError('exclude must be None, a string, or a list/tuple of strings')
    tracer = Trace(trace=trace, ignoredirs=ignoredirs, **kwargs)

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return tracer.runfunc(func, *args, **kwargs)
        return wrapper
    return decorator
def generate_sequential_function_calls(self): """generate sequential function calls for tracing source code and plotting sequence diagram. """ # generating sequence diagram for a use-case use_case, driver_path, driver_name, driver_function = self.get_driver_path_and_driver_name( ) generate_sequence_diagram = GenerateSequenceDiagram( driver_path, driver_name, self.source_code_path[0]) spec = importlib.util.spec_from_file_location(driver_name, driver_path) global foo foo = importlib.util.module_from_spec(spec) spec.loader.exec_module(foo) tracer = Trace(countfuncs=1, countcallers=1, timing=1) tracer.run('foo.{}()'.format(driver_function)) results = tracer.results() caller_functions = results.callers function_sequence = [] # consists of all functions called in sequence for caller, callee in caller_functions: _, caller_module, caller_function = caller _, _, callee_function = callee if caller_module not in self.source_code_modules: continue function_sequence.append([caller_function, callee_function]) for sequence in function_sequence: print(sequence) self.df = self.write_in_excel.integrate_sequence_diagram_in_df( self.df, function_sequence, use_case) self.write_in_excel.write_df_to_excel(self.df, 'sheet_one', self.skip_cols, self.classes_covered, use_case)
def get(self):
    """HTTP GET handler: write a sample Trace (jsonpickle-encoded, so
    type information is preserved) as the response body, then query and
    print the end-to-end trace list table."""
    trace = Trace(1, 100, 'test', 'monitoring-manager', 'orchestrator-m')
    data_string = jsonpickle.encode(trace)
    self.write(data_string)
    traceSelectSql = "select * from tb_e2e_trace_list"
    records = self.dbengine.execute(traceSelectSql)
    self.print_result(records)
def get_trace(self, trace_name):
    """ Get a trace from a separate provided trace file

    :param trace_name: name of the sub-directory (under ``traces_dir``)
        holding the ``trace.dat`` file
    :return: a Trace built from that file with this instance's events
        and the platform description for ``trace_name``
    """
    # Fix: local variable was named `dir`, shadowing the builtin dir().
    trace_dir = os.path.join(self.traces_dir, trace_name)
    trace_path = os.path.join(trace_dir, 'trace.dat')
    return Trace(trace_path, self.events, self._get_platform(trace_name))
def _eval_trace(self):
    """Split Tr(c * A * B * ...) into c * Tr(A * B * ...) by separating
    scalar and matrix factors of this product.

    Raises NotImplementedError when the scalar part is 1 (nothing to
    factor out).
    """
    scalar_args = [arg for arg in self.args if not arg.is_Matrix]
    matrix_args = [arg for arg in self.args if arg.is_Matrix]
    coeff = Mul(*scalar_args)
    if coeff == 1:
        raise NotImplementedError("Can't simplify any further")
    from trace import Trace
    return coeff * Trace(MatMul(*matrix_args))
def static_traces(parent, samp, reps, temps, series, option=None):
    """
    Average together repetitions of static temperature data series.
    First, curves are scaled to a user-chosen reference curve.
    Then, an iterative chi filter is used to remove outliers from the list.
    Finally, an average is calculated.

    Parameters:
    parent (pathlib object): the directory where files are stored
    samp (str): the name of the sample, as recorded in the filename
    reps (list of strings): values denoting repetition number
    temps (list of strings): values denoting temperature during data collection
    series (list of strings): values denoting sample dilution factor
    option (T/F): filename idiosyncrasy (currently unused here)

    Returns:
    static_vectors (dict): temp -> dilution -> list holding the
        filtered, averaged Trace for that condition (empty on failure)
    """
    static_vectors = {i: {j: [] for j in series} for i in temps}
    for temp in temps:
        for dilution in series:
            static = []
            for n in reps:
                static_string = ("{0}/{1}_off{2}T{3}_{4}.tpkl".format(
                    parent, samp, dilution, temp, n))
                try:
                    static_data = parse.parse(static_string)
                    static_data.scale(reference, qmin=QMIN, qmax=QMAX,
                                      approach='algebraic')
                    static_scaled = Trace(static_data.q,
                                          np.empty_like(static_data.q),
                                          np.empty_like(static_data.q),
                                          static_data.scaled_sigSA,
                                          static_data.scaled_SA,
                                          static_data.Nj)
                    static.append(static_scaled)
                except Exception:
                    # Fix: was a bare `except:` that also swallowed
                    # SystemExit/KeyboardInterrupt; keep best-effort skip.
                    print("{} failed to parse or scale".format(static_string))
            try:
                static_filtered = iterative_chi_filter(static)
                static_filt_avg = average_traces(static_filtered)
                static_vectors[temp][dilution].append(static_filt_avg)
            except Exception:
                print("temp {}C failed for {}".format(temp, dilution))
    return static_vectors
def __init__(self, cmdv, capture_stdout=False, ignore_stdout=False,
             capture_stderr=False, cwd=None):
    """Spawn a git subcommand (Python 2).

    Builds a sanitised environment (repo/git bookkeeping variables are
    stripped so the child acts only on its own repository), assembles
    the ``git <cmdv...>`` command line, optionally logs it via Trace,
    and starts the subprocess.

    :param cmdv: git sub-command and its arguments
    :param capture_stdout: pipe the child's stdout back to the caller
    :param ignore_stdout: discard the child's stdout entirely
    :param capture_stderr: pipe the child's stderr back to the caller
    :param cwd: working directory for the child process
    :raises Exception: when the subprocess cannot be started
    """
    env = dict(os.environ)
    # These inherited variables would point the child git at the wrong
    # repository/index, so drop them.
    for e in [REPO_TRACE,
              GIT_DIR,
              'GIT_ALTERNATE_OBJECT_DIRECTORIES',
              'GIT_OBJECT_DIRECTORY',
              'GIT_WORK_TREE',
              'GIT_GRAFT_FILE',
              'GIT_INDEX_FILE']:
        if e in env:
            del env[e]

    command = [GIT]
    command.extend(cmdv)

    stdout = capture_stdout and subprocess.PIPE or None
    if ignore_stdout:
        # NOTE(review): Python 2 file() and a hard-coded /dev/null;
        # os.devnull would be portable -- confirm target platforms.
        stdout = file("/dev/null", "w")
    stderr = capture_stderr and subprocess.PIPE or None

    if IsTrace():
        global LAST_CWD
        global LAST_GITDIR

        dbg = ''

        # Only log cd / GIT_DIR lines when they changed since the last
        # traced invocation, to keep trace output compact.
        if cwd and LAST_CWD != cwd:
            if LAST_GITDIR or LAST_CWD:
                dbg += '\n'
            dbg += ': cd %s\n' % cwd
            LAST_CWD = cwd

        if GIT_DIR in env and LAST_GITDIR != env[GIT_DIR]:
            if LAST_GITDIR or LAST_CWD:
                dbg += '\n'
            dbg += ': export GIT_DIR=%s\n' % env[GIT_DIR]
            LAST_GITDIR = env[GIT_DIR]

        dbg += ': '
        dbg += ' '.join(command)
        # Mark redirected streams in the trace line.
        if stdout == subprocess.PIPE:
            dbg += ' 1>|'
        if stderr == subprocess.PIPE:
            dbg += ' 2>|'
        Trace('%s', dbg)

    try:
        p = subprocess.Popen(command,
                             cwd=cwd,
                             env=env,
                             stdout=stdout,
                             stderr=stderr)
    except Exception, e:
        raise Exception('%s: %s' % (command[1], e))
def create_trace(self, event):
    """Build a Trace from a raw trace-event dict.

    A parent span id of 0 marks a root span and is normalised to None
    before constructing the Trace.
    """
    parent = event["parent_span_id"]
    if parent == 0:
        parent = None
    return Trace(event["trace_name"],
                 event["trace_id"],
                 event["span_id"],
                 parent)
def build_router_range(self, mbps, delay, num_routers, range_factor=10):
    """return a dict where values are routers with throughput 'mbps' and
    delay 'delay' each, and queue sizes distributed logarithmically from
    bdp up to range_factor x bdp

    keys are the respectively used queue sizes (int bytes)

    :param mbps: link throughput in Mbit/s
    :param delay: one-way delay (ms, presumably -- used in the BDP formula)
    :param num_routers: number of routers / distinct queue sizes
    :param range_factor: ratio between the largest and smallest queue
    :return: dict mapping queue size in bytes -> Router
    """
    # Bandwidth-delay product; factor 2 turns the one-way delay into an RTT.
    bdp_bits = mbps * delay * 1000.0 * 2
    bdp_bytes = bdp_bits / 8
    # Fix: guard against ZeroDivisionError when num_routers == 1; a
    # single router just gets the plain BDP-sized queue.
    step_size = 1.0 / (num_routers - 1) if num_routers > 1 else 0.0
    routers = {}
    current_queue_size = int(bdp_bytes)
    for i in range(num_routers):
        r = Router(delay=delay,
                   up_trace=Trace(mbps=mbps),
                   up_queue_type='droptail',
                   up_queue_args='bytes=%d' % int(current_queue_size),
                   down_trace=Trace(mbps=mbps))
        routers[int(current_queue_size)] = r
        # Advance geometrically so sizes are log-spaced up to range_factor x bdp.
        current_queue_size *= math.pow(range_factor, step_size)
    return routers
def runExperiments(cls):
    """
    Set up logging and trigger running experiments

    Builds the test environment, pins the cpufreq governor, freezes
    userspace tasks, runs every task node of the workload tree under
    FTrace, then collects the binary trace and parses it into cls.trace.
    """
    cls._log = logging.getLogger('LisaTest')

    cls._log.info('Setup tests execution engine...')
    te = TestEnv(test_conf=cls._getTestConf())

    experiments_conf = cls._getExperimentsConf(te)
    test_dir = os.path.join(te.res_dir,
                            experiments_conf['confs'][0]['tag'])
    os.makedirs(test_dir)

    # Setting cpufreq governor to performance
    te.target.cpufreq.set_all_governors('performance')

    # Creating cgroups hierarchy
    cpuset_cnt = te.target.cgroups.controller('cpuset')
    cpu_cnt = te.target.cgroups.controller('cpu')

    # Longest single-task duration bounds how long we sleep below while
    # the background workloads run.
    max_duration = 0
    for se in cls.root_group.iter_nodes():
        if se.is_task:
            max_duration = max(max_duration, se.duration_s)

    # Freeze userspace tasks
    cls._log.info('Freezing userspace tasks')
    te.target.cgroups.freeze(Executor.critical_tasks[te.target.os])

    cls._log.info('FTrace events collection enabled')
    te.ftrace.start()

    # Run tasks
    cls._log.info('Running the tasks')
    # Run all tasks in background and wait for completion
    for se in cls.root_group.iter_nodes():
        if se.is_task:
            # Run tasks
            se.wload.run(out_dir=test_dir,
                         cpus=se.cpus,
                         cgroup=se.parent.name,
                         background=True)
    sleep(max_duration)

    te.ftrace.stop()
    trace_file = os.path.join(test_dir, 'trace.dat')
    te.ftrace.get_trace(trace_file)
    cls._log.info('Collected FTrace binary trace: %s', trace_file)

    # Un-freeze userspace tasks
    cls._log.info('Un-freezing userspace tasks')
    te.target.cgroups.freeze(thaw=True)

    # Extract trace
    cls.trace = Trace(None, test_dir, te.ftrace.events)
def get_windows_hardlink(file_path):
    """Return the path of a hard link pointing at `file_path`, if any.

    Uses `fsutil hardlink list`; returns None for directories, on any
    fsutil failure, or when the file has no additional links.

    :param file_path: absolute Windows path (drive letter expected)
    :return: path of the first listed hard link, or None
    """
    if os.path.isdir(file_path):
        return None
    cmd = ['cmd', '/c', 'fsutil', 'hardlink', 'list', file_path]
    try:
        Trace(' '.join(cmd))
        out = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except Exception:
        # Fix: was a bare `except:`; fsutil missing or failing is still
        # treated as "no hard link", but SystemExit etc. now propagate.
        return None
    lines = [s.strip() for s in out.split('\n')]
    if len(lines) >= 2 and len(lines[1]) > 0:
        # fsutil prints drive-relative paths; prepend the drive (e.g. 'C:').
        hardlink = file_path[0:2] + lines[0]
        Trace("Hard link found: %s -> %s", file_path, hardlink)
        return hardlink
    else:
        return None
def test_linear_methods(self):
    """Tracing a straight-line instance method records exactly one hit
    on its single body line."""
    for method_name in ('inst_method_linear',):
        tracer = Trace(count=1, trace=0, countfuncs=0, countcallers=0)
        target = TracedClass(25)
        bound_method = getattr(target, method_name)
        tracer.runfunc(bound_method, 20)

        first_line = get_firstlineno(bound_method)
        self.assertEqual(tracer.results().counts,
                         {(self.my_py_filename, first_line + 1): 1})
def __init__(self, *args, **kwargs):
    """Resolve the platform description and parse the canonical trace
    once, so individual tests can reuse ``self.trace``."""
    super(TestTrace, self).__init__(*args, **kwargs)
    # Scratch file used by tests that synthesise their own trace data.
    self.test_trace = os.path.join(self.traces_dir, 'test_trace.txt')
    self.platform = self._get_platform()
    self.trace_path = os.path.join(self.traces_dir, 'trace.txt')
    self.trace = Trace(self.trace_path, self.events, self.platform)
def run(self):
    """Run the knight's-tour search from the configured start position.

    Repeatedly picks the best-weighted legal move (via
    Move.choose_best_move), backtracking when stuck, until the board is
    fully visited or a count/time limit stops the search.

    :return: tuple (knight, move count, board, elapsed seconds)
    :raises MoveError: when the chosen best move cannot be applied
    """
    self.knight = Knight(self.start_position, self.verbosity.verbose_int)
    self.knight.add_to_board(self.board)
    if self.closed == True:
        # For a closed tour the final square must attack the start:
        # remember the squares reachable from the start now.
        self.end_positions = self.knight.get_possible_moves()
    count = 0
    duration = 0
    largest_tour = 0
    start = time.time()
    complete = False
    while len(self.knight.visited_positions) < self.board.size and self._check_limit(count, duration):
        #garner stats
        largest_tour = self.verbosity.min_max(self, largest_tour)
        self.verbosity.potential_OBOB(self)
        self.verbosity.progress(count)
        if len(self.knight.visited_positions) < 4:
            largest_tour = len(self.knight.visited_positions)
        # Sample the clock only every 1000 moves to keep the loop cheap.
        if self.time_limit != None and count%1000 == 0:
            duration = time.time()-start
        #find the next move
        possible_positions = self.knight.get_possible_moves()
        self.verbosity.possible_moves(self.knight.get_current_position(), possible_positions)
        if len(possible_positions) == 0:
            # Dead end: undo the last move and keep searching.
            previous_position = self.knight.retrace()
            t = Trace(count, previous_position, retrace=True)
            count += 1
            continue
        initial_moves = []
        for position in possible_positions:
            #the position already has a weight when it's created
            if self._check_closed_tour(position, count) == True:
                #either the tour is complete, or the knight retraced and we return to the while loop
                complete = True
                break
            move = Move(position, self.knight.get_visited_positions()[:])
            initial_moves.append(move)
        if len(initial_moves) != 0 and complete != True:
            best_move = Move.choose_best_move(initial_moves, self.end_positions)
            if not self.knight.set_position(best_move.get_position()):
                raise MoveError(best_move.get_position())
            t = Trace(count, best_move.get_position(), retrace=False)
            count += 1
    end_time = round(time.time() - start,3)
    return self.knight, count, self.board, end_time
def __init__(self):
    """Create empty history traces: raw bandwidth plus 4/10/50-sample
    moving averages, and raw buffer level plus 4/10-sample averages."""
    bandwidth_series = (("bps_history", "bps"),
                        ("ma4_history", "bps ma4"),
                        ("ma10_history", "bps ma10"),
                        ("ma50_history", "bps ma50"))
    for attr, label in bandwidth_series:
        setattr(self, attr, Trace("time", label))
    for attr in ("buffer_history",
                 "buffer_ma4_history",
                 "buffer_ma10_history"):
        setattr(self, attr, Trace("sec", "sec"))
def __init__(self, *args, **kwargs):
    """Load the platform description from ``platform.json`` and parse
    the canonical trace once for reuse by the individual tests."""
    super(TestTrace, self).__init__(*args, **kwargs)
    # Scratch file used by tests that synthesise their own trace data.
    self.test_trace = os.path.join(self.traces_dir, 'test_trace.txt')
    with open(os.path.join(self.traces_dir, 'platform.json')) as f:
        self.platform = json.load(f)
    trace_path = os.path.join(self.traces_dir, 'trace.txt')
    self.trace = Trace(self.platform, trace_path, self.events)
def _NeedUpdate(self):
    """Return True when any watched ref file's mtime differs from the
    value cached in self._mtime, or the file can no longer be stat'ed."""
    Trace(': scan refs %s', self._gitdir)

    for name, cached_mtime in self._mtime.items():
        ref_path = os.path.join(self._gitdir, name)
        try:
            current_mtime = os.path.getmtime(ref_path)
        except OSError:
            # File vanished or is unreadable: treat as changed.
            return True
        if current_mtime != cached_mtime:
            return True
    return False
async def beginStream(STREAM, PORT, URL):
    """Connect the websocket + EEG headset through Brainflow and begin
    capturing into a per-user Trace.

    SYNTHETIC streams need only the url; OPENBCI additionally passes
    the serial port. Any other stream value is ignored.
    """
    trace = Trace(id='User')
    capture_kwargs = {'stream': STREAM, 'url': URL}
    if STREAM == 'OPENBCI':
        capture_kwargs['port'] = PORT
    if STREAM in ('SYNTHETIC', 'OPENBCI'):
        await trace.capture(**capture_kwargs)
def ReviewUrl(self, userEmail):
    """Resolve and cache the review (upload) URL for this project.

    Normalises the configured review host (default scheme, trailing
    slash, legacy /Gerrit and /ssh_info suffixes), then chooses between
    SSH and HTTP upload by probing the server's ssh_info endpoint.
    Results are memoised per normalised URL in REVIEW_CACHE.

    :param userEmail: email used when building the SSH review URL
    :return: review URL with the project name appended, or None when no
             review server is configured
    :raises UploadError: when the ssh_info probe fails
    """
    if self._review_url is None:
        if self.review is None:
            return None

        u = self.review
        if u.startswith('persistent-'):
            u = u[len('persistent-'):]
        if u.split(':')[0] not in ('http', 'https', 'sso', 'ssh'):
            u = 'http://%s' % u
        if u.endswith('/Gerrit'):
            u = u[:len(u) - len('/Gerrit')]
        if u.endswith('/ssh_info'):
            u = u[:len(u) - len('/ssh_info')]
        if not u.endswith('/'):
            u += '/'
        http_url = u

        if u in REVIEW_CACHE:
            self._review_url = REVIEW_CACHE[u]
        elif 'REPO_HOST_PORT_INFO' in os.environ:
            host, port = os.environ['REPO_HOST_PORT_INFO'].split()
            self._review_url = self._SshReviewUrl(userEmail, host, port)
            REVIEW_CACHE[u] = self._review_url
        elif u.startswith('sso:') or u.startswith('ssh:'):
            self._review_url = u  # Assume it's right
            REVIEW_CACHE[u] = self._review_url
        else:
            try:
                info_url = u + 'ssh_info'
                from trace import Trace
                Trace("urlopen %s" % info_url)
                try:
                    # NOTE(review): urlopen().read() returns bytes on
                    # Python 3; the str comparisons below assume it was
                    # decoded -- verify.
                    info = urllib.request.urlopen(info_url).read()
                except Exception:
                    info = 'NOT_AVAILABLE'
                if info == 'NOT_AVAILABLE' or '<' in info:
                    # If `info` contains '<', we assume the server gave us some sort
                    # of HTML response back, like maybe a login page.
                    #
                    # Assume HTTP if SSH is not enabled or ssh_info doesn't look right.
                    self._review_url = http_url
                else:
                    host, port = info.split()
                    self._review_url = self._SshReviewUrl(userEmail, host, port)
            except urllib.error.HTTPError as e:
                raise UploadError('%s: %s' % (self.review, str(e)))
            except urllib.error.URLError as e:
                raise UploadError('%s: %s' % (self.review, str(e)))
            except HTTPException as e:
                raise UploadError('%s: %s' % (self.review, e.__class__.__name__))

            REVIEW_CACHE[u] = self._review_url

    return self._review_url + self.projectname
def make_trace(self, in_data):
    """ Get a trace from an embedded string of textual trace data """
    # Dump the supplied text to the scratch trace file, then parse it
    # without normalising timestamps.
    with open(self.test_trace, "w") as trace_file:
        trace_file.write(in_data)
    return Trace(self.test_trace,
                 self.events,
                 self.platform,
                 normalize_time=False)
def average_traces(traces):
    """Average a list of Trace objects into one Trace.

    The averaged curve's error combines the scatter between curves and
    the propagated per-curve errors in quadrature.
    """
    template = traces[0]
    stacked_SA = np.array([t.SA for t in traces])
    mean_SA = stacked_SA.mean(axis=0)
    scatter_err = stacked_SA.std(axis=0)
    propagated_err = np.sqrt(
        np.sum([t.sigSA**2 for t in traces], axis=0)) / (len(traces) - 1)
    total_err = np.sqrt(scatter_err**2 + propagated_err**2)
    return Trace(template.q,
                 np.empty_like(template.q),
                 np.empty_like(template.q),
                 total_err,
                 mean_SA,
                 np.empty_like(template.q))