def __init__(self, type, title, units, samp_rate, minval, maxval, factor,
             decimal_places, ref_level, number_rate, average, avg_alpha,
             label, peak_hold, show_gauge):
    """Numeric sink: a GNU Radio sync block that displays sample values in a Qt widget.

    Initializes the Qt widget and the gr.sync_block base (one input stream,
    no output), stores the display parameters, then applies the runtime
    options through the setter methods once the UI exists.

    Args:
        type: stream item type; ``complex`` selects numpy.complex64 input,
            anything else selects numpy.float32.
        title: widget title, applied via setWidgetTitle().
        units, samp_rate, factor, decimal_places, ref_level: display
            parameters stored as attributes for later use.
        minval, maxval: gauge range, applied via setBarRange().
        number_rate: display updates per second; used to derive the
            high-res-timer update resolution.
        average, avg_alpha, peak_hold, show_gauge: runtime options,
            applied via their setters after the UI is built.
        label: accepted for interface compatibility but currently unused.
    """
    # Call init for Qt
    QtGui.QWidget.__init__(self)

    # Determine the actual numpy input type from the requested stream type.
    if type == complex:
        sig = numpy.complex64
    else:
        sig = numpy.float32

    # Call init for sync block
    gr.sync_block.__init__(self,
                           name="numbersink",
                           in_sig=[sig],
                           out_sig=None)

    # create class members for all parameters
    self.type = type
    self.title = title
    self.units = units
    self.samp_rate = samp_rate
    self.factor = factor
    self.decimal_places = decimal_places
    self.ref_level = ref_level
    self.number_rate = number_rate

    # Placeholder defaults; the real values are applied below through the
    # setter methods once the UI has been created.
    # (Fixed: the original assigned self.peak_hold = False twice.)
    self.average = False
    self.avg_alpha = 0.0
    self.peak_hold = False
    self.minval = 0
    self.maxval = 100
    self.show_gauge = False

    # members calculated for later use
    self.update_res = gr.high_res_timer_tps() * (1.0 / self.number_rate)
    self.last_update = 0
    self.update_samps = int(self.samp_rate)
    self.hold = False
    self.avgval = 0
    self.peakval = 0
    self.bar_expand = 1

    # Create QtWidget. Hold it as a class member to make things more clear.
    self.ui = Ui_NumberSinkWidget()
    self.ui.setupUi(self)

    # These functions rely on an existing UI
    self.setAverage(average)
    self.setAvgAlpha(avg_alpha)
    self.setPeakHold(peak_hold)
    self.setBarRange(minval, maxval)
    self.setShowGauge(show_gauge)
    self.setWidgetTitle(title)
def main(top_block_cls=pfb_test, options=None):
    """Instantiate and run the flowgraph under a Qt application.

    Builds ``top_block_cls``, starts and shows it, prints the channelizer's
    accumulated work time (converted from timer ticks to seconds), and runs
    the Qt event loop until the application quits, stopping the flowgraph
    on exit.
    """
    # Qt 4.5 - 4.x supports selecting a graphics system; honor the
    # qtgui 'style' preference on those versions only.
    qt_version = StrictVersion(Qt.qVersion())
    if StrictVersion("4.5.0") <= qt_version < StrictVersion("5.0.0"):
        graphics_style = gr.prefs().get_string('qtgui', 'style', 'raster')
        Qt.QApplication.setGraphicsSystem(graphics_style)

    qapp = Qt.QApplication(sys.argv)

    tb = top_block_cls()
    tb.start()
    tb.show()

    # Report total channelizer work time in seconds (ticks / ticks-per-second).
    ticks_per_second = scipy.float64(gr.high_res_timer_tps())
    total_work_ticks = tb.pfb_channelizer_ccf_0.pfb.pc_work_time_total()
    print(total_work_ticks / ticks_per_second)

    def quitting():
        # Shut the flowgraph down cleanly when the Qt app exits.
        tb.stop()
        tb.wait()

    qapp.aboutToQuit.connect(quitting)
    qapp.exec_()
def set_number_rate(self, rate):
    """Set the display update rate and recompute the timer resolution.

    Args:
        rate: desired number of display updates per second.
    """
    seconds_per_update = 1.0 / rate
    self.number_rate = rate
    # Minimum spacing between updates, in high-resolution timer ticks.
    self.update_res = gr.high_res_timer_tps() * seconds_per_update
def main():
    """Run the configured benchmark scripts and pickle the timing results.

    The test list comes either from a JSON config file (--file) or from a
    list of scripts plus per-script item/iteration counts on the command
    line. For each matching run_* method found on each test class, the
    wall-clock time of ``iters`` iterations is collected (and, when
    ControlPort is enabled, per-block work times as well); summary
    statistics are then pickled to args.ofile together with system info.
    """
    parser = add_argparser()
    args = parser.parse_args()
    verbose = args.verbose
    kwargs = {}

    # ControlPort (Ice) connection; only opened when ctrlport support is on.
    ic = None
    if enable_ctrlport:
        ic = Ice.initialize(sys.argv)

    # Require either a config file or an explicit list of scripts.
    if(args.file is None and args.iscripts is None):
        print "Please specify either a config file or a list of scripts to run.\n"
        parser.print_help()
        sys.exit(1)

    if args.file is None:
        # Build the test list from the command line; nitems and iters must
        # each be supplied once per script.
        nscripts = len(args.iscripts)
        if(args.nitems is None or args.iters is None):
            print "Please specify a number of items and number of iterations for each benchmark script provided.\n"
            parser.print_help()
            sys.exit()
        if(len(args.nitems) != nscripts or len(args.iters) != nscripts):
            print "Please specify a number of items and number of iterations for each benchmark script provided.\n"
            parser.print_help()
            sys.exit()
        tests = []
        for n in xrange(nscripts):
            newtest = dict()
            # Module and test-class name are taken to be the same string.
            newtest['module'] = args.iscripts[n]
            newtest['testname'] = args.iscripts[n]
            newtest['nitems'] = args.nitems[n]
            newtest['iters'] = args.iters[n]
            tests.append(newtest)
    else:
        # Load the test descriptions from the JSON config file.
        f = json.load(open(args.file, 'rb'))
        if args.directory:
            script_dir = args.directory
        else:
            try:
                script_dir = f['directory']
            except KeyError:
                # Default directory for benchmark scripts.
                script_dir = "bm_scripts"
        tests = f['tests']

    # Benchmark entry points are attributes whose names match this pattern.
    # NOTE(review): 'run*' is "ru" followed by zero-or-more "n", so match()
    # accepts any name starting with "ru"; presumably 'run_.*' was intended
    # -- confirm against the benchmark scripts.
    find_tests = re.compile('run*')

    tpms = gr.high_res_timer_tps()/1000.0 # ticks per millisecond

    # System identification stored alongside the results.
    procinfo = gr_profiler.cpuinfo()
    procver = gr_profiler.kversion()
    sysinfo = procinfo + procver
    #print sysinfo

    results = {}
    for ntest, t in enumerate(tests):
        test_name = t['module'] + "." + t['testname']
        # Import the benchmark module from the scripts directory.
        qa = __import__(script_dir + '.' + t['module'], globals(), locals(), t['testname'])
        iters = t['iters']
        nitems = t['nitems']
        # NOTE(review): kwargs is only overwritten when this test supplies
        # 'kwargs', so a previous test's kwargs carry over otherwise --
        # confirm that is intended.
        if(t.has_key('kwargs')):
            kwargs = t['kwargs']
        fresults = {}

        # Turn off a test by setting iters = 0
        if iters == 0:
            continue

        # Get the name of the test class in the module
        test_suite = getattr(qa, t['testname'])

        # Search for all tests in the test class
        test_funcs = []
        for f in dir(test_suite):
            testf = find_tests.match(f)
            if testf:
                test_funcs.append(testf.string)

        obj = test_suite(nitems, **kwargs)

        # Run each test case iters number of iterations
        for f in test_funcs:
            print "\nRUNNING FUNCTION: {0}.{1}".format(str(test_name), str(f))
            # Per-iteration timing buffers (integer tick counts).
            _program_time = numpy.array(iters*[0,])
            _all_blocks_time = numpy.array(iters*[0,])
            _nblocks = 0

            # Run function setup
            if hasattr(obj, f.replace("run_", "setup_")):
                _x = getattr(obj, f.replace("run_", "setup_"))()

            for i in xrange(iters):
                # Time one invocation of the benchmark function.
                _start_time = gr.high_res_timer_now()
                _x = getattr(obj, f)
                _x()
                _end_time = gr.high_res_timer_now()
                _program_time[i] = _end_time - _start_time

                # Per-block work times via ControlPort, when available.
                times = {}
                if enable_ctrlport:
                    times = get_block_times(ic, obj.tb._tb.alias())

                # Lazily size the per-block buffers on the first iteration
                # that reports block times. (n is assigned but never used.)
                if _nblocks == 0:
                    n = len(times.keys())
                    _blocks_times = dict()
                    for bt in times:
                        _blocks_times[bt] = numpy.array(iters*[0,])
                    _nblocks = len(times.keys())

                for bt in times:
                    _all_blocks_time[i] += times[bt]
                    _blocks_times[bt][i] = times[bt]

            # Summary statistics, converted from ticks to milliseconds.
            pt_min = _program_time.min()/tpms
            pt_avg = _program_time.mean()/tpms
            pt_var = (_program_time/tpms).var()
            bt_min = _all_blocks_time.min()/tpms
            bt_avg = _all_blocks_time.mean()/tpms
            bt_var = (_all_blocks_time/tpms).var()
            bt_blks_min = dict()
            bt_blks_avg = dict()
            bt_blks_var = dict()
            for bt in _blocks_times:
                bt_blks_min[bt] = _blocks_times[bt].min()/tpms
                bt_blks_avg[bt] = _blocks_times[bt].mean()/tpms
                bt_blks_var[bt] = (_blocks_times[bt]/tpms).var()

            if(verbose):
                print "Num. Blocks: {0}".format(_nblocks)
                print "Program Time: {0:.2f} ms".format(pt_avg)
                print " std dev: {0:.2e} ms".format(numpy.sqrt(pt_var))
                print "Block Time: {0:.2f} ms".format(bt_avg)
                print " std dev: {0:.2e} ms".format(numpy.sqrt(bt_var))
                print "Ratio: {0:.2f}".format(bt_avg/pt_avg)

            fresults[f] = (pt_min, pt_avg, pt_var, bt_min, bt_avg, bt_var, _nblocks, bt_blks_min, bt_blks_avg, bt_blks_var)

        results[t['testname']] = fresults

    #print ""
    #print results
    #print ""

    # NOTE(review): this re-fetches the last test class but the value is
    # never used -- looks like dead code.
    test_suite = getattr(qa, t['testname'])

    # Persist system info, the test descriptions, and all results.
    pickle.dump([sysinfo, tests, results], open(args.ofile, 'wb'))