Example #1
def run_mini_ds2_benchmark(args, **kwargs):
    device_id = kwargs.get('device_id')

    inputs, train_set, eval_set = generate_ds2_data(args.max_length,
                                                    args.str_w, args.nout,
                                                    args.nbands,
                                                    args.batch_size,
                                                    args.num_iterations)

    model_out = get_mini_ds2(inputs, args.nfilters, args.filter_width,
                             args.str_w, args.nbands, args.depth,
                             args.hidden_size, args.batch_norm,
                             args.hetr_device, device_id)

    if args.bprop:
        with ng.metadata(device=args.hetr_device,
                         device_id=device_id,
                         parallel=ax.N):
            loss = ng.ctc(model_out, ng.flatten(inputs["char_map"]),
                          inputs["audio_length"], inputs["trans_length"])

            optimizer = GradientDescentMomentum(learning_rate=2e-5,
                                                momentum_coef=0.99,
                                                gradient_clip_norm=400,
                                                nesterov=args.nesterov)

            updates = optimizer(loss)
            mean_cost = ng.sequential([updates, ng.mean(loss, out_axes=())])
            bprop_computation_op = ng.computation(mean_cost, "all")

        benchmark = Benchmark(bprop_computation_op, train_set, inputs,
                              args.backend, args.hetr_device)
        Benchmark.print_benchmark_results(
            benchmark.time(args.num_iterations,
                           args.skip_iter,
                           'ds2_bprop',
                           args.visualize,
                           preprocess=True))
    else:
        fprop_computation_op = ng.computation(model_out, "all")

        benchmark_fprop = Benchmark(fprop_computation_op, train_set, inputs,
                                    args.backend, args.hetr_device)
        Benchmark.print_benchmark_results(
            benchmark_fprop.time(args.num_iterations,
                                 args.skip_iter,
                                 'ds2_fprop',
                                 args.visualize,
                                 preprocess=True))
Example #2
    def __init__(self, m=50, p=40, d=20, lb=-100, ub=100, f=7):
        self.G = 10
        self.rpercent = 0.2
        self.hpercent = 0.6
        self.mpercent = 0.1
        self.m = m
        self.pop = p
        self.dim = d
        self.rNum = int(p * self.rpercent)
        self.hNum = int(p * self.hpercent)
        self.cNum = self.pop - self.rNum - self.hNum
        self.mNum = int(p * self.mpercent)
        self.lowb = lb
        self.upb = ub
        self.fit = np.zeros(self.pop)
        self.pfit = np.zeros(self.pop)
        self.gbest = np.zeros(self.dim)
        self.f = f
        self.gfit_list = np.zeros(0)
        xi = np.zeros(self.dim)
        # Initialize the chickens' positions
        self.x = pd.DataFrame(-1 +
                              2 * np.random.rand(self.pop, self.dim)) * 100
        self.px = self.x

        self.test = Benchmark(self.dim, self.f)
        for i in np.arange(self.pop):  # initialize each chicken's fitness
            xi = self.x.iloc[i]
            self.fit[i] = self.test.Fitness(xi)

        self.pfit = pd.DataFrame(self.fit)  # initialize the pre-update fitness
        self.gfit = self.pfit.min()  # initialize the global best fitness
        self.ind = self.pfit.idxmin()
        self.gbest = self.x.iloc[self.ind]  # initialize the global best position
Example #3
    def setUp(self):
        self.status = []
        self.called_ReadCache = None
        self.log_error = []
        self.log_output = []
        self.err_msg = None
        self.test_benchmark = Benchmark(
            'page_cycler.netsim.top_10',  # name
            'page_cycler.netsim.top_10',  # test_name
            '',  # test_args
            1,  # iterations
            False,  # rm_chroot_tmp
            '',  # perf_args
            suite='telemetry_Crosperf')  # suite

        self.test_label = MockLabel('test1',
                                    'image1',
                                    'autotest_dir',
                                    '/tmp/test_benchmark_run',
                                    'x86-alex',
                                    'chromeos2-row1-rack4-host9.cros',
                                    image_args='',
                                    cache_dir='',
                                    cache_only=False,
                                    log_level='average',
                                    compiler='gcc')

        self.test_cache_conditions = [
            CacheConditions.CACHE_FILE_EXISTS, CacheConditions.CHECKSUMS_MATCH
        ]

        self.mock_logger = logger.GetLogger(log_dir='', mock=True)

        self.mock_machine_manager = mock.Mock(spec=MachineManager)
Example #4
def main():
    # XXX Deduce benchmark scope from commandline arguments
    # Topology names
    if len(sys.argv) == 2:
        names = sys.argv[1:]
    else:
        names = get_names('rocketfuel-*-r0.cch')
        #names.extend(get_names('rocketfuelbb-*-r0.cch'))

    topologies = [Topology(name) for name in names]
    patterns = [p() for p in MobilityPattern.factory_list().values()]

    tasks = list()
    for topology in topologies:
        for pattern in patterns:
            tasks.extend(Benchmark(topology, pattern).get_tasks())

    if PARALLEL:
        pool = multiprocessing.Pool(processes=NUM_PROCESSES)
        pool.map(Task.virtual_run, tasks)
        pool.close()
        pool.join()
    else:
        for task in tasks:
            task.run()
Example #5
def main():
    # Create Problem
    polynomial = Polynomial(num_terms=5, num_dimensions=2)
    log.info(polynomial)

    # Define Search Range
    search_range = SearchRange()
    search_range.set_feasible_range(10000, 2)

    # Solvers
    # brute_force_solver = BruteForceSolver(search_range)
    genetic_algorithm_solver = GeneticAlgorithmSolver(search_range)

    gui = Gui(polynomial, search_range)
    # benchmark = Benchmark([brute_force_solver, genetic_algorithm_solver])
    benchmark = Benchmark([genetic_algorithm_solver])

    # Benchmark
    benchmark.evaluate(polynomial)

    # Visualize Solvers
    # gui.create_animation(brute_force_solver)
    animation = gui.create_animation(genetic_algorithm_solver)

    # Create Default Visual
    # gui.plot_problem()
    gui.show()
Example #6
def execute_all(method, template, show_images):
    """ Start a benchmark for the given method for every avaiable file. """
    benchmark = Benchmark(method(), template)
    benchmark.execute_all(show_images)
    print "Recall: " + str(benchmark.recall())
    print "Precision: " + str(benchmark.precision())
    print "F-Measure: " + str(benchmark.fmeasure())
    def predict_hinges(self, k_inv):
        """
        Predicts the hinge residues.
        :param  k_inv: The inverted matrix produced by the GNM analysis which is interpreted as a correlation matrix.
        :return:    The predicted hinge residues
        """
        (n, m) = k_inv.shape

        confidence_levels = [0] * m

        before_cca = time.time()
        for i in range(1, m - 1):

            forward = min(m - i - 1, self.local_sensitivity)
            backward = min(i, self.local_sensitivity)

            cross = k_inv[i + 1:i + forward + 1, i - backward:i]

            cross_avg = np.average(cross)

            confidence_levels[i] = math.exp(-cross_avg)
        after_cca = time.time()
        Benchmark().update(m, 'CCA confidence', after_cca - before_cca)
        return predict_hinges(confidence_levels, self.local_sensitivity, 90,
                              95, 0.3)
Example #8
def initialize_benchmark(initialize_benchmark_variables):
    pytest.benchmark_results = pull_data(pytest.benchmarkString,
                                         pytest.start_date, pytest.end_date)
    pytest.benchmark_return_dict = calculate_returns(
        pytest.benchmark_results['stock_dict'])
    pytest.testBenchmark = Benchmark(pytest.benchmark_return_dict,
                                     pytest.benchmarkString)
Example #9
    def run_test_setup(self, test):
        super(Browsertime, self).run_test_setup(test)

        if test.get("type") == "benchmark":
            # benchmark-type tests require the benchmark test to be served out
            self.benchmark = Benchmark(self.config, test)
            test["test_url"] = test["test_url"].replace(
                "<host>", self.benchmark.host)
            test["test_url"] = test["test_url"].replace(
                "<port>", self.benchmark.port)

        if test.get("playback") is not None and self.playback is None:
            self.start_playback(test)

        # TODO: geckodriver/chromedriver from tasks.
        self.driver_paths = []
        if self.browsertime_geckodriver:
            self.driver_paths.extend(
                ["--firefox.geckodriverPath", self.browsertime_geckodriver])
        if self.browsertime_chromedriver:
            if (not self.config.get("run_local", None)
                    or "{}" in self.browsertime_chromedriver):
                if self.browser_version:
                    bvers = str(self.browser_version)
                    chromedriver_version = bvers.split(".")[0]
                else:
                    chromedriver_version = DEFAULT_CHROMEVERSION

                self.browsertime_chromedriver = self.browsertime_chromedriver.format(
                    chromedriver_version)

            self.driver_paths.extend(
                ["--chrome.chromedriverPath", self.browsertime_chromedriver])

        LOG.info("test: {}".format(test))
Example #10
def run_mini_ds2_benchmark(max_length,
                           nbands,
                           str_w,
                           batch_size,
                           max_iter,
                           skip_iter,
                           nfilters,
                           filter_width,
                           depth,
                           hidden_size,
                           batch_norm,
                           device_id,
                           device,
                           transformer,
                           visualize=False):
    inputs, train_set, eval_set = generate_ds2_data(max_length, str_w, nbands,
                                                    batch_size, max_iter)
    model_out = get_mini_ds2(inputs, nfilters, filter_width, str_w, nbands,
                             depth, hidden_size, batch_norm, device_id)

    fprop_computation_op = ng.computation(model_out, "all")

    benchmark_fprop = Benchmark(fprop_computation_op, train_set, inputs,
                                transformer, device)
    Benchmark.print_benchmark_results(
        benchmark_fprop.time(max_iter, skip_iter, 'ds2_fprop', visualize))
Example #11
    def predict_hinges(self, k_inv):
        """
        Predicts the hinge residues.
        :param  k_inv: The inverted matrix produced by the GNM analysis which is interpreted as a correlation matrix.
        :return:    The predicted hinge residues
        """
        (m, n) = k_inv.shape

        confidence_levels = [0] * m

        before_cvd = time.time()
        for i in range(m):
            forward = min(m - i, self.local_sensitivity)
            backward = min(i, self.local_sensitivity - 1)

            distances = spt.distance.squareform(
                spt.distance.pdist(k_inv[i - backward:i + forward, :],
                                   'sqeuclidean'))

            confidence_levels[i] = np.sum(distances[:backward, backward + 1:])

        after_cvd = time.time()
        Benchmark().update(m, 'CVD confidence', after_cvd - before_cvd)
        return predict_hinges(confidence_levels, self.local_sensitivity, 90,
                              95, 0.2)
Example #12
    def _run_pytorch(self, config: BenchmarkConfig) -> Benchmark:
        """
        :return:
        """
        LOGGER.info("Running PyTorch Eager benchmark")
        benchmark = Benchmark()

        dummy_inputs = self._get_dummy_inputs(
            batch_size=config.batch_size,
            seq_len=(config.sequence_length - self.tokenizer.num_special_tokens_to_add(pair=False))
        )

        inputs = self.tokenizer(
            dummy_inputs,
            is_split_into_words=True,
            return_tensors=TensorType.PYTORCH,
        )

        inputs = inputs.to(config.device)
        self.model = self.model.to(config.device)

        # Warmup
        for _ in trange(config.warmup_runs, desc="Warming up"):
            self.model(**inputs)

        # Run benchmark
        benchmark_duration_ns = config.benchmark_duration * SEC_TO_NS_SCALE
        while sum(benchmark.latencies) < benchmark_duration_ns:
            with benchmark.track():
                self.model(**inputs)

        benchmark.finalize(benchmark_duration_ns)

        return benchmark
Example #13
def main():


    args = parse_args()

    data_dir = args.data_dir
    plot_dir = data_dir+"/plots/benchmarks/"

    if not os.path.exists(plot_dir):
        os.makedirs(plot_dir)


    # create benchmark object
    b = Benchmark(data_dir+"/pdb/")


    #specify methods to benchmark
    b.add_method("pseudo-likelihood APC", data_dir +"/predictions_pll/", "apc.mat")
    b.add_method("pseudo-likelihood raw", data_dir +"/predictions_pll/", "raw.mat")
    b.add_method("persistent contrastive divergence APC", data_dir +"/predictions_pcd/", "apc.mat")
    b.add_method("persistent contrastive divergence raw", data_dir +"/predictions_pcd/", "raw.mat")

    # add a constraint that all MRF optimizations have exit status >= 0
    b.add_constraint("opt_code", 0, "greater_equal")

    #compute the precision of predictions
    b.compute_evaluation_statistics(seqsep=6, contact_thr=8, noncontact_thr=8)

    #generate a benchmark plot
    plot = b.plot_precision_vs_rank()

    #format that benchmark plot to resemble the one in Fig 1C
    plot_pll_vs_pcd_benchmark_figure(plot, plot_dir, height=500, width=1000)
Example #14
    def predict_hinges(self, k_inv):
        """
        Predicts the hinge residues.
        :param  k_inv: The inverted matrix produced by the GNM analysis which is interpreted as a correlation matrix.
        :return:    The predicted hinge residues
        """
        (m, n) = k_inv.shape
        confidence_levels = [0] * m

        before_nca = time.time()
        for i in range(1, m - 1):
            forward = min(m - i - 1, self.local_sensitivity)
            backward = min(i, self.local_sensitivity)

            cross = k_inv[i + 1:i + forward + 1, i - backward:i]
            left_up = k_inv[i - backward:i, i - backward:i]
            right_bottom = k_inv[i + 1:i + forward + 1, i + 1:i + forward + 1]

            inter_avg = np.average(
                np.append(upper_triangular_no_diagonal(left_up),
                          upper_triangular_no_diagonal(right_bottom)))

            cross_avg = np.average(cross)

            confidence_levels[i] = (
                1 - self.alpha) * inter_avg - self.alpha * cross_avg

        after_nca = time.time()
        Benchmark().update(m, 'NCA confidence', after_nca - before_nca)

        return predict_hinges(confidence_levels, self.local_sensitivity, 80, 0,
                              0.7)
Example #15
def main():
    """
    Main function.

    """
    warnings.filterwarnings("ignore")
    # get data from csv file, and catch error reading file
    try:
        df = pd.read_csv('data/dataset_mood_smartphone.csv', sep=",")
    except OSError:
        sys.exit("ERROR: cannot open or read input file")

    # get command line options
    args = get_args()

    df = util.init_data(df)

    # initial model
    if args.prediction_models == "ml":
        model = ML()
    elif args.prediction_models == "temporal_algorithm":
        model = Temporal()
    elif args.prediction_models == "benchmark":
        model = Benchmark()
    else:
        sys.exit("BUG! this should not happen.")

    # call the pipeline
    predictions, evaluation_scores = model.pipeline(df)

    # print output
    util.output_to_file(predictions, args.pred_file)
    # util.output_to_file(evaluation_scores, args.eval_file)
    util.output_to_screen(evaluation_scores)
Example #16
  def test_benchmark(self):
    # Test creating a benchmark with all the fields filled out.
    b1 = Benchmark(
        'b1_test',  # name
        'octane',  # test_name
        '',  # test_args
        3,  # iterations
        False,  # rm_chroot_tmp
        'record -e cycles',  # perf_args
        'telemetry_Crosperf',  # suite
        True)  # show_all_results
    self.assertTrue(b1.suite, 'telemetry_Crosperf')

    # Test creating a benchmark field with default fields left out.
    b2 = Benchmark(
        'b2_test',  # name
        'octane',  # test_name
        '',  # test_args
        3,  # iterations
        False,  # rm_chroot_tmp
        'record -e cycles')  # perf_args
    self.assertEqual(b2.suite, '')
    self.assertFalse(b2.show_all_results)

    # Test explicitly creating 'suite=Telemetry' and 'show_all_results=False'
    # and see what happens.
    b3 = Benchmark(
        'b3_test',  # name
        'octane',  # test_name
        '',  # test_args
        3,  # iterations
        False,  # rm_chroot_tmp
        'record -e cycles',  # perf_args
        'telemetry',  # suite
        False)  # show_all_results
    self.assertTrue(b3.show_all_results)

    # Check to see if the args to Benchmark have changed since the last time
    # this test was updated.
    args_list = [
        'self', 'name', 'test_name', 'test_args', 'iterations', 'rm_chroot_tmp',
        'perf_args', 'suite', 'show_all_results', 'retries', 'run_local'
    ]
    arg_spec = inspect.getargspec(Benchmark.__init__)
    self.assertEqual(len(arg_spec.args), len(args_list))
    for arg in args_list:
      self.assertIn(arg, arg_spec.args)
Example #17
def main():
    ''' Main method
    '''
    build_dir = "../../build/"
    bin_dir = "../../bin"

    benchmark = Benchmark(EXPERIMENTS, RUNS, build_dir, bin_dir)
    benchmark.generate()
    benchmark.run()
Example #18
 def __init__(self):
     # self.actions = ['toB+tree', 'toHash']
     self.structure = ['hash', 'b+tree']
     self.preIndStructure = 'b+tree'
     self.db = Database()
     self.bm = Benchmark(BENCHMARK_FILE_DATAPATH)
     self.qlines = self.bm.query_read()
     with open("./testdata/data.txt", 'r') as f:
         self.labellines = [line.rstrip('\n') for line in f]
Example #19
    def evaluate(self, rtol=1.e-9, atol=1.e-40, skip_dict={}):
        '''Compare IO file checksum with benchmark.

        Read checksum from IO file, read benchmark
        corresponding to test_name, and assert that they are equal.
        Almost all of this function's body is for
        user-readable print statements.

        @param self The object pointer.
        @param rtol Relative tolerance for the element-wise comparison.
        @param atol Absolute tolerance for the element-wise comparison.
        @param skip_dict Dictionary mapping outer keys to inner keys to skip in the comparison.
        '''

        print("Checksum evaluation started...")

        ref_benchmark = Benchmark(self.test_name)

        # Dictionaries have same outer keys (levels, species)?
        if (self.data.keys() != ref_benchmark.data.keys()):
            print("ERROR: Benchmark and IO file checksum "
                  "have different outer keys:")
            print("Benchmark: %s" % ref_benchmark.data.keys())
            print("IO file  : %s" % self.data.keys())
            sys.exit(1)

        # Dictionaries have same inner keys (field and particle quantities)?
        for key1 in ref_benchmark.data.keys():
            if (self.data[key1].keys() != ref_benchmark.data[key1].keys()):
                print("ERROR: Benchmark and IO file checksum have "
                      "different inner keys:")
                print("Common outer keys: %s" % ref_benchmark.data.keys())
                print("Benchmark inner keys in %s: %s"
                      % (key1, ref_benchmark.data[key1].keys()))
                print("IO file   inner keys in %s: %s"
                      % (key1, self.data[key1].keys()))
                sys.exit(1)

        # Dictionaries have same values?
        checksums_differ = False
        for key1 in ref_benchmark.data.keys():
            for key2 in ref_benchmark.data[key1].keys():
                if key1 in skip_dict.keys() and key2 in skip_dict[key1]:
                    continue
                passed = np.isclose(self.data[key1][key2],
                                    ref_benchmark.data[key1][key2],
                                    rtol=rtol, atol=atol)
                if not passed:
                    print("ERROR: Benchmark and IO file checksum have "
                          "different value for key [%s,%s]" % (key1, key2))
                    print("Benchmark: [%s,%s] %.40f"
                          % (key1, key2, ref_benchmark.data[key1][key2]))
                    print("IO file  : [%s,%s] %.40f"
                          % (key1, key2, self.data[key1][key2]))
                    checksums_differ = True
        if checksums_differ:
            sys.exit(1)
        print("Checksum evaluation passed.")
Example #20
def test():
    
    p = BlockChainPacket()
    p.payload = "should OK"

    with Benchmark('p1 mine') as _:
        p.mine()

    p.writeToOut()
    out.flush()

    p2 = p.next()
    p2.payload = 'should OK'
    
    with Benchmark('p2 mine') as _:
        p2.mine()

    p2.writeToOut()
    out.flush()
    

    p2.payload = "should FAIL, not mined packet"
    p2.writeToOut()
    out.flush()
    

    p2.payload = 'should OK, remined'
    with Benchmark('p2 remine') as _:
        p2.mine()
    p2.writeToOut()
    out.flush()
    

    p3 = p2.next()
    p3.payload = '''
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Nunc eget gravida orci.
Etiam mattis, nibh ut scelerisque elementum, leo lorem dictum arcu, sit amet faucibus nunc augue at velit.
Lorem ipsum dolor sit amet, consectetur adipiscing elit. Donec eget fringilla arcu. Curabitur posuere nibh quam,
varius viverra felis porttitor non. Nullam sagittis et massa vel volutpat. Donec a dolor porta, fermentum lorem quis, posuere massa.'''

    with Benchmark('p3 mine') as _:
        p3.mine()
    p3.writeToOut()
    out.flush()
Example #21
 def AppendBenchmarkSet(self, benchmarks, benchmark_list, test_args,
                        iterations, rm_chroot_tmp, perf_args, suite,
                        show_all_results, retries, run_local):
     """Add all the tests in a set to the benchmarks list."""
     for test_name in benchmark_list:
         telemetry_benchmark = Benchmark(test_name, test_name, test_args,
                                         iterations, rm_chroot_tmp,
                                         perf_args, suite, show_all_results,
                                         retries, run_local)
         benchmarks.append(telemetry_benchmark)
Example #22
    def run_test(self, test, timeout=None):
        self.log.info("starting raptor test: %s" % test['name'])
        self.log.info("test settings: %s" % str(test))
        self.log.info("raptor config: %s" % str(self.config))

        # benchmark-type tests require the benchmark test to be served out
        if test.get('type') == "benchmark":
            self.benchmark = Benchmark(self.config, test)
            benchmark_port = int(self.benchmark.port)
        else:
            benchmark_port = 0

        gen_test_config(self.config['app'], test['name'],
                        self.control_server.port, benchmark_port)

        # must install the raptor addon each time because we dynamically update some content
        raptor_webext = os.path.join(webext_dir, 'raptor')
        self.log.info("installing webext %s" % raptor_webext)
        self.profile.addons.install(raptor_webext)
        # on firefox we can get an addon id; chrome addon actually is just cmd line arg
        if self.config['app'] == "firefox":
            webext_id = self.profile.addons.addon_details(raptor_webext)['id']

        # some tests require tools to playback the test pages
        if test.get('playback', None) is not None:
            self.get_playback_config(test)
            # startup the playback tool
            self.playback = get_playback(self.config)

        self.runner.start()

        proc = self.runner.process_handler
        self.output_handler.proc = proc
        self.control_server.browser_proc = proc

        try:
            self.runner.wait(timeout)
        finally:
            try:
                self.runner.check_for_crashes()
            except NotImplementedError:  # not implemented for Chrome
                pass

        if self.playback is not None:
            self.playback.stop()

        # remove the raptor webext; as it must be reloaded with each subtest anyway
        # applies to firefox only; chrome the addon is actually just cmd line arg
        if self.config['app'] == "firefox":
            self.log.info("removing webext %s" % raptor_webext)
            self.profile.addons.remove_addon(webext_id)

        if self.runner.is_running():
            self.log("Application timed out after {} seconds".format(timeout))
            self.runner.stop()
Example #23
def run_resnet_benchmark(dataset,
                         num_iterations,
                         n_skip,
                         batch_size,
                         device_id,
                         transformer_type,
                         device,
                         bprop=True,
                         batch_norm=False,
                         visualize=False):
    inputs, data, train_set = get_fake_data(dataset, batch_size,
                                            num_iterations)

    # Running forward propagation
    model_out = get_mini_resnet(inputs,
                                dataset,
                                device_id,
                                batch_norm=batch_norm)

    # Running back propagation
    if bprop:
        with ng.metadata(device_id=device_id, parallel=ax.N):
            optimizer = GradientDescentMomentum(0.01, 0.9)
            train_loss = ng.cross_entropy_multi(
                model_out, ng.one_hot(inputs['label'], axis=ax.Y))

            batch_cost = ng.sequential(
                [optimizer(train_loss),
                 ng.mean(train_loss, out_axes=())])
            batch_cost_computation_op = ng.computation(batch_cost, "all")
        benchmark = Benchmark(batch_cost_computation_op, train_set, inputs,
                              transformer_type, device)
        Benchmark.print_benchmark_results(
            benchmark.time(num_iterations, n_skip, dataset + '_msra_bprop',
                           visualize, 'device_id'))
    else:
        fprop_computation_op = ng.computation(model_out, 'all')
        benchmark = Benchmark(fprop_computation_op, train_set, inputs,
                              transformer_type, device)
        Benchmark.print_benchmark_results(
            benchmark.time(num_iterations, n_skip, dataset + '_msra_fprop',
                           visualize))
Example #24
    def serve_benchmark_source(self, test):
        # benchmark-type tests require the benchmark test to be served out
        self.benchmark = Benchmark(self.config, test)
        self.benchmark_port = int(self.benchmark.port)

        # for android we must make the benchmarks server available to the device
        if self.config['app'] in self.firefox_android_apps and \
                self.config['host'] in ('localhost', '127.0.0.1'):
            self.log.info("making the raptor benchmarks server port available to device")
            _tcp_port = "tcp:%s" % self.benchmark_port
            self.device.create_socket_connection('reverse', _tcp_port, _tcp_port)
Example #25
 def run(self):
     try:
         result = Benchmark(self.stat, self.file, self.iterations, self.top,
                            self.debug)
     except:
         traceback.print_exc()
         exctype, value = sys.exc_info()[:2]
         self.signals.error.emit((exctype, value, traceback.format_exc()))
     else:
         self.signals.result.emit(result)
     finally:
         self.signals.finished.emit()
Example #26
 def predict_hinges(self, k_inv):
     """
     Predicts the hinge residues.
     :param  k_inv: The inverted matrix produced by the GNM analysis which is interpreted as a correlation matrix.
     :return:    The predicted hinge residues
     """
     before_tf = time.time()
     predicted_confidence_levels = self._predict_confidence_levels(k_inv)
     after_tf = time.time()
     Benchmark().update(k_inv.shape[0], 'Tensor Flow', after_tf - before_tf)
     return predict_hinges(predicted_confidence_levels,
                           self.local_sensitivity, 90, 95, 0)
Example #27
def calc_gnm_k_inv(ubi, header, contact_map=None, cutoff=8, contact_map_alpha=0.5, number_of_modes=2):
    """
    Compute the correlation matrix using GNM analysis.

    :param ubi: The AtomGroup object containing the residues data
    :param header:  The header of the protein file
    :param contact_map: The protein's contact map.
    :param cutoff:  The cutoff distance used to build the Kirchhoff matrix.
    :param contact_map_alpha:   The weight parameter used by the ContactMapAndDistanceGamma.
    :param number_of_modes: The number of slowest modes to take into consideration in pseudo-inverse computation.

    :return: The protein's GNM correlation matrix
    """
    benchmark = Benchmark()
    gnm = GNM()

    before_kirchhoff = time.time()
    if contact_map is None:
        gnm.buildKirchhoff(ubi, cutoff=cutoff, gamma=SquaredDistanceGamma(cutoff))
    else:
        gnm.buildKirchhoff(ubi, cutoff=cutoff, gamma=ContactMapAndDistanceGamma(contact_map, cutoff, contact_map_alpha))
    after_kirchhoff = time.time()

    benchmark.update(len(ubi), 'Springs Setup', after_kirchhoff - before_kirchhoff)

    before_calc_modes = time.time()
    gnm.calcModes()
    after_calc_modes = time.time()

    benchmark.update(len(ubi), 'Eigenvalues Find', after_calc_modes - before_calc_modes)

    if gnm._array is None:
        raise ValueError('Modes are not calculated.')

    V = gnm._array

    eigvals = gnm._eigvals

    (m, n) = V.shape
    k_inv = np.zeros((m,m))

    before_k_inv = time.time()
    for i in range(number_of_modes):
        eigenvalue = eigvals[i]
        eigenvector = V[:,i]
        k_inv += (np.outer(eigenvector, eigenvector) / eigenvalue)
    after_k_inv = time.time()

    benchmark.update(len(ubi), 'Gamma Inversion', after_k_inv - before_k_inv)

    return k_inv
Example #28
def main():
    ''' Main method
    '''
    build_dir = "../../build/"
    bin_dir = "../../bin"

    for exp in EXPERIMENTS:
        exp['command'] += f" -r {exp['number_of_rows']} -d {exp['number_of_columns']}"
        exp['command'] += f" -s {exp['selectivity']} -q {exp['number_of_queries']}"
        exp['command'] += f" -f {exp['data']} -w {exp['workload']}"

    benchmark = Benchmark(EXPERIMENTS, RUNS, build_dir, bin_dir)
    benchmark.generate()
    benchmark.run()
Example #29
def reset_benchmark(test_name, plotfile, do_fields=True, do_particles=True):
    '''Update the benchmark (overwrites reference json file).

    Overwrite value of benchmark corresponding to
    test_name with checksum read from input plotfile.

    @param test_name Name of test, as found between [] in .ini file.
    @param plotfile Plotfile from which the checksum is computed.
    @param do_fields Whether to write field checksums in the benchmark.
    @param do_particles Whether to write particles checksums in the benchmark.
    '''
    ref_checksum = Checksum(test_name, plotfile, do_fields=do_fields,
                            do_particles=do_particles)
    ref_benchmark = Benchmark(test_name, ref_checksum.data)
    ref_benchmark.reset()
Example #30
    def printBenchmarks(self, data):
        """Takes a data input of tensors and targets. ge is the value that is
        used to treshold the output to 1 and 0.
        Returns TPR, FPR and accuracy for each data"""
        self.module.eval()

        print("Name" + "\t\t" + "Leak" + "\t" + "TPR" + "\t" + "FPR" + "\t" +
              "Accuracy")
        for tensors, targets, scenario in data:
            bench = Benchmark()

            for tensor, target in zip(tensors, targets):
                classification = self.module.classify(tensor)[1]

                bench += Benchmark(classification.squeeze(), target.squeeze())

            if bench.p != 0:
                leak = "yes"
            else:
                leak = "no"
            print(
                f"{scenario:13}\t{leak}\t{bench.getTPR():.3f}\t{bench.getFPR():.3f}\t{bench.getAccuracy():.3f}"
            )
            self.benchmarks[scenario] = bench