Example 1
def run_benchmark(name, arguments_list, precision, num_runs, platform, device):
    binary = "./clblast_client_x" + name

    # Loops over sub-benchmarks per benchmark
    results = []
    for arguments in arguments_list:

        # Sets the arguments
        constant_arguments = ["-warm_up", "-q", "-no_abbrv", "-cblas 0", "-cublas 0"]
        common_arguments = ["-precision %d" % precision, "-runs %d" % num_runs]
        opencl_arguments = ["-platform %d" % platform, "-device %d" % device]
        all_arguments = opencl_arguments + common_arguments + constant_arguments
        for arg_name, arg_value in arguments.items():  # avoid shadowing the 'name' parameter
            all_arguments.append("-" + arg_name + " " + str(arg_value))

        # Calls the binary and parses the results
        benchmark_output = utils.run_binary(binary, all_arguments)
        result = utils.parse_results(benchmark_output)

        # For half-precision: also runs single-precision for comparison
        if precision == 16:
            all_arguments = [arg if arg != "-precision 16" else "-precision 32" for arg in all_arguments]
            benchmark_output = utils.run_binary(binary, all_arguments)
            result_extra = utils.parse_results(benchmark_output)
            for index in range(min(len(result), len(result_extra))):
                result[index]["GBs_1_FP32"] = result_extra[index]["GBs_1"]
                result[index]["GBs_2"] = result_extra[index]["GBs_2"]
                result[index]["GFLOPS_1_FP32"] = result_extra[index]["GFLOPS_1"]
                result[index]["GFLOPS_2"] = result_extra[index]["GFLOPS_2"]

        results.extend(result)
    return results
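A minimal invocation sketch; the routine name, problem sizes, and device indices below are illustrative placeholders, not values from the original script:

# Hypothetical call: one GEMM sub-benchmark, single precision (32),
# ten runs, on OpenCL platform 0 / device 0.
results = run_benchmark("gemm", [{"m": 1024, "n": 1024, "k": 1024}],
                        precision=32, num_runs=10, platform=0, device=0)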
Example 2
def main(args):
    expectations, input_files, columns = pre_process_args(args)
    successful_dest_folder = args.successful_dest_folder
    failed_dest_folder = args.failed_dest_folder

    files_handler = FilesHandler(input_files=input_files)
    files_path = files_handler.get_files_path()

    validator = FileValidator(columns=columns, expectations=expectations)

    logging.info("Starting the validation files process.. ")
    p = Pool(processes=cpu_count())
    r = list(
        tqdm(p.imap(validator.validate, files_path), total=len(files_path)))
    p.close()
    p.join()

    successful_expectations, failed_expectations = parse_results(r)
    write_results(successful_expectations, success=True)
    write_results(failed_expectations, success=False)

    if successful_dest_folder is not None:
        for expectation in successful_expectations:
            file_name = expectation["file_path"].split("/")[-1]
            files_handler.move_file(file_name, successful_dest_folder)

    if failed_dest_folder is not None:
        for expectation in failed_expectations:
            file_name = expectation["file_path"].split("/")[-1]
            files_handler.move_file(file_name, failed_dest_folder)

    if files_handler.is_from_storage:
        files_handler.clean_temp_folder()
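The pool lifecycle could equally be handled with a context manager; a sketch, assuming Python 3.3+ (Pool.__exit__ calls terminate(), which is safe here because list() has already drained the iterator):

with Pool(processes=cpu_count()) as p:
    r = list(tqdm(p.imap(validator.validate, files_path), total=len(files_path)))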
Example 3
    def test_run_axe(self, base_url, selenium):
        """Run axe against base_url and verify JSON output."""

        selenium.get(base_url)

        # Inject the axe-core API, then the script that runs axe on the page
        with open('./src/axe.min.js') as axe_core:
            selenium.execute_script(axe_core.read())
        with open('./src/script.js') as axe_runner:
            selenium.execute_script(axe_runner.read())

        # Delay while JavaScript runs
        time.sleep(1)
        # Get JSON results
        result = selenium.find_element_by_id('axe-result').text
        # --- Is there a better way to pass data from JS to python? --- #
        # Parse JSON
        parsed = json.loads(result)

        global test_results
        test_results = utils.parse_results(parsed['violations'])

        # Pretty-print the axe violations to a JSON file
        # (the original `file.close` lacked parentheses, so the file was never closed)
        with open('result.json', 'w') as result_file:
            result_file.write(json.dumps(parsed['violations'], indent=4))

        assert len(test_results) != 0, test_results
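On the question in the comment above: Selenium can return JSON-serializable values directly from JavaScript, which avoids both the fixed sleep and the DOM round-trip. A sketch, assuming axe-core has already been injected (axe.run() returns a Promise, execute_async_script passes a completion callback as the last script argument, and the driver's script timeout may need raising via set_script_timeout):

violations = selenium.execute_async_script(
    "var done = arguments[arguments.length - 1];"
    "axe.run().then(function (results) { done(results.violations); });")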
Example 4
 def do_POST(self):
     content_length = int(self.headers['Content-Length'])
     post_data = self.rfile.read(content_length)
     is_valid = self.check_request_data(post_data, self.headers)
     if not is_valid:
         self.send_error(400, 'Invalid message body')
     else:
         try:
             # rfile.read() returns bytes; decode before splitting on CRLF
             # (splitting bytes on a str pattern raises TypeError in Python 3)
             post_data = post_data.decode('utf-8').split('\r\n')
             test_results = parse_results(post_data)
             if test_results:
                 PatchServerHandler.update(test_results, self.headers)
             self._set_headers()
         except KeyError:
             self.send_error(400, 'Invalid message structure')
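A hypothetical client call for this handler; the host, port, and payload lines are illustrative, with results separated by CRLF as the handler expects:

import requests

requests.post("http://localhost:8080", data="result-line-1\r\nresult-line-2")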
Example 5
 def parse(self):
     tested_patches = os.listdir(self.builds_path)
     logger.info('Parsing results for the following patches: %s', tested_patches)
     for log_folder in os.listdir(self.results_path):
         log_path = os.path.join(self.results_path, log_folder)
         with open(os.path.join(log_path, 'ica.log'), 'r') as log_content:
             logger.info('Parsing logs from %s', log_folder)
             results = parse_results(log_content, tested_patches)
             for patch_name, result in results.items():
                 if result != 'Passed':
                     # Move the failing patch's build into the failures folder
                     logger.warning('%s failed on %s', patch_name, log_folder)
                     build_path = os.path.join(self.builds_path, patch_name)
                     move(build_path, self.failures_path)
Example 6
def main():
    config = RetrainConfig()
    main_proc = not config.distributed or config.local_rank == 0
    if config.distributed:
        torch.cuda.set_device(config.local_rank)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method=config.dist_url,
                                             rank=config.local_rank,
                                             world_size=config.world_size)
    if main_proc:
        os.makedirs(config.output_path, exist_ok=True)
    if config.distributed:
        torch.distributed.barrier()
    logger = utils.get_logger(os.path.join(config.output_path, 'search.log'))
    if main_proc:
        config.print_params(logger.info)
    utils.reset_seed(config.seed)

    loaders, samplers = get_augment_datasets(config)
    train_loader, valid_loader = loaders
    train_sampler, valid_sampler = samplers

    model = Model(config.dataset,
                  config.layers,
                  in_channels=config.input_channels,
                  channels=config.init_channels,
                  retrain=True).cuda()
    if config.label_smooth > 0:
        criterion = utils.CrossEntropyLabelSmooth(config.n_classes,
                                                  config.label_smooth)
    else:
        criterion = nn.CrossEntropyLoss()

    fixed_arc_path = os.path.join(config.output_path, config.arc_checkpoint)
    with open(fixed_arc_path, "r") as f:
        fixed_arc = json.load(f)
    fixed_arc = utils.encode_tensor(fixed_arc, torch.device("cuda"))
    genotypes = utils.parse_results(fixed_arc, n_nodes=4)
    genotypes_dict = {i: genotypes for i in range(3)}
    apply_fixed_architecture(model, fixed_arc_path)
    param_size = utils.param_size(
        model, criterion,
        [3, 32, 32] if 'cifar' in config.dataset else [3, 224, 224])

    if main_proc:
        logger.info("Param size: %.6f", param_size)
        logger.info("Genotype: %s", genotypes)

    # change training hyper parameters according to cell type
    if 'cifar' in config.dataset:
        if param_size < 3.0:
            config.weight_decay = 3e-4
            config.drop_path_prob = 0.2
        elif param_size < 3.5:
            config.weight_decay = 3e-4
            config.drop_path_prob = 0.3
        else:
            config.weight_decay = 5e-4
            config.drop_path_prob = 0.3

    if config.distributed:
        model = apex.parallel.convert_syncbn_model(model)
        model = DistributedDataParallel(model, delay_allreduce=True)

    optimizer = torch.optim.SGD(model.parameters(),
                                config.lr,
                                momentum=config.momentum,
                                weight_decay=config.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                              config.epochs,
                                                              eta_min=1E-6)

    best_top1 = best_top5 = 0.
    for epoch in range(config.epochs):
        drop_prob = config.drop_path_prob * epoch / config.epochs
        if config.distributed:
            model.module.drop_path_prob(drop_prob)
        else:
            model.drop_path_prob(drop_prob)
        # training
        if config.distributed:
            train_sampler.set_epoch(epoch)
        train(logger, config, train_loader, model, optimizer, criterion, epoch,
              main_proc)

        # validation
        top1, top5 = validate(logger, config, valid_loader, model, criterion,
                              epoch, main_proc)
        best_top1 = max(best_top1, top1)
        best_top5 = max(best_top5, top5)
        lr_scheduler.step()

    logger.info("Final best Prec@1 = %.4f Prec@5 = %.4f", best_top1, best_top5)
Example 7
def main():
    config = RetrainConfig()
    main_proc = not config.distributed or config.local_rank == 0
    if config.distributed:
        torch.cuda.set_device(config.local_rank)
        torch.distributed.init_process_group(backend='nccl',
                                             init_method=config.dist_url,
                                             rank=config.local_rank,
                                             world_size=config.world_size)
    if main_proc:
        os.makedirs(config.output_path, exist_ok=True)
    if config.distributed:
        torch.distributed.barrier()
    logger = utils.get_logger(os.path.join(config.output_path, 'search.log'))
    if main_proc:
        config.print_params(logger.info)
    utils.reset_seed(config.seed)

    loaders, samplers = get_augment_datasets(config)
    train_loader, valid_loader = loaders
    train_sampler, valid_sampler = samplers
    train_loader = CyclicIterator(train_loader, train_sampler)
    # valid_loader = CyclicIterator(valid_loader, valid_sampler, False)

    model = Model(config.dataset,
                  config.layers,
                  in_channels=config.input_channels,
                  channels=config.init_channels,
                  retrain=True).cuda()
    if config.label_smooth > 0:
        criterion = utils.CrossEntropyLabelSmooth(config.n_classes,
                                                  config.label_smooth)
    else:
        criterion = nn.CrossEntropyLoss()

    fixed_arc_path = config.arc_checkpoint
    with open(fixed_arc_path, "r") as f:
        fixed_arc = json.load(f)
    fixed_arc = utils.encode_tensor(fixed_arc, torch.device("cuda"))
    genotypes = utils.parse_results(fixed_arc, n_nodes=4)
    genotypes_dict = {i: genotypes for i in range(3)}
    apply_fixed_architecture(model, fixed_arc_path)
    param_size = utils.param_size(model, criterion, [3, 512, 512])

    if main_proc:
        logger.info("Param size: %.6f", param_size)
        logger.info("Genotype: %s", genotypes)

    # change training hyper parameters according to cell type
    if 'cifar' in config.dataset:
        if param_size < 3.0:
            config.weight_decay = 3e-4
            config.drop_path_prob = 0.2
        elif param_size < 3.5:
            config.weight_decay = 3e-4
            config.drop_path_prob = 0.3
        else:
            config.weight_decay = 5e-4
            config.drop_path_prob = 0.3

    if config.distributed:
        model = apex.parallel.convert_syncbn_model(model)
        model = DistributedDataParallel(model, delay_allreduce=True)

    optimizer = torch.optim.AdamW(model.parameters(), config.lr)
    # lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, config.epochs, eta_min=1E-6)

    best_top1 = 0.
    epoch = 0
    try:
        checkpoint = torch.load(config.model_checkpoint)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        epoch = checkpoint['epoch']
        loss = checkpoint['loss']

        model.eval()
        print("----------------------------")
        print("MODEL LOADED FROM CHECKPOINT" + config.model_checkpoint)
        print("----------------------------")
    except:
        print("----------------------------")
        print("MODEL NOT LOADED FROM CHECKPOINT")
        print("----------------------------")
        pass

    # for epoch in range(0, epoch):
    # lr_scheduler.step()

    for epoch in range(epoch, config.epochs):
        drop_prob = config.drop_path_prob * epoch / config.epochs
        if config.distributed:
            model.module.drop_path_prob(drop_prob)
        else:
            model.drop_path_prob(drop_prob)
        # training
        if config.distributed:
            train_sampler.set_epoch(epoch)
        train(logger, config, train_loader, model, optimizer, criterion, epoch,
              main_proc)
        if epoch % config.log_frequency == 0:
            # validation
            top1 = validate(logger, config, valid_loader, model, criterion,
                            epoch, main_proc)
            best_top1 = max(best_top1, top1)
            # lr_scheduler.step()
            logger.info("Final best Prec@1 = %.4f", best_top1)
Example 8
 def plot_genotype(self, results, logger):
     genotypes = parse_results(results, self.n_nodes)
     logger.info(genotypes)
     return genotypes
Example 9
def test_parse_results_invalid(invalid_data_frame):
    invalid_df, _ = invalid_data_frame
    # parse_results is expected to raise KeyError for the invalid frame;
    # pytest.raises fails the test if no exception occurs
    with pytest.raises(KeyError):
        parse_results(invalid_df)
Example 10
def test_parse_results_valid(valid_data_frame):
    result = parse_results(valid_data_frame)
    num_cols = len(result[0])
    num_rows = len(result)
    assert valid_data_frame.shape == (num_rows, num_cols)
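These two tests depend on fixtures that are not shown; a hypothetical pair, assuming parse_results consumes a pandas DataFrame and returns one record per row:

import pandas as pd
import pytest


@pytest.fixture
def valid_data_frame():
    # Illustrative frame; the real column names come from the project under test
    return pd.DataFrame({"name": ["a", "b"], "score": [1, 2]})


@pytest.fixture
def invalid_data_frame():
    # A frame missing the expected columns, plus the column list the test unpacks
    return pd.DataFrame({"unexpected": [1, 2]}), ["name", "score"]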