Example #1
from pathlib import Path
from typing import List, Text

import torch

# ArchResults and ResultsCount are assumed to come from the NAS-Bench-201 API
# (nas_201_api); adjust the import to match your local copy of the codebase.
from nas_201_api import ArchResults, ResultsCount


def account_one_arch(arch_index: int, arch_str: Text, checkpoints: List[Path],
                     datasets: List[Text]) -> ArchResults:
    information = ArchResults(arch_index, arch_str)

    for checkpoint_path in checkpoints:
        try:
            checkpoint = torch.load(checkpoint_path, map_location='cpu')
        except Exception as exc:
            raise ValueError(
                'This checkpoint failed to be loaded : {:}'.format(
                    checkpoint_path)) from exc
        used_seed = checkpoint_path.name.split('-')[-1].split('.')[0]
        ok_dataset = 0
        for dataset in datasets:
            if dataset not in checkpoint:
                print('Cannot find {:} in arch-{:} from {:}'.format(
                    dataset, arch_index, checkpoint_path))
                continue
            ok_dataset += 1
            results = checkpoint[dataset]
            assert results['finish-train'], \
                'Arch {:} (seed={:}) did not finish training on {:} ::: {:}'.format(
                    arch_index, used_seed, dataset, checkpoint_path)
            arch_config = {
                'name': 'infer.shape.tiny',
                'channels': arch_str,
                'arch_str': arch_str,
                'genotype': results['arch_config']['genotype'],
                'class_num': results['arch_config']['num_classes']
            }
            xresult = ResultsCount(dataset, results['net_state_dict'],
                                   results['train_acc1es'],
                                   results['train_losses'], results['param'],
                                   results['flop'], arch_config, used_seed,
                                   results['total_epoch'], None)
            xresult.update_train_info(results['train_acc1es'],
                                      results['train_acc5es'],
                                      results['train_losses'],
                                      results['train_times'])
            xresult.update_eval(results['valid_acc1es'],
                                results['valid_losses'],
                                results['valid_times'])
            information.update(dataset, int(used_seed), xresult)
        if ok_dataset < len(datasets):
            raise ValueError('{:} does not contain enough datasets : {:} vs {:}'.format(
                checkpoint_path, ok_dataset, len(datasets)))
    return information
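A minimal usage sketch for account_one_arch; the checkpoint directory, file-naming scheme, and architecture string below are hypothetical and only illustrate the expected inputs. Note that the seed is parsed from the checkpoint file name, so the checkpoint paths must be pathlib.Path objects:

from pathlib import Path

# Hypothetical layout: one directory per architecture and one checkpoint per
# seed, named e.g. 'arch-000004-777.pth' so the trailing '-<seed>.pth' part can
# be parsed as done inside account_one_arch.
ckpt_dir = Path('./output/arch-000004')
info = account_one_arch(
    arch_index=4,
    arch_str='|nor_conv_3x3~0|+|skip_connect~0|nor_conv_3x3~1|+'
             '|skip_connect~0|skip_connect~1|avg_pool_3x3~2|',  # hypothetical
    checkpoints=sorted(ckpt_dir.glob('*.pth')),
    datasets=['cifar10-valid', 'cifar10', 'cifar100', 'ImageNet16-120'])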
Example #2
# ResultsCount, dict2config, CellStructure, get_cell_based_tiny_net and
# pure_evaluate are assumed to be provided by the surrounding NAS-Bench-201 /
# AutoDL-Projects codebase.
def create_result_count(used_seed, dataset, arch_config, results,
                        dataloader_dict):
    xresult = ResultsCount(dataset, results['net_state_dict'],
                           results['train_acc1es'], results['train_losses'],
                           results['param'], results['flop'], arch_config,
                           used_seed, results['total_epoch'], None)

    net_config = dict2config(
        {
            'name': 'infer.tiny',
            'C': arch_config['channel'],
            'N': arch_config['num_cells'],
            'genotype': CellStructure.str2structure(arch_config['arch_str']),
            'num_classes': arch_config['class_num']
        }, None)
    network = get_cell_based_tiny_net(net_config)
    network.load_state_dict(xresult.get_net_param())
    if 'train_times' in results:  # new version
        xresult.update_train_info(results['train_acc1es'],
                                  results['train_acc5es'],
                                  results['train_losses'],
                                  results['train_times'])
        xresult.update_eval(results['valid_acc1es'], results['valid_losses'],
                            results['valid_times'])
    else:
        if dataset == 'cifar10-valid':
            xresult.update_OLD_eval('x-valid', results['valid_acc1es'],
                                    results['valid_losses'])
            loss, top1, top5, latencies = pure_evaluate(
                dataloader_dict['{:}@{:}'.format('cifar10', 'test')],
                network.cuda())
            xresult.update_OLD_eval('ori-test',
                                    {results['total_epoch'] - 1: top1},
                                    {results['total_epoch'] - 1: loss})
            xresult.update_latency(latencies)
        elif dataset == 'cifar10':
            xresult.update_OLD_eval('ori-test', results['valid_acc1es'],
                                    results['valid_losses'])
            loss, top1, top5, latencies = pure_evaluate(
                dataloader_dict['{:}@{:}'.format(dataset, 'test')],
                network.cuda())
            xresult.update_latency(latencies)
        elif dataset == 'cifar100' or dataset == 'ImageNet16-120':
            xresult.update_OLD_eval('ori-test', results['valid_acc1es'],
                                    results['valid_losses'])
            loss, top1, top5, latencies = pure_evaluate(
                dataloader_dict['{:}@{:}'.format(dataset, 'valid')],
                network.cuda())
            xresult.update_OLD_eval('x-valid',
                                    {results['total_epoch'] - 1: top1},
                                    {results['total_epoch'] - 1: loss})
            loss, top1, top5, latencies = pure_evaluate(
                dataloader_dict['{:}@{:}'.format(dataset, 'test')],
                network.cuda())
            xresult.update_OLD_eval('x-test',
                                    {results['total_epoch'] - 1: top1},
                                    {results['total_epoch'] - 1: loss})
            xresult.update_latency(latencies)
        else:
            raise ValueError('invalid dataset name : {:}'.format(dataset))
    return xresult
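create_result_count looks up evaluation loaders by the '{dataset}@{split}' keys built above. Here is a sketch of the dataloader_dict it expects; the loader variables are hypothetical placeholders for torch.utils.data.DataLoader instances over the corresponding splits, and the seed value is likewise hypothetical:

# Hypothetical DataLoader objects, keyed as '<dataset>@<split>' to match the
# lookups performed inside create_result_count.
dataloader_dict = {
    'cifar10@test': cifar10_test_loader,
    'cifar100@valid': cifar100_valid_loader,
    'cifar100@test': cifar100_test_loader,
    'ImageNet16-120@valid': imagenet16_valid_loader,
    'ImageNet16-120@test': imagenet16_test_loader,
}
xresult = create_result_count(777, 'cifar100', arch_config, results,
                              dataloader_dict)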
Example #3
# The same helper names as in the previous example are assumed; the type hints
# additionally assume "from typing import Any, Dict, Text".
def create_result_count(
    used_seed: int,
    dataset: Text,
    arch_config: Dict[Text, Any],
    results: Dict[Text, Any],
    dataloader_dict: Dict[Text, Any],
) -> ResultsCount:
    xresult = ResultsCount(
        dataset,
        results["net_state_dict"],
        results["train_acc1es"],
        results["train_losses"],
        results["param"],
        results["flop"],
        arch_config,
        used_seed,
        results["total_epoch"],
        None,
    )
    net_config = dict2config(
        {
            "name": "infer.tiny",
            "C": arch_config["channel"],
            "N": arch_config["num_cells"],
            "genotype": CellStructure.str2structure(arch_config["arch_str"]),
            "num_classes": arch_config["class_num"],
        },
        None,
    )
    network = get_cell_based_tiny_net(net_config)
    network.load_state_dict(xresult.get_net_param())
    if "train_times" in results:  # new version
        xresult.update_train_info(
            results["train_acc1es"],
            results["train_acc5es"],
            results["train_losses"],
            results["train_times"],
        )
        xresult.update_eval(results["valid_acc1es"], results["valid_losses"],
                            results["valid_times"])
    else:
        if dataset == "cifar10-valid":
            xresult.update_OLD_eval("x-valid", results["valid_acc1es"],
                                    results["valid_losses"])
            loss, top1, top5, latencies = pure_evaluate(
                dataloader_dict["{:}@{:}".format("cifar10", "test")],
                network.cuda())
            xresult.update_OLD_eval(
                "ori-test",
                {results["total_epoch"] - 1: top1},
                {results["total_epoch"] - 1: loss},
            )
            xresult.update_latency(latencies)
        elif dataset == "cifar10":
            xresult.update_OLD_eval("ori-test", results["valid_acc1es"],
                                    results["valid_losses"])
            loss, top1, top5, latencies = pure_evaluate(
                dataloader_dict["{:}@{:}".format(dataset, "test")],
                network.cuda())
            xresult.update_latency(latencies)
        elif dataset == "cifar100" or dataset == "ImageNet16-120":
            xresult.update_OLD_eval("ori-test", results["valid_acc1es"],
                                    results["valid_losses"])
            loss, top1, top5, latencies = pure_evaluate(
                dataloader_dict["{:}@{:}".format(dataset, "valid")],
                network.cuda())
            xresult.update_OLD_eval(
                "x-valid",
                {results["total_epoch"] - 1: top1},
                {results["total_epoch"] - 1: loss},
            )
            loss, top1, top5, latencies = pure_evaluate(
                dataloader_dict["{:}@{:}".format(dataset, "test")],
                network.cuda())
            xresult.update_OLD_eval(
                "x-test",
                {results["total_epoch"] - 1: top1},
                {results["total_epoch"] - 1: loss},
            )
            xresult.update_latency(latencies)
        else:
            raise ValueError("invalid dataset name : {:}".format(dataset))
    return xresult
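Both variants read the same keys from arch_config when rebuilding the network with dict2config and get_cell_based_tiny_net. Below is a sketch of that mapping with hypothetical values (16 initial channels, 5 cells per stage, a NAS-Bench-201-style cell string, 10 classes):

# Keys consumed above: 'channel', 'num_cells', 'arch_str' and 'class_num'.
arch_config = {
    'channel': 16,        # hypothetical initial channel count
    'num_cells': 5,       # hypothetical number of cells per stage
    'arch_str': '|nor_conv_3x3~0|+|skip_connect~0|nor_conv_3x3~1|+'
                '|skip_connect~0|skip_connect~1|avg_pool_3x3~2|',  # hypothetical cell
    'class_num': 10,      # e.g. CIFAR-10
}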