Code Example #1
def evaluate(xargs):
    # enable cuDNN
    cudnn.enabled = True
    # disable benchmarking so the same convolution algorithm is picked every run
    cudnn.benchmark = False
    # make results reproducible for a fixed seed
    cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    xargs.save_dir += 'eva/'
    logger = prepare_logger(xargs)
    train_data, valid_data, xshape, class_num = get_dataset(
        xargs.dataset, xargs.data_path, -1)
    logger.log('Train Config:')
    eva_config = load_config(
        xargs.eva_config, {
            'class_num': class_num,
            'xshape': xshape,
            'tau_max': xargs.tau_max,
            'tau_min': xargs.tau_min
        }, logger)
    search_loader, train_loader, test_loader = get_nas_search_loaders(
        train_data, valid_data, xargs.dataset, 'config/',
        eva_config.batch_size, xargs.workers)
    logger.log('dataset: {:} Train-Loader-length={:}, batch size={:}'.format(
        xargs.dataset, len(train_loader), eva_config.batch_size))
    eva_dense_net(search_loader, test_loader, eva_config, logger, 'normal')
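prepare_seed is a project-local helper whose body is not shown on this page. Under the assumption that it seeds every random-number generator that can influence a run, here is a minimal sketch of what it presumably does (the function name and scope are illustrative, not the project's actual code):

import random
import numpy as np
import torch

def prepare_seed_sketch(rand_seed):
    # seed every RNG that can affect training, so that, together with the
    # cudnn.deterministic / cudnn.benchmark flags above, runs are repeatable
    random.seed(rand_seed)
    np.random.seed(rand_seed)
    torch.manual_seed(rand_seed)
    torch.cuda.manual_seed_all(rand_seed)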
Code Example #2
def train(xargs):
    # enable cuDNN
    cudnn.enabled = True
    # disable benchmarking so the same convolution algorithm is picked every run
    cudnn.benchmark = False
    # make results reproducible for a fixed seed
    cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(xargs)
    # load the raw dataset (cifar10/cifar100/uci)
    train_data, valid_data, xshape, class_num = get_dataset(xargs.dataset, xargs.data_path, -1)
    logger.log('{:}Train Config{:}'.format("-" * 50, "-" * 50))
    opt_config = load_config(xargs.opt_config, {'class_num': class_num, 'xshape': xshape,
                                                'batch_size': xargs.batch_size, 'epochs': xargs.epochs,
                                                'LR': xargs.opt_learning_rate}, logger)
    _, train_loader, _ = get_nas_search_loaders(train_data, valid_data, xargs.dataset,
                                                'config/', opt_config.batch_size, xargs.workers)
    logger.log('dataset: {:} Train-Loader-length={:}, batch size={:}'.format(xargs.dataset, len(train_loader),
                                                                             opt_config.batch_size))
Code Example #3
def get_nas_search_loaders(train_data, valid_data, dataset, config_root,
                           batch_size, workers):
    # get search_loader, train_loader, valid_loader
    if isinstance(batch_size, (list, tuple)):
        batch, test_batch = batch_size
    else:
        batch, test_batch = batch_size, batch_size
    if dataset == 'cifar10':
        cifar_split = load_config('{:}/cifar10-split.txt'.format(config_root),
                                  None, None)
        train_split, valid_split = cifar_split.train, cifar_split.valid
        # split the original training set into search-train and search-valid parts
        xvalid_data = deepcopy(train_data)
        if hasattr(xvalid_data, 'transforms'):  # to avoid a print issue
            xvalid_data.transforms = valid_data.transform
        xvalid_data.transform = deepcopy(valid_data.transform)
        search_data = SearchDataset(dataset, train_data, train_split,
                                    valid_split)
        # data loader
        search_loader = DataLoader(search_data,
                                   batch_size=batch,
                                   shuffle=True,
                                   num_workers=workers,
                                   pin_memory=True)
        train_loader = DataLoader(
            train_data,
            batch_size=batch,
            sampler=sampler.SubsetRandomSampler(train_split),
            num_workers=workers,
            pin_memory=True)
        valid_loader = DataLoader(
            xvalid_data,
            batch_size=test_batch,
            sampler=sampler.SubsetRandomSampler(valid_split),
            num_workers=workers,
            pin_memory=True)
    elif dataset == 'cifar100':
        cifar100_test_split = load_config(
            '{:}/cifar100-split.txt'.format(config_root), None, None)
        search_train_data = train_data
        search_valid_data = deepcopy(valid_data)
        search_valid_data.transform = train_data.transform
        search_data = SearchDataset(dataset,
                                    [search_train_data, search_valid_data],
                                    list(range(len(search_train_data))),
                                    cifar100_test_split.xvalid)
        search_loader = DataLoader(search_data,
                                   batch_size=batch,
                                   shuffle=True,
                                   num_workers=workers,
                                   pin_memory=True)
        train_loader = DataLoader(train_data,
                                  batch_size=batch,
                                  shuffle=True,
                                  num_workers=workers,
                                  pin_memory=True)
        valid_loader = DataLoader(valid_data,
                                  batch_size=test_batch,
                                  sampler=sampler.SubsetRandomSampler(
                                      cifar100_test_split.xvalid),
                                  num_workers=workers,
                                  pin_memory=True)
    elif dataset == 'HAPT':
        HAPT_split = load_config('{:}/HAPT-split.txt'.format(config_root), None,
                                 None)
        train_split, valid_split = HAPT_split.train, HAPT_split.valid
        search_data = GenDataset(dataset, train_data, train_split, valid_split)
        search_loader = DataLoader(search_data,
                                   batch_size=batch,
                                   shuffle=True,
                                   num_workers=workers,
                                   pin_memory=True)
        train_loader = DataLoader(NormalDataset(dataset, train_data),
                                  batch_size=batch,
                                  shuffle=True,
                                  num_workers=workers,
                                  pin_memory=True)
        valid_loader = DataLoader(NormalDataset(dataset, valid_data),
                                  batch_size=batch,
                                  shuffle=False,
                                  num_workers=workers,
                                  pin_memory=True)
    else:
        raise ValueError('invalid dataset : {:}'.format(dataset))
    return search_loader, train_loader, valid_loader
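SearchDataset and GenDataset are project-local classes, so their bodies are not shown here. In the NAS codebases this loader pattern comes from, the search dataset pairs one sample from the training split with one from the validation split, so a single search batch can feed both the weight update and the architecture update. A minimal sketch under that assumption (class and variable names are illustrative):

import random
from torch.utils.data import Dataset

class SearchDatasetSketch(Dataset):
    # pairs each training-split sample with a random validation-split sample
    def __init__(self, data, train_split, valid_split):
        self.data = data
        self.train_split = list(train_split)
        self.valid_split = list(valid_split)

    def __len__(self):
        return len(self.train_split)

    def __getitem__(self, index):
        train_image, train_label = self.data[self.train_split[index]]
        rand = random.randint(0, len(self.valid_split) - 1)
        valid_image, valid_label = self.data[self.valid_split[rand]]
        return train_image, train_label, valid_image, valid_label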
Code Example #4
File: MyGDAS.py  Project: MartrixG/CODES
def train(xargs):
    lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
    if str(lib_dir) not in sys.path:
        sys.path.insert(0, str(lib_dir))

    assert torch.cuda.is_available(), 'CUDA is not available.'
    # enable cuDNN
    cudnn.enabled = True
    # disable benchmarking so the same convolution algorithm is picked every run
    cudnn.benchmark = False
    # make results reproducible for a fixed seed
    cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(xargs)
    # load the raw dataset (cifar10/cifar100/uci)
    train_data, valid_data, xshape, class_num = get_dataset(
        xargs.dataset, xargs.data_path, -1)
    logger.log('{:}Train Config{:}'.format("-" * 50, "-" * 50))
    opt_config = load_config(
        xargs.opt_config, {
            'class_num': class_num,
            'xshape': xshape,
            'batch_size': xargs.batch_size,
            'epochs': xargs.epochs,
            'LR': xargs.opt_learning_rate
        }, logger)
    search_loader, _, valid_loader = get_nas_search_loaders(
        train_data, valid_data, xargs.dataset, 'config/',
        opt_config.batch_size, xargs.workers)
    logger.log('dataset: {:} Search-Loader-length={:}, batch size={:}'.format(
        xargs.dataset, len(search_loader), opt_config.batch_size))
    logger.log('{:}Arch Config{:}'.format("-" * 50, "-" * 50))
    arch_config = load_config(
        xargs.arch_config, {
            'class_num': class_num,
            'space': HAPT_SPACE,
            'affine': False,
            'track_running_stats': bool(xargs.track_running_stats)
        }, logger)
    if xargs.dataset in ('HAPT', 'cifar10', 'cifar100'):
        search_model = DNNModel(config=arch_config, logger=logger)
    else:
        raise NameError(
            'dataset must be one of "HAPT", "cifar10", "cifar100"')
    if xargs.evaluate == 'test':
        search_model.load_state_dict(
            torch.load(logger.path('best'))['network'])
        network = search_model.cuda()
        test_loader = valid_loader
        test(test_loader, network, arch_config.C_out)
        return
    # logger.log('search-model :\n{:}'.format(search_model))
    logger.log('{:}model-config{:}\n{:}'.format("-" * 50, "-" * 50,
                                                arch_config))
    if opt_config.criterion == 'cross_entropy':
        criterion = nn.CrossEntropyLoss()
    else:
        raise NameError('unknown loss function {:}'.format(
            opt_config.criterion))
    # criterion = nn.MSELoss()
    w_optimizer = torch.optim.SGD(params=search_model.get_weights(),
                                  lr=opt_config.LR,
                                  weight_decay=opt_config.w_decay)
    w_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
        optimizer=w_optimizer,
        T_max=opt_config.epochs,
        eta_min=opt_config.eta_min)
    a_optimizer = torch.optim.Adam(params=search_model.get_alphas(),
                                   lr=xargs.arch_learning_rate,
                                   betas=(0.5, 0.999),
                                   weight_decay=opt_config.a_decay)
    logger.log('{:}w-optimizer{:}\n{:}'.format("-" * 50, "-" * 50,
                                               w_optimizer))
    logger.log('{:}a-optimizer{:}\n{:}'.format("-" * 50, "-" * 50,
                                               a_optimizer))
    logger.log('{:}w-scheduler{:}\n{:}'.format("-" * 50, "-" * 50,
                                               w_scheduler))
    logger.log('{:}criterion{:}\n{:}'.format("-" * 50, "-" * 50, criterion))
    flop, param = get_model_infos(search_model, xshape)
    logger.log('FLOP = {:.6f} M, Params = {:.6f} MB'.format(flop, param))
    logger.log('search-space [{:} ops] : {:}'.format(len(HAPT_SPACE),
                                                     HAPT_SPACE))
    last_info = logger.path('info')
    model_base_path = logger.path('model')
    model_best_path = logger.path('best')
    # network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()
    network = search_model.cuda()
    criterion = criterion.cuda()
    if last_info.exists():
        # automatically resume from previous checkpoint
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(
            last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info['epoch']
        checkpoint = torch.load(last_info['last_checkpoint'])
        genotypes = checkpoint['genotypes']
        valid_accuracies = checkpoint['valid_accuracies']
        network.load_state_dict(checkpoint['network'])
        w_scheduler.load_state_dict(checkpoint['w_scheduler'])
        w_optimizer.load_state_dict(checkpoint['w_optimizer'])
        a_optimizer.load_state_dict(checkpoint['a_optimizer'])
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch."
            .format(last_info, start_epoch))
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch = 0
        valid_accuracies = {'best': -1}
        genotypes = {-1: network.genotype(xargs.save_dir + xargs.genotype_file)}
    start_time, epoch_time = time.time(), AverageMeter()
    total_epoch = opt_config.epochs
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.step(epoch=epoch)
        need_time = 'Time Left: {:}'.format(
            convert_secs2time(epoch_time.avg * (total_epoch - epoch), True))
        epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
        network.set_tau(xargs.tau_max -
                        (xargs.tau_max - xargs.tau_min) * epoch /
                        (total_epoch - 1))
        logger.log('\n[Search the {:}-th epoch] tau={:.2f} {:}'.format(
            epoch_str, network.get_tau(), need_time))
        epoch_start = time.time()
        base_losses, base_top1, base_top5, arch_losses, arch_top1, arch_top5 = \
            search(arch_config, search_loader, network, criterion, w_optimizer, a_optimizer,
                   xargs.print_frequency, epoch_str, logger)
        epoch_time.update(time.time() - epoch_start)
        logger.log(
            '[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'
            .format(epoch_str, base_losses, base_top1, base_top5))
        logger.log(
            '[{:}] evaluate  : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'
            .format(epoch_str, arch_losses, arch_top1, arch_top5))
        valid_accuracies[epoch] = arch_top1
        if arch_top1 > valid_accuracies['best']:
            valid_accuracies['best'] = arch_top1
            genotypes['best'] = network.genotype(xargs.save_dir +
                                                 xargs.genotype_file)
            find_best = True
        else:
            find_best = False
        genotypes[epoch] = network.genotype(xargs.save_dir +
                                            xargs.genotype_file)
        logger.log('<<<--->>> The {:}-th epoch : {:}'.format(
            epoch_str, genotypes[epoch]))
        save_path = save_checkpoint(
            {
                'epoch': epoch + 1,
                'args': deepcopy(xargs),
                'network': network.state_dict(),
                'w_optimizer': w_optimizer.state_dict(),
                'a_optimizer': a_optimizer.state_dict(),
                'w_scheduler': w_scheduler.state_dict(),
                'genotypes': genotypes,
                'valid_accuracies': valid_accuracies
            }, model_base_path, logger)
        last_info = save_checkpoint(
            {
                'epoch': epoch + 1,
                'args': deepcopy(xargs),
                'last_checkpoint': save_path,
            }, logger.path('info'), logger)
        if find_best:
            logger.log(
                '<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.'
                .format(epoch_str, arch_top1))
            copy_checkpoint(model_base_path, model_best_path, logger)
            with torch.no_grad():
                logger.log('{:}'.format(network.show_alphas()))
    logger.log('\n' + '-' * 100)
    # check the performance from the architecture dataset
    logger.log('GDAS : run {:} epochs, use time : {:}.'.format(
        total_epoch, convert_secs2time(time.time() - start_time, True)))
    logger.log('best geno is {:}.'.format(genotypes['best']))
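The set_tau call inside the epoch loop anneals the Gumbel-softmax temperature linearly, from tau_max at epoch 0 down to tau_min at the last epoch. The same schedule as a standalone function (tau_max=10 and tau_min=0.1 are typical GDAS defaults, not values read from this script):

def linear_tau(epoch, total_epoch, tau_max=10.0, tau_min=0.1):
    # epoch 0 -> tau_max; epoch total_epoch - 1 -> tau_min
    return tau_max - (tau_max - tau_min) * epoch / (total_epoch - 1)

# e.g. linear_tau(0, 250) gives 10.0 and linear_tau(249, 250) gives ~0.1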
Code Example #5
def train(xargs):
    lib_dir = (Path(__file__).parent / '..' / '..' / 'lib').resolve()
    if str(lib_dir) not in sys.path:
        sys.path.insert(0, str(lib_dir))

    assert torch.cuda.is_available(), 'CUDA is not available.'
    # enable cuDNN
    cudnn.enabled = True
    # disable benchmarking so the same convolution algorithm is picked every run
    cudnn.benchmark = False
    # make results reproducible for a fixed seed
    cudnn.deterministic = True
    torch.set_num_threads(xargs.workers)
    prepare_seed(xargs.rand_seed)
    logger = prepare_logger(xargs)
    # load the raw dataset
    train_data, valid_data, xshape, class_num = get_dataset(
        xargs.dataset, xargs.data_path, -1)
    config = load_config(xargs.config_path, {
        'class_num': class_num,
        'xshape': xshape
    }, logger)
    search_loader, _, valid_loader = get_nas_search_loaders(
        train_data, valid_data, xargs.dataset, 'config/', config.batch_size,
        xargs.workers)
    logger.log(
        '||||||| {:10s} ||||||| Search-Loader-Num={:}, batch size={:}'.format(
            xargs.dataset, len(search_loader), config.batch_size))
    logger.log('||||||| {:10s} ||||||| Config={:}'.format(
        xargs.dataset, config))
    search_space = DARTS_SPACE
    model_config = load_config(
        xargs.model_config, {
            'num_classes': class_num,
            'space': search_space,
            'affine': False,
            'track_running_stats': bool(xargs.track_running_stats)
        }, None)
    search_model = get_cell_based_tiny_net(model_config)
    # logger.log('search-model :\n{:}'.format(search_model))
    # logger.log('model-config : {:}'.format(model_config))

    w_optimizer, w_scheduler, criterion = get_optim_scheduler(
        search_model.get_weights(), config)
    a_optimizer = torch.optim.Adam(search_model.get_alphas(),
                                   lr=xargs.arch_learning_rate,
                                   betas=(0.5, 0.999),
                                   weight_decay=xargs.arch_weight_decay)
    # logger.log('w-optimizer : {:}'.format(w_optimizer))
    # logger.log('a-optimizer : {:}'.format(a_optimizer))
    # logger.log('w-scheduler : {:}'.format(w_scheduler))
    # logger.log('criterion   : {:}'.format(criterion))
    # flop, param = get_model_infos(search_model, xshape)
    # logger.log('FLOP = {:.2f} M, Params = {:.2f} MB'.format(flop, param))
    logger.log('search-space [{:} ops] : {:}'.format(len(search_space),
                                                     search_space))
    if xargs.arch_nas_dataset is None:
        api = None
    else:
        # api = API(xargs.arch_nas_dataset)
        api = None
    last_info = logger.path('info')
    model_base_path = logger.path('model')
    model_best_path = logger.path('best')
    # network, criterion = torch.nn.DataParallel(search_model).cuda(), criterion.cuda()
    network = search_model

    if last_info.exists():
        # automatically resume from previous checkpoint
        logger.log("=> loading checkpoint of the last-info '{:}' start".format(
            last_info))
        last_info = torch.load(last_info)
        start_epoch = last_info['epoch']
        checkpoint = torch.load(last_info['last_checkpoint'])
        genotypes = checkpoint['genotypes']
        valid_accuracies = checkpoint['valid_accuracies']
        search_model.load_state_dict(checkpoint['search_model'])
        w_scheduler.load_state_dict(checkpoint['w_scheduler'])
        w_optimizer.load_state_dict(checkpoint['w_optimizer'])
        a_optimizer.load_state_dict(checkpoint['a_optimizer'])
        logger.log(
            "=> loading checkpoint of the last-info '{:}' start with {:}-th epoch."
            .format(last_info, start_epoch))
    else:
        logger.log("=> do not find the last-info file : {:}".format(last_info))
        start_epoch = 0
        valid_accuracies = {'best': -1}
        genotypes = {-1: search_model.genotype()}
    start_time, search_time, epoch_time = time.time(), AverageMeter(), AverageMeter()
    total_epoch = config.epochs + config.warmup
    for epoch in range(start_epoch, total_epoch):
        w_scheduler.update(epoch, 0.0)
        need_time = 'Time Left: {:}'.format(
            convert_secs2time(epoch_time.val * (total_epoch - epoch), True))
        epoch_str = '{:03d}-{:03d}'.format(epoch, total_epoch)
        search_model.set_tau(xargs.tau_max -
                             (xargs.tau_max - xargs.tau_min) * epoch /
                             (total_epoch - 1))
        logger.log('\n[Search the {:}-th epoch] {:}, tau={:}, LR={:}'.format(
            epoch_str, need_time, search_model.get_tau(),
            min(w_scheduler.get_lr())))
        search_w_loss, search_w_top1, search_w_top5, valid_a_loss, valid_a_top1, valid_a_top5 \
            = search_func(search_loader, network, criterion, w_scheduler, w_optimizer, a_optimizer, epoch_str,
                          xargs.print_freq, logger)
        search_time.update(time.time() - start_time)
        logger.log(
            '[{:}] searching : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%, time-cost={:.1f} s'
            .format(epoch_str, search_w_loss, search_w_top1, search_w_top5,
                    search_time.sum))
        logger.log(
            '[{:}] evaluate  : loss={:.2f}, accuracy@1={:.2f}%, accuracy@5={:.2f}%'
            .format(epoch_str, valid_a_loss, valid_a_top1, valid_a_top5))
        # check the best accuracy
        valid_accuracies[epoch] = valid_a_top1
        if valid_a_top1 > valid_accuracies['best']:
            valid_accuracies['best'] = valid_a_top1
            genotypes['best'] = search_model.genotype()
            find_best = True
        else:
            find_best = False

        genotypes[epoch] = search_model.genotype()
        logger.log('<<<--->>> The {:}-th epoch : {:}'.format(
            epoch_str, genotypes[epoch]))
        # save checkpoint
        save_path = save_checkpoint(
            {
                'epoch': epoch + 1,
                'args': deepcopy(xargs),
                'search_model': search_model.state_dict(),
                'w_optimizer': w_optimizer.state_dict(),
                'a_optimizer': a_optimizer.state_dict(),
                'w_scheduler': w_scheduler.state_dict(),
                'genotypes': genotypes,
                'valid_accuracies': valid_accuracies
            }, model_base_path, logger)
        last_info = save_checkpoint(
            {
                'epoch': epoch + 1,
                'args': deepcopy(xargs),
                'last_checkpoint': save_path,
            }, logger.path('info'), logger)
        if find_best:
            logger.log(
                '<<<--->>> The {:}-th epoch : find the highest validation accuracy : {:.2f}%.'
                .format(epoch_str, valid_a_top1))
            copy_checkpoint(model_base_path, model_best_path, logger)
        with torch.no_grad():
            logger.log('{:}'.format(search_model.show_alphas()))
        # if api is not None: logger.log('{:}'.format(api.query_by_arch(genotypes[epoch])))
        # measure elapsed time
        epoch_time.update(time.time() - start_time)
        start_time = time.time()
    logger.log('\n' + '-' * 100)
    # check the performance from the architecture dataset
    logger.log(
        'GDAS : run {:} epochs, cost {:.1f} s, last-geno is {:}.'.format(
            total_epoch, search_time.sum, genotypes[total_epoch - 1]))
    # if api is not None: logger.log('{:}'.format(api.query_by_arch(genotypes[total_epoch - 1])))
    logger.close()
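Neither GDAS script shows the sampling step that the annealed tau feeds into; it lives inside the search model. A minimal sketch of how hard-but-differentiable operation selection over the architecture parameters (the alphas) typically looks in PyTorch (the candidate-op mixing line is illustrative):

import torch
import torch.nn.functional as F

alphas = torch.randn(8, requires_grad=True)  # e.g. 8 candidate ops on one edge
tau = 4.0  # the temperature annealed by set_tau in the scripts above

# hard one-hot sample in the forward pass, soft gradient in the backward pass
weights = F.gumbel_softmax(alphas, tau=tau, hard=True)
# output = sum(w * op(x) for w, op in zip(weights, candidate_ops))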