Example 1
import argparse

# Fragment of a training entry point. Helpers such as read_text, get_config,
# update_defaults, get_transform, manual_seed and override are assumed to
# come from the surrounding project.

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', help='config file')
    parser.add_argument('-r', '--resume', help='resume from checkpoint')
    args = parser.parse_args()

    print(read_text(args.config))
    cfg = get_config(args.config)
    dataset = cfg.Dataset.type
    assert dataset == 'MNIST'

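    # fp16 (mixed-precision) training is only valid on GPU in this config.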
    fp16 = cfg.get("fp16", False)
    if fp16:
        assert cfg.device == 'gpu'

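    # Apply the config's optional Global section to the shared defaults.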
    update_defaults(cfg.get("Global", {}))

    manual_seed(cfg.seed)

    data_home = cfg.Dataset.data_home

    train_transform = get_transform(cfg.Dataset.Train.transforms)
    val_transform = get_transform(cfg.Dataset.Val.transforms)
    test_transform = get_transform(cfg.Dataset.Test.transforms)

    num_classes = 10

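    # Optional hyperparameter search: fetch the next trial's parameters from
    # NNI and override the matching (dot-separated) config keys.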
    if cfg.get("hpo"):
        import nni
        RCV_CONFIG = nni.get_next_parameter()
        for k, v in RCV_CONFIG.items():
            ks = k.split(".")
            override(cfg, ks, v)
Example 2
import torch.nn as nn
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import CosineAnnealingLR
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import ToTensor, Normalize, Compose, Pad, Lambda

from horch.config import cfg
from horch.datasets import train_test_split, CombineDataset
from horch.nas.darts.model_search_gdas import Network
from horch.nas.darts.trainer import DARTSTrainer
from horch.train import manual_seed
from horch.train.metrics import TrainLoss, Loss
from horch.train.metrics.classification import Accuracy

manual_seed(0)

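# Pad 28x28 MNIST digits to 32x32 and replicate the single channel to 3
# channels so the inputs match a 3-channel (CIFAR-style) network stem.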
train_transform = Compose([
    Pad(2),
    ToTensor(),
    Normalize((0.1307, ), (0.3081, )),
    Lambda(lambda x: x.expand(3, -1, -1))
])

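# Machine-specific dataset root; point this at your own data directory.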
root = '/Users/hrvvi/Code/study/pytorch/datasets'
ds_all = MNIST(root=root, train=True, download=True, transform=train_transform)

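# Keep only a tiny 0.1% slice of MNIST for a quick run, then split it in
# half: one half for weight updates, the other for architecture updates;
# CombineDataset pairs the two halves for the bi-level DARTS setup.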
ds = train_test_split(ds_all, test_ratio=0.001, random=True)[1]
ds_train, ds_val = train_test_split(ds, test_ratio=0.5, random=True)
ds = CombineDataset(ds_train, ds_val)
Example 3
# Fragment of a DARTS search script: imports (torch, torchvision, horch,
# logging, os), the parsed args, CIFAR_CLASSES and the train helper are
# assumed to be defined elsewhere in the full script.
def main():
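    # Enable cuDNN autotuning: faster kernels at the cost of determinism.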
    torch.backends.cudnn.enabled = True
    torch.backends.cudnn.benchmark = True
    torch.backends.cudnn.deterministic = False
    manual_seed(args.seed)

    train_transform = Compose([
        RandomCrop(32, padding=4),
        RandomHorizontalFlip(),
        ToTensor(),
        Normalize([0.491, 0.482, 0.447], [0.247, 0.243, 0.262]),
    ])

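    # Split CIFAR-10 50/50: one half trains the network weights, the other
    # drives the architecture updates; both halves share the augmentation.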
    ds = CIFAR10(root=args.data, train=True, download=True)

    ds_train, ds_search = train_test_split(
        ds, test_ratio=0.5, shuffle=True, random_state=args.seed,
        transform=train_transform, test_transform=train_transform)

    train_queue = DataLoader(
        ds_train, batch_size=args.batch_size, pin_memory=True, shuffle=True, num_workers=2)

    valid_queue = DataLoader(
        ds_search, batch_size=args.batch_size, pin_memory=True, shuffle=True, num_workers=2)

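    # Defaults used during DARTS search: BatchNorm without affine parameters
    # and non-in-place ReLU.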
    set_defaults({
        'relu': {
            'inplace': False,
        },
        'bn': {
            'affine': False,
        }
    })
    model = Network(args.init_channels, args.layers, num_classes=CIFAR_CLASSES)
    criterion = nn.CrossEntropyLoss()

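    # Bi-level optimization as in DARTS: Adam for the architecture
    # parameters, SGD with momentum for the network weights.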
    optimizer_arch = Adam(
        model.arch_parameters(),
        lr=args.arch_learning_rate,
        betas=(0.5, 0.999),
        weight_decay=args.arch_weight_decay)
    optimizer_model = SGD(
        model.model_parameters(),
        args.learning_rate,
        momentum=args.momentum,
        weight_decay=args.weight_decay)

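    # Cosine-anneal the weight learning rate over the whole search.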
    scheduler = CosineLR(
        optimizer_model, float(args.epochs), min_lr=args.learning_rate_min)

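    # Per-epoch metrics: running loss/accuracy during training; evaluation
    # recomputes the loss with the criterion.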
    train_metrics = {
        "loss": TrainLoss(),
        "acc": Accuracy(),
    }

    eval_metrics = {
        "loss": Loss(criterion),
        "acc": Accuracy(),
    }

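    # grad_clip_norm=5.0 matches the DARTS paper; search_loader supplies the
    # batches for the architecture step.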
    learner = DARTSLearner(model, criterion, optimizer_arch, optimizer_model, scheduler,
                           train_metrics=train_metrics, eval_metrics=eval_metrics,
                           search_loader=valid_queue, grad_clip_norm=5.0, work_dir='models')

    for epoch in range(args.epochs):
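        # Pre-1.1 PyTorch scheduler convention: step at the start of each
        # epoch, then read the current learning rate.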
        scheduler.step()
        lr = scheduler.get_lr()[0]
        logging.info('epoch %d lr %e', epoch, lr)

        genotype = model.genotype()
        logging.info('genotype = %s', genotype)

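        # Inspect the softmaxed architecture mixing weights (alphas/betas).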
        print(F.softmax(model.alphas_normal, dim=-1))
        print(F.softmax(model.alphas_reduce, dim=-1))
        print(F.softmax(model.betas_normal[2:5], dim=-1))
        # training
        train_acc, train_obj = train(learner, train_queue, epoch)
        logging.info('train_acc %f', train_acc)

        utils.save(model, os.path.join(args.save, 'weights.pt'))