Example #1
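# Assumed imports for this snippet; the project-local module paths are guesses.
import argparse
import os

import utils
import genotypes as gt             # genotype primitives helper (alias assumed)
from config import HParam          # YAML hyperparameter reader (path assumed)
from data import load_data         # dataset loader (path assumed)
from models import get_model       # model factory (path assumed)
from train import augment          # training loop entry point (path assumed)
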
def main():
    parser = argparse.ArgumentParser(description='Proxyless-NAS augment')
    parser.add_argument('-n',
                        '--name',
                        type=str,
                        required=True,
                        help="name of the model")
    parser.add_argument('-c',
                        '--config',
                        type=str,
                        default='./config/default.yaml',
                        help="yaml config file")
    parser.add_argument('-p',
                        '--chkpt',
                        type=str,
                        default=None,
                        help="path of checkpoint pt file")
    parser.add_argument('-d',
                        '--device',
                        type=str,
                        default="all",
                        help="override device ids")
    parser.add_argument('-g',
                        '--genotype',
                        type=str,
                        default=None,
                        help="override genotype file")
    args = parser.parse_args()

    hp = HParam(args.config)

    # Checkpoint output directory for this run.
    out_dir = os.path.join('.', hp.log.chkpt_dir, args.name)
    os.makedirs(out_dir, exist_ok=True)

    # Per-run log directory.
    log_dir = os.path.join('.', hp.log.log_dir, args.name)
    os.makedirs(log_dir, exist_ok=True)

    logger = utils.get_logger(log_dir, args.name)

    # check_config is expected to return a truthy value when the config is invalid.
    if utils.check_config(hp, args.name):
        raise Exception("Config error.")

    writer = utils.get_writer(log_dir, hp.log.writer)

    # Resolve the compute device(s) from the config, honoring the CLI override.
    dev, dev_list = utils.init_device(hp.device, args.device)

    # Training and validation splits of the augment dataset.
    trn_loader = load_data(hp.augment.data, validation=False)
    val_loader = load_data(hp.augment.data, validation=True)

    gt.set_primitives(hp.genotypes)

    # load genotype
    genotype = utils.get_genotype(hp.augment, args.genotype)

    model, arch = get_model(hp.model, dev, dev_list, genotype)

    augment(out_dir, args.chkpt, trn_loader, val_loader, model, writer, logger,
            dev, hp.augment)
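
# A typical invocation, using the flags defined above (script name assumed):
#   python augment.py -n my_model -c ./config/default.yaml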
Example #2
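# Assumed imports; `args` and `logger` come from project-local modules
# whose exact paths are guesses.
import torch
import torch.nn as nn

from dataset import Cost2100DataLoader         # (path assumed)
from utils import (args, logger, init_device,  # (paths assumed)
                   init_model, FakeLR, WarmUpCosineAnnealingLR,
                   Trainer, Tester)
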
def main():
    logger.info('=> PyTorch Version: {}'.format(torch.__version__))

    # Environment initialization
    device, pin_memory = init_device(args.seed, args.cpu, args.gpu,
                                     args.cpu_affinity)

    # Create the data loader
    train_loader, val_loader, test_loader = Cost2100DataLoader(
        root=args.data_dir,
        batch_size=args.batch_size,
        num_workers=args.workers,
        pin_memory=pin_memory,
        scenario=args.scenario)()

    # Define model
    model = init_model(args)
    model.to(device)

    # Define loss function
    criterion = nn.MSELoss().to(device)

    # Inference mode
    if args.evaluate:
        Tester(model, device, criterion)(test_loader)
        return

    # Define optimizer and scheduler
    lr_init = 1e-3 if args.scheduler == 'const' else 2e-3
    optimizer = torch.optim.Adam(model.parameters(), lr_init)
    if args.scheduler == 'const':
        scheduler = FakeLR(optimizer=optimizer)
    else:
        scheduler = WarmUpCosineAnnealingLR(optimizer=optimizer,
                                            T_max=args.epochs * len(train_loader),
                                            T_warmup=30 * len(train_loader),
                                            eta_min=5e-5)

    # Define the training pipeline
    trainer = Trainer(model=model,
                      device=device,
                      optimizer=optimizer,
                      criterion=criterion,
                      scheduler=scheduler,
                      resume=args.resume)

    # Start training
    trainer.loop(args.epochs, train_loader, val_loader, test_loader)

    # Final testing
    loss, rho, nmse = Tester(model, device, criterion)(test_loader)
    print(f"\n=! Final test loss: {loss:.3e}"
          f"\n         test rho: {rho:.3e}"
          f"\n         test NMSE: {nmse:.3e}\n")
Example #3
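# Assumed imports; the PTB helpers below are project-local and their
# module paths are guesses.
import torch.nn as nn

from models import ModelInfo                   # (path assumed)
from utils import (init_device, ptb_raw_data,  # (paths assumed)
                   load_model, compute_loss, plot_loss)
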
def compute_loss_by_model():
    device = init_device()
    train_data, valid_data, test_data, word_to_id, id_2_word = ptb_raw_data(data_path='data')
    vocab_size = len(word_to_id)
    loss_fn = nn.CrossEntropyLoss()
    # Models from 4_1
    models = [ModelInfo('RNN', 'ADAM', 0.0001, 20, 35, 1500, 2, 0.35),
              ModelInfo('GRU', 'SGD_LR_SCHEDULE', 10, 20, 35, 1500, 2, 0.35),
              ModelInfo('TRANSFORMER', 'ADAM', 20, 128, 35, 512, 6, 0.9)]

    losses_by_model = []
    for model_info in models:
        model = load_model(model_info, device, vocab_size)
        loss_per_step = compute_loss(model, model_info, device, valid_data, loss_fn)
        losses_by_model.append(loss_per_step)

    plot_loss(models, losses_by_model)
Example #4
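# Assumed imports, mirroring Example #2; module paths are guesses.
import torch
import torch.nn as nn

from dataset import Cost2100DataLoader                            # (path assumed)
from utils import args, logger, init_device, init_model, Tester   # (paths assumed)
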
def main():
    logger.info('=> PyTorch Version: {}'.format(torch.__version__))

    # Environment initialization
    device = init_device(args.seed, args.cpu, args.gpu, args.cpu_affinity)

    # Create the test data loader
    test_loader = Cost2100DataLoader(root=args.data_dir,
                                     batch_size=args.batch_size,
                                     num_workers=args.workers,
                                     scenario=args.scenario)()

    # Define model
    model = init_model(args)
    model.to(device)

    # Define loss function
    criterion = nn.MSELoss().to(device)

    # Inference
    Tester(model, device, criterion, print_freq=20)(test_loader)
Example #5
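# Assumed imports; helper locations are guesses.
import argparse

import numpy as np
import torch

import data                                         # corpus utilities (path assumed)
from utils import (get_logger, set_utils_logger,    # (paths assumed)
                   init_device, save_args, save_commit_id,
                   TensorBoard)
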
parser = argparse.ArgumentParser(description='finetune')
# Only the final option of the parser survives in this snippet; the flag
# name below is assumed.
parser.add_argument('--single_gpu',
                    action='store_true',
                    help='use single GPU')
args = parser.parse_args()

if args.dropoutl < 0:
    args.dropoutl = args.dropouth
if args.small_batch_size < 0:
    args.small_batch_size = args.batch_size

# Logger init and set for utils
logger = get_logger(args, filename="finetune.log")
set_utils_logger(logger)
# Set the random seed manually for reproducibility.
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# Sets the `args.device` and CUDA seed.
init_device(args)
# Save infos
save_args(args)
save_commit_id(args)
# Tensorboard
tb = TensorBoard(args.model_dir)

logger.info('finetune load path: {}/model.pt'.format(args.model_dir))
logger.info('log save path: {}/finetune_log.txt'.format(args.model_dir))
logger.info('model save path: {}/finetune_model.pt'.format(args.model_dir))

###############################################################################
# Load data
###############################################################################

corpus = data.Corpus(args.data)
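
Example #6

# Assumed imports; the PTB helpers mirror Example #3 and the module
# paths are guesses.
from itertools import product

import torch

from models import ModelInfo                   # (path assumed)
from utils import (init_device, load_model,    # (paths assumed)
                   ptb_raw_data, repackage_hidden)
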
def generate_sentences(model_info, device, seq_len, batch_size, start):
    # Rebuild the PTB vocabulary so sampled token ids can be mapped back to
    # words; this header and the load_model call are reconstructed from the
    # surrounding code and from Example #3, so the exact signature is assumed.
    _, _, _, word_to_id, id_2_word = ptb_raw_data(data_path='data')
    vocab_size = len(word_to_id)
    model = load_model(model_info, device,
                       vocab_size=vocab_size,
                       load_on_device=False)

    hidden = torch.zeros(model.num_layers, batch_size, model.hidden_size)
    hidden = repackage_hidden(hidden)
    samples = model.generate(start, hidden, seq_len)
    generated_sentences = []

    for i, sample in enumerate(samples.to("cpu"), 1):
        sentence = " ".join([id_2_word[int(word)] for word in sample])
        generated_sentences.append(sentence)

    with open('%s_%s_generated_samples.txt' % (model_info.model, str(seq_len)),
              'w') as f:
        for sentence in generated_sentences:
            f.write("%s\n\n" % sentence)


if __name__ == "__main__":
    seq_lens = [35, 70]
    generations_per_seq_len = 10
    starting_word = "<eos>"
    models = [
        ModelInfo('RNN', 'ADAM', 0.0001, 20, 35, 1500, 2, 0.35),
        ModelInfo('GRU', 'SGD_LR_SCHEDULE', 10, 20, 35, 1500, 2, 0.35)
    ]

    device = init_device()
    for m, s in product(models, seq_lens):
        generate_sentences(m, device, s, generations_per_seq_len,
                           starting_word)