示例#1
0
                # Periodic eval-progress report. NOTE(review): this fragment
                # starts mid-function; batch_idx, log_interval, epoch, data,
                # loss, scores_copy, targets_copy, charset, losses and
                # experiment are all defined above this excerpt.
                if batch_idx % log_interval == 0:

                    # Progress line: epoch, samples seen / total, percent of
                    # batches done, per-sample loss, and a wall-clock stamp.
                    print('Eval Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f} {}'.
                          format(epoch, batch_idx * len(data),
                                 len(train_loader.dataset),
                                 100. * batch_idx / len(train_loader),
                                 loss.item() / len(data),
                                 datetime.datetime.now()))

                    # Greedy decode: argmax over dim=2, so scores_copy is
                    # presumably (batch, seq, vocab) — TODO confirm with caller.
                    _, preds = torch.max(scores_copy, dim=2)
                    preds = preds.cpu().numpy()
                    targets_copy = targets_copy.cpu().numpy()
                    # Show the first 4 target/prediction pairs, mapped from
                    # class indices to characters via charset.
                    for i in range(4):
                        sample = preds[i, ...]
                        target = targets_copy[i, ...]
                        print("ORIG: {}\nNEW : {}\n".format(
                            "".join([charset[chars] for chars in target]),
                            "".join([charset[chars] for chars in sample])))

        # Report the running-average loss for this eval pass.
        experiment.log_metric("loss", losses.avg)
    return losses.avg


# Top-level driver: train and evaluate once per epoch, checkpointing both
# halves of the model after every epoch.
for epoch in range(starting_epoch, epochs):
    train(epoch)
    # FIX: step the LR schedulers AFTER the training pass. PyTorch >= 1.1
    # requires optimizer.step() before scheduler.step(); stepping first
    # skipped the initial learning rate and decayed one epoch early.
    decoder_sched.step()
    encoder_sched.step()
    val = test(epoch)
    # Per-epoch snapshots so any epoch can be restored later.
    torch.save(encoder.state_dict(), "encoder." + str(epoch) + ".pt")
    torch.save(decoder.state_dict(), "decoder." + str(epoch) + ".pt")
示例#2
0
def _build_datasets(args):
    """Build the (train, val) dataset pair selected by ``args.dataset``.

    The crop size is derived from the cfg tree settings; the train split
    gets a random crop, the val split a deterministic center crop.

    Raises:
        NotImplementedError: for any dataset name other than 'bird'/'coco'.
    """
    imsize = cfg.TREE.BASE_SIZE * (2**(cfg.TREE.BRANCH_NUM - 1))
    # NOTE(review): transforms.Scale was renamed transforms.Resize in newer
    # torchvision releases; kept as Scale to match the pinned dependency.
    train_transform = transforms.Compose([
        transforms.Scale(int(imsize * 76 / 64)),
        transforms.RandomCrop(imsize),
    ])
    val_transform = transforms.Compose([
        transforms.Scale(int(imsize * 76 / 64)),
        transforms.CenterCrop(imsize),
    ])
    if args.dataset == 'bird':
        train_dataset = ImageTextDataset(args.data_dir,
                                         'train',
                                         transform=train_transform,
                                         sample_type='train')
        val_dataset = ImageTextDataset(args.data_dir,
                                       'val',
                                       transform=val_transform,
                                       sample_type='val')
    elif args.dataset == 'coco':
        train_dataset = CaptionDataset(args.data_dir,
                                       'train',
                                       transform=train_transform,
                                       sample_type='train',
                                       coco_data_json=args.coco_data_json)
        val_dataset = CaptionDataset(args.data_dir,
                                     'val',
                                     transform=val_transform,
                                     sample_type='val',
                                     coco_data_json=args.coco_data_json)
    else:
        raise NotImplementedError
    return train_dataset, val_dataset


def _build_model(args, n_words, outputer):
    """Create (encoder, decoder), optionally restoring saved weights.

    Precedence: a raw checkpoint (``args.raw_checkpoint``) wins outright;
    otherwise a fresh model is built and, when cfg.IMAGETEXT.CHECKPOINT is
    set, its weights are loaded on top.
    """
    if args.raw_checkpoint != '':
        encoder, decoder = load_raw_checkpoint(args.raw_checkpoint)
    else:
        encoder = Encoder()
        decoder = DecoderWithAttention(
            attention_dim=cfg.IMAGETEXT.ATTENTION_DIM,
            embed_dim=cfg.IMAGETEXT.EMBED_DIM,
            decoder_dim=cfg.IMAGETEXT.DECODER_DIM,
            vocab_size=n_words)
        # load checkpoint
        if cfg.IMAGETEXT.CHECKPOINT != '':
            outputer.log("load model from: {}".format(
                cfg.IMAGETEXT.CHECKPOINT))
            encoder, decoder = load_checkpoint(encoder, decoder,
                                               cfg.IMAGETEXT.CHECKPOINT)
    return encoder, decoder


def train(args):
    """Train (or, with ``args.eval``, only evaluate) the captioning model.

    Loads the config, seeds all RNGs, builds datasets/loaders, builds or
    restores the encoder/decoder, then either runs a single validation pass
    (eval mode) or trains for cfg.IMAGETEXT.EPOCH epochs, validating and
    checkpointing after each one.

    Args:
        args: parsed CLI namespace; reads cfg, num_workers, seed,
            output_dir, dataset, data_dir, coco_data_json, raw_checkpoint
            and eval.

    Raises:
        ValueError: eval mode requested without cfg.IMAGETEXT.CHECKPOINT.
        NotImplementedError: unknown args.dataset.
    """
    cfg_from_file(args.cfg)
    cfg.WORKERS = args.num_workers
    pprint.pprint(cfg)
    # Seed every RNG source (numpy + CPU/GPU torch) for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    # Separate outputers keep train and val logs/snapshots apart.
    outputer_train = Outputer(args.output_dir, cfg.IMAGETEXT.PRINT_EVERY,
                              cfg.IMAGETEXT.SAVE_EVERY)
    outputer_val = Outputer(args.output_dir, cfg.IMAGETEXT.PRINT_EVERY,
                            cfg.IMAGETEXT.SAVE_EVERY)

    # Data: shuffled train loader, deterministic single-worker val loader.
    train_dataset, val_dataset = _build_datasets(args)
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=cfg.IMAGETEXT.BATCH_SIZE,
        shuffle=True,
        num_workers=int(cfg.WORKERS))
    val_dataloader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=cfg.IMAGETEXT.BATCH_SIZE,
        shuffle=False,
        num_workers=1)

    # Model: built fresh or restored from a checkpoint.
    encoder, decoder = _build_model(args, train_dataset.n_words, outputer_val)
    encoder.fine_tune(False)
    # to cuda
    encoder = encoder.cuda()
    decoder = decoder.cuda()
    loss_func = torch.nn.CrossEntropyLoss()

    if args.eval:  # eval only
        outputer_val.log("only eval the model...")
        # FIX: explicit check instead of `assert` — asserts are stripped
        # under `python -O`, silently disabling this validation.
        if cfg.IMAGETEXT.CHECKPOINT == '':
            raise ValueError(
                "cfg.IMAGETEXT.CHECKPOINT must be set in eval-only mode")
        val_rtn_dict, outputer_val = validate_one_epoch(
            0, val_dataloader, encoder, decoder, loss_func, outputer_val)
        outputer_val.log("\n[valid]: {}\n".format(dict2str(val_rtn_dict)))
        return

    # Optimizers: encoder and decoder use different LRs but share the
    # same step-decay schedule.
    optimizer_encoder = torch.optim.Adam(encoder.parameters(),
                                         lr=cfg.IMAGETEXT.ENCODER_LR)
    optimizer_decoder = torch.optim.Adam(decoder.parameters(),
                                         lr=cfg.IMAGETEXT.DECODER_LR)
    encoder_lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer_encoder, step_size=10, gamma=cfg.IMAGETEXT.LR_GAMMA)
    decoder_lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer_decoder, step_size=10, gamma=cfg.IMAGETEXT.LR_GAMMA)
    print("train the model...")
    for epoch_idx in range(cfg.IMAGETEXT.EPOCH):
        train_rtn_dict, outputer_train = train_one_epoch(
            epoch_idx, train_dataloader, encoder, decoder, optimizer_encoder,
            optimizer_decoder, loss_func, outputer_train)
        # Decay the LRs after the optimizers have stepped this epoch.
        encoder_lr_scheduler.step()
        decoder_lr_scheduler.step()

        outputer_train.log("\n[train] epoch: {}, {}\n".format(
            epoch_idx, dict2str(train_rtn_dict)))
        val_rtn_dict, outputer_val = validate_one_epoch(
            epoch_idx, val_dataloader, encoder, decoder, loss_func,
            outputer_val)
        outputer_val.log("\n[valid] epoch: {}, {}\n".format(
            epoch_idx, dict2str(val_rtn_dict)))

        # Rolling per-epoch snapshot (Outputer decides actual frequency).
        outputer_val.save_step({
            "encoder": encoder.state_dict(),
            "decoder": decoder.state_dict()
        })
    # Final weights after the last epoch.
    outputer_val.save({
        "encoder": encoder.state_dict(),
        "decoder": decoder.state_dict()
    })