Example #1
    def test_save_and_load_vector_field(self):
        field = torch.randn(*dims_v_small)
        field_path = './temp/test_output/field.vtk'
        save_field_to_disk(field[0], field_path)

        field_new = load_field(field_path, dims_v_small)
        assert torch.allclose(field, field_new)
Example #2
def main(args):
    device = torch.device('cuda' if args.gpu else 'cpu')

    # load the trained checkpoint: training args and model weights
    load_vars = torch.load(args.model)
    lm_args = load_vars['args']
    weights = load_vars['weights']

    # restore the text field saved next to the checkpoint
    dirname = os.path.dirname(args.model)
    TEXT = utils.load_field(os.path.join(dirname, 'text.field'))
    fields = [('src', TEXT), ('tgt', TEXT)]

    # read the input sentences, one per line
    with open(args.input, 'r') as f:
        examples = [data.Example.fromlist([line], [('src', TEXT)]) for line in f]

    # batch the test data without shuffling or sorting
    test_data = data.Dataset(examples, [('src', TEXT)])
    test_iter = data.Iterator(
        test_data,
        batch_size=args.batch_size,
        train=False,
        shuffle=False,
        sort=False,
    )

    # rebuild the language model and load the trained weights
    model = TranslationLM(TEXT, lm_args).to(device)
    model.load_state_dict(weights)

    # generate a continuation for each batch and print the decoded sentences
    model.eval()
    for samples in tqdm(test_iter, total=len(test_iter)):
        srcs = samples.src.to(device)
        outs = model.generate(srcs, args.maxlen).transpose(0, 1)
        sents = [utils.id2w(out, TEXT) for out in outs]
        print('\n'.join(sents))
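The utils.id2w helper called in the loop above is not shown here. Below is a minimal sketch of what such an id-to-word conversion could look like, assuming TEXT is a torchtext Field with the <bos>/<eos> special tokens used in the other examples; this is an assumption, not the project's actual implementation.

def id2w(ids, TEXT):
    # Hypothetical sketch: map a sequence of token ids back to a whitespace-joined
    # sentence, dropping special tokens and stopping at the first <eos>.
    words = []
    for i in ids:
        token = TEXT.vocab.itos[int(i)]
        if token == TEXT.eos_token:
            break
        if token not in (TEXT.init_token, TEXT.pad_token, TEXT.unk_token):
            words.append(token)
    return ' '.join(words)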
Example #3
def load_model(sim):

    TEXT = load_field('Random 50bi-tri calib/Random ' + sim + '/Step_1/TEXT')
    LABEL = load_field('Random 50bi-tri calib/Random ' + sim + '/Step_1/LABEL')
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # need to re-define all these model hyperparameters here as well
    INPUT_DIM = len(TEXT.vocab)
    EMBEDDING_DIM = 50
    N_FILTERS = 50
    FILTER_SIZES = [2, 3]
    OUTPUT_DIM = len(LABEL.vocab)
    DROPOUT = 0.5
    PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]

    # load previous parameters for simulations after the first one
    model = MCNN(INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, OUTPUT_DIM,
                 DROPOUT, PAD_IDX)
    model.load_state_dict(
        torch.load('Random 50bi-tri calib/Random ' + sim + '/Step_1/model.pt'))

    return model, TEXT, LABEL, device
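For illustration, the loader above might be invoked as follows; the simulation id '1' is a placeholder and not part of the original example.

# Hypothetical usage; the simulation id is illustrative only.
model, TEXT, LABEL, device = load_model('1')
model = model.to(device)
model.eval()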
Example #4
def retraining_session(datasets_path, path_TEXT, train_name, step, model_path,
                       sim, strategy):

    # make cuDNN deterministic for reproducibility
    torch.backends.cudnn.deterministic = True

    if strategy != 'random':
        TEXT = load_field('Random 50bi-tri calib/Random ' + sim +
                          '/Step_1/TEXT')
        LABEL = load_field('Random 50bi-tri calib/Random ' + sim +
                           '/Step_1/LABEL')
    else:
        TEXT = load_field(path_TEXT + 'TEXT')
        LABEL = load_field(path_TEXT + 'LABEL')

    INPUT_DIM = len(TEXT.vocab)
    EMBEDDING_DIM = 50
    N_FILTERS = 50
    FILTER_SIZES = [2, 3]
    OUTPUT_DIM = len(LABEL.vocab)
    DROPOUT = 0.5
    PAD_IDX = TEXT.vocab.stoi[TEXT.pad_token]

    # load previous parameters for simulations after the first one
    model = MCNN(INPUT_DIM, EMBEDDING_DIM, N_FILTERS, FILTER_SIZES, OUTPUT_DIM,
                 DROPOUT, PAD_IDX)
    model.load_state_dict(torch.load(model_path))

    train_data, test_data = data.TabularDataset.splits(path=datasets_path,
                                                       train=train_name,
                                                       test='test_final.csv',
                                                       format='csv',
                                                       fields=[('text', TEXT),
                                                               ('label', LABEL)
                                                               ],
                                                       skip_header=True)

    # split train into train and validation
    train_data, valid_data = train_data.split(split_ratio=0.7,
                                              random_state=random.seed(2))

    BATCH_SIZE = 50
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # define iterators
    train_iterator, valid_iterator, test_iterator = data.BucketIterator.splits(
        (train_data, valid_data, test_data),
        batch_size=BATCH_SIZE,
        sort_key=lambda x: len(x.text),
        sort_within_batch=False,
        device=device)

    # set optimizer and loss function
    optimizer = optim.Adam(model.parameters())
    criterion = nn.CrossEntropyLoss()
    model = model.to(device)
    criterion = criterion.to(device)

    N_EPOCHS = 5

    best_valid_loss = float('inf')

    train_history = {
        'accuracy': [],
        'loss': [],
        'recall': [],
        'precision': [],
        'f1': []
    }
    valid_history = {
        'accuracy': [],
        'loss': [],
        'recall': [],
        'precision': [],
        'f1': []
    }

    for epoch in range(N_EPOCHS):

        start_time = time.time()

        train_loss, train_acc, _, _, _ = train(
            model, train_iterator, optimizer, criterion, train_history)
        valid_loss, valid_acc, _, _, _ = evaluate(
            model, valid_iterator, criterion, valid_history)

        end_time = time.time()
        epoch_mins, epoch_secs = epoch_time(start_time, end_time)

        if valid_loss < best_valid_loss:
            best_valid_loss = valid_loss

    test_history = {
        'accuracy': [],
        'loss': [],
        'recall': [],
        'precision': [],
        'f1': []
    }
    test_loss, test_acc, _, _, _ = evaluate(model, test_iterator, criterion,
                                            test_history)
    print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}%')

    # model calibration
    scaled_model = model_calibration(model, valid_iterator)

    return model, scaled_model, train_history, valid_history, test_history, TEXT, LABEL, device
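A possible call to retraining_session for one retraining step is sketched below; all paths, file names, and argument values are placeholders chosen for illustration.

# Hypothetical call; every path and value below is a placeholder.
model, scaled_model, train_hist, valid_hist, test_hist, TEXT, LABEL, device = \
    retraining_session(
        datasets_path='datasets/',
        path_TEXT='fields/',
        train_name='train_step_2.csv',
        step=2,
        model_path='Random 50bi-tri calib/Random 1/Step_1/model.pt',
        sim='1',
        strategy='random',
    )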
Example #5
def main(args):
    device = torch.device('cuda' if args.gpu else 'cpu')

    if args.model:
        basedir, _ = os.path.split(args.model)
        path = os.path.join(basedir, 'text.field')
        TEXT = utils.load_field(path)
    else:
        TEXT = data.Field(lower=True, init_token='<bos>', eos_token='<eos>')

    fields = [('src', TEXT), ('tgt', TEXT)] if args.mode else [('src', TEXT)]

    # load training data
    if args.mode == 'finetune':
        slen_filter = lambda x: args.src_minlen <= len(x.src) <= args.src_maxlen \
                             and args.tgt_minlen <= len(x.tgt) <= args.tgt_maxlen

        train_data = data.TabularDataset(
            path=args.train,
            format='tsv',
            fields=fields,
            filter_pred=slen_filter,
        )
    else:  # pretrain
        train_data = datasets.LanguageModelingDataset(path=args.train,
                                                      text_field=TEXT,
                                                      newline_eos=True)

    # set Vocabulary object
    if args.model is None:
        TEXT.build_vocab(
            train_data,
            min_freq=args.min_freq,
            specials=['<sep>', '<mask>'],
        )

    if not os.path.exists(args.savedir):
        os.mkdir(args.savedir)

    utils.save_field(args.savedir, [('text', TEXT)])
    utils.save_vocab(args.savedir, [('text', TEXT)])

    # set training iterator
    if args.mode == 'finetune':
        train_iter = data.BucketIterator(
            train_data,
            batch_size=args.batch_size,
            sort_within_batch=True,
            sort_key=lambda x: len(x.src),
            repeat=False,
        )
    else:  # pre-train
        train_iter = data.BPTTIterator(
            train_data,
            batch_size=args.batch_size,
            bptt_len=args.bptt_len,
            train=True,
            repeat=False,
            shuffle=True,
        )

    print(f'| [text] Dictionary: {len(TEXT.vocab.itos)} types')
    print('')

    print(f' train: {args.train}')
    utils.get_stats(train_iter, fields)

    # load validation data
    if args.valid is not None:
        if args.mode == 'finetune':
            valid_data = data.TabularDataset(
                path=args.valid,
                format='tsv',
                fields=fields,
                filter_pred=slen_filter,
            )

            valid_iter = data.BucketIterator(valid_data,
                                             batch_size=args.batch_size,
                                             sort_within_batch=True,
                                             sort_key=lambda x: len(x.src),
                                             train=False,
                                             repeat=False,
                                             shuffle=False)
        else:  # pre-train
            valid_data = datasets.LanguageModelingDataset(path=args.valid,
                                                          text_field=TEXT,
                                                          newline_eos=True)

            valid_iter = data.BPTTIterator(
                valid_data,
                batch_size=args.batch_size,
                bptt_len=args.bptt_len,
                train=False,
                repeat=False,
                shuffle=False,
            )

        print(f'valid: {args.valid}')
        utils.get_stats(valid_iter, fields)

    # build a model
    if args.model:
        load_vars = torch.load(args.model)
        epoch = load_vars['epoch'] + 1
        best_loss = load_vars['best_loss']
        lm_args, lm_weights = load_vars['args'], load_vars['weights']
        model = TranslationLM(TEXT, lm_args)
        model.load_state_dict(lm_weights)
        model.to(device)
    else:
        epoch = 1
        best_loss = math.inf
        model = TranslationLM(TEXT, args).to(device)

    criterion = nn.CrossEntropyLoss(ignore_index=TEXT.vocab.stoi['<pad>'])

    optimizer_fn = utils.get_optimizer(args.optimizer)
    optimizer = optimizer_fn(model.parameters(), lr=args.lr)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min')

    # show the details of model and optimizer
    print('=============== MODEL ===============')
    print(model)
    print('')
    print('=============== OPTIMIZER ===============')
    print(optimizer)
    print('')

    max_epoch = (args.max_epoch or math.inf) + epoch

    while epoch < max_epoch and args.min_lr < optimizer.param_groups[0]['lr']:
        # training
        model.train()
        loss = step(epoch, args.mode, model, train_iter, criterion, optimizer,
                    device)

        # validation
        if args.valid is not None:
            model.eval()
            loss = step(epoch, args.mode, model, valid_iter, criterion,
                        optimizer, device)

        # saving model
        save_vars = {
            'epoch': epoch,
            'best_loss': loss if loss < best_loss else best_loss,
            'args': args,
            'weights': model.state_dict()
        }

        if loss < best_loss:
            best_loss = loss
            filename = os.path.join(args.savedir, 'checkpoint_best.pt')
            torch.save(save_vars, filename)
        if epoch % args.save_epoch == 0:
            filename = os.path.join(args.savedir, f'checkpoint_{epoch}.pt')
            torch.save(save_vars, filename)
        filename = os.path.join(args.savedir, 'checkpoint_last.pt')
        torch.save(save_vars, filename)

        # update
        scheduler.step(best_loss)
        epoch += 1
Example #6
def main(args):
    device = torch.device('cuda' if args.gpu else 'cpu')

    if args.re_training is None:
        TEXT = data.Field(
            lower=True, 
            init_token='<bos>', 
            eos_token='<eos>'
        )
    else: 
        basedir, _ = os.path.split(args.re_training)
        path = os.path.join(basedir, 'text.field')
        TEXT = utils.load_field(path)

    fields = [('text', TEXT)] if args.task in monolingual_tasks \
                else [('src', TEXT), ('tgt', TEXT)]

    slen_filter = lambda x: args.src_minlen <= len(x.src) <= args.src_maxlen \
                         and args.tgt_minlen <= len(x.tgt) <= args.tgt_maxlen

    # load training data
    if args.task == 'translation':
        train_data = data.TabularDataset(
                path=args.train,
                format='tsv',
                fields=fields,
                filter_pred=slen_filter,
        )
    else: # `causal`, `masked`
        train_data = datasets.LanguageModelingDataset(
            path=args.train, 
            text_field=TEXT, 
            newline_eos=True
        )

    # set Vocabulary object
    if args.re_training is None:
        TEXT.build_vocab(
            train_data, 
            min_freq=args.min_freq, 
            specials=['<sep>', '<mask>'], 
        )
        if args.embed_path:
            vectors = utils.load_vector(args.embed_path)
            TEXT.vocab.load_vectors(vectors)

    if not os.path.exists(args.savedir):
        os.mkdir(args.savedir)

    # save a field object
    with open(os.path.join(args.savedir, 'text.field'), 'wb') as fout:
        dill.dump(TEXT, fout)
    utils.save_vocab(args.savedir, TEXT)

    # set training iterator
    if args.task == 'translation':
        train_iter = data.BucketIterator(
            train_data, 
            batch_size=args.batch_size,
            sort_within_batch=True,
            sort_key=lambda x: len(x.src),
            repeat=False,
        )
    else: # `causal`, `masked`
        train_iter = data.BPTTIterator(
            train_data, 
            batch_size=args.batch_size, 
            bptt_len=args.bptt_len,
            train=True, 
            repeat=False, 
            shuffle=True,
        )

    print(f'| [text] Dictionary: {len(TEXT.vocab.itos)} types')
    print('')

    print(f'train: {args.train}')
    for name, field in fields:
        n_tokens, n_unk = utils.get_statics(train_iter, name, field)
        print(f'| [{name}] {n_tokens} tokens,', end='')
        print(f' coverage: {100*(n_tokens-n_unk)/n_tokens:.{4}}%')
    print('')

    # build a model
    model_class = get_model(args.task)

    if args.re_training is None:
        epoch = 1
        iteration = 0
        best_loss = math.inf
        model = model_class(TEXT, args).to(device)
    else:
        load_vars = torch.load(args.re_training)
        epoch = load_vars['epoch'] + 1
        iteration = load_vars['iteration']
        best_loss = load_vars['best_loss']
        lm_args, lm_weights = load_vars['args'], load_vars['weights']
        model = model_class(TEXT, lm_args)
        model.load_state_dict(lm_weights)
        model.to(device)

    criterion = nn.CrossEntropyLoss(ignore_index=TEXT.vocab.stoi['<pad>'])
    optimizer_fn = utils.get_optimizer(args.optimizer)
    optimizer = optimizer_fn(model.parameters(), lr=args.lr)
    trainer = Trainer(model, criterion, optimizer, args.clip, iteration)

    # show the details of model and optimizer
    print('=============== MODEL ===============')
    print(model)
    print('')
    print('=============== OPTIMIZER ===============')
    print(optimizer)
    print('')

    max_epoch = args.max_epoch or math.inf
    max_update = args.max_update or math.inf
    assert not (max_epoch == math.inf and max_update == math.inf), \
        'Please set `--max-epoch` or `--max-update`.'
 
    while epoch <= max_epoch and trainer.n_updates <= max_update:
        # training
        with tqdm(train_iter, dynamic_ncols=True) as pbar:
            train_loss = 0.0
            trainer.model.train()
            for samples in pbar:
                if args.task in monolingual_tasks:
                    srcs = samples.text.to(device)
                    tgts = None
                    refs = None if args.task == 'masked' \
                            else samples.target.to(device)
                else:
                    srcs = samples.src.to(device)
                    tgts = samples.tgt.to(device)
                    refs = None
                loss = trainer.step(srcs, tgts, refs)
                train_loss += loss.item()

                # setting of progressbar
                pbar.set_description(f'epoch {str(epoch).zfill(3)}')
                progress_state = OrderedDict(
                    task=args.task,
                    loss=loss.item(),
                    ppl=math.exp(loss.item()),
                    bsz=srcs.size(1),
                    lr=trainer.get_lr(), 
                    clip=args.clip, 
                    num_updates=trainer.n_updates)
                pbar.set_postfix(progress_state)
        train_loss /= len(train_iter)

        print(f'| epoch {str(epoch).zfill(3)} | train ', end='') 
        print(f'| loss {train_loss:.{4}} ', end='')
        print(f'| ppl {math.exp(train_loss):.{4}} ', end='')
        print(f'| lr {trainer.get_lr():.1e} ', end='')
        print(f'| clip {args.clip} ', end='')
        print(f'| num_updates {trainer.n_updates} |')
        
        # saving model
        save_vars = {
            'epoch': epoch,
            'iteration': trainer.n_updates,
            'best_loss': train_loss if train_loss < best_loss else best_loss,
            'args': args, 
            'weights': model.state_dict()
        }

        if train_loss < best_loss:
            best_loss = train_loss
            filename = os.path.join(args.savedir, 'checkpoint_best.pt') 
            torch.save(save_vars, filename)
        if epoch % args.save_epoch == 0:
            filename = os.path.join(args.savedir, f'checkpoint_{epoch}.pt') 
            torch.save(save_vars, filename)
        filename = os.path.join(args.savedir, 'checkpoint_last.pt') 
        torch.save(save_vars, filename)

        # update
        epoch += 1
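Several of the examples above restore a serialized torchtext field with utils.load_field() or load_field(), and Example #6 writes the field with dill.dump(). Under that assumption, a minimal sketch of such a loader could look like the following; it is not the projects' actual implementation.

import dill

def load_field(path):
    # Hypothetical counterpart to the dill.dump(TEXT, fout) call above:
    # restore a torchtext Field object that was pickled with dill.
    with open(path, 'rb') as fin:
        return dill.load(fin)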