Code Example #1
def train(args):
    devices = utils.get_devices(args.gpu)
    if args.seed is not None:
        utils.manual_seed(args.seed)

    logging.info("Loading data...")
    dataloader = create_dataloader(args)
    vocab = dataloader.dataset.vocab
    utils.save_pkl(vocab, os.path.join(args.save_dir, "vocab.pkl"))

    logging.info("Initializing training environment...")
    mdl = prepare_model(args, dataloader)
    optimizer_cls = get_optimizer_cls(args)
    trainer = Trainer(model=utils.to_device(mdl, devices),
                      device=devices[0],
                      vocab=vocab,
                      epochs=args.epochs,
                      save_dir=args.save_dir,
                      save_period=args.save_period,
                      optimizer_cls=optimizer_cls,
                      tensor_key="tensor",
                      samples=args.samples,
                      show_progress=args.show_progress,
                      kld_annealing=args.kld_annealing,
                      dynamic_rnn=mdl.encoder.rnn.dynamic
                      or mdl.decoder.rnn.dynamic)
    report_model(trainer)

    logging.info("Commecing training...")
    trainer.train(dataloader)

    logging.info("Done!")
Code Example #2
    def __init__(self,
                 num_in_channels,
                 num_out_channels,
                 num_dense_modules,
                 num_classes=10):
        manual_seed()
        super().__init__()

        self.preprocessing = nn.Sequential(
            nn.Conv2d(num_in_channels,
                      num_out_channels,
                      kernel_size=3,
                      padding=1), nn.BatchNorm2d(num_out_channels, eps=0.001),
            nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=(4, 1)))
        num_in_channels = num_out_channels

        self.features = nn.Sequential(
            DenseModule(num_dense_modules, num_in_channels, num_out_channels),
            TransitionModule(
                num_in_channels + num_dense_modules * 4 * num_out_channels,
                num_out_channels))
        num_in_channels = num_out_channels

        self.classifier = nn.Sequential(
            nn.BatchNorm2d(num_in_channels), nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d(1), Flatten(),
            nn.Linear(num_in_channels, num_classes))

        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
                nn.init.xavier_normal_(module.weight)
            elif isinstance(module, nn.LSTM):
                for layer in module.all_weights:
                    for weight in layer:
                        nn.init.normal_(weight)
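The classifier head above ends with a custom Flatten module (newer PyTorch releases ship nn.Flatten, but these snippets define their own). Assuming it simply collapses every non-batch dimension, e.g. the (N, C, 1, 1) output of the adaptive pool into (N, C), a minimal sketch:

import torch.nn as nn


class Flatten(nn.Module):
    # Assumed implementation: flatten all dimensions after the batch
    # dimension so nn.Linear can consume the pooled features.
    def forward(self, x):
        return x.view(x.size(0), -1)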
Code Example #3
File: generate.py  Project: kaniblu/pytorch-vrae
def generate(args):
    devices = utils.get_devices(args.gpu)
    if args.seed is not None:
        utils.manual_seed(args.seed)

    logging.info("Loading data...")
    vocab = utils.load_pkl(args.vocab)

    logging.info("Initializing generation environment...")
    model = prepare_model(args, vocab)
    model = utils.to_device(model, devices)
    generator = Generator(
        model=model,
        device=devices[0],
        batch_size=args.batch_size,
        vocab=vocab,
        bos=args.bos,
        eos=args.eos,
        unk=args.unk,
        max_len=args.max_length
    )

    logging.info("Commencing generation...")
    samples = generator.generate(args.z_samples)
    if args.nearest_neighbors is not None:
        dataset = prepare_dataset(args, vocab)
        neighbors = nearest_neighbors(args, samples, dataset)
    else:
        neighbors = None
    save(args, samples, neighbors)

    logging.info("Done!")
Code Example #4
File: train.py  Project: vzhong/gazp
def main(args):
    args.gpu = torch.cuda.is_available()
    utils.manual_seed(args.seed)
    Model = utils.load_module(args.model)
    cache_file = args.fcache or (os.path.join(
        'cache', 'data_{}_{}.debug.pt'.format(args.model, args.dataset)
        if args.debug else 'data_{}_{}.pt'.format(args.model, args.dataset)))
    splits, ext = torch.load(cache_file, map_location=torch.device('cpu'))
    splits = {k: dataset.Dataset(v) for k, v in splits.items()}
    splits['train'] = Model.prune_train(splits['train'], args)
    splits['dev'] = Model.prune_dev(splits['dev'], args)

    if args.model == 'nl2sql':
        Reranker = utils.load_module(args.beam_rank)
        ext['reranker'] = Reranker(args, ext)
    m = Model(args, ext).place_on_device()

    d = m.get_file('')
    if not os.path.isdir(d):
        os.makedirs(d)

    pprint.pprint(m.get_stats(splits, ext))

    if not args.test_only:
        if not args.skip_upperbound:
            print('upperbound')
            pprint.pprint(m.compute_upperbound(splits['train'][:1000]))
        if args.aug:
            augs = []
            for a in args.aug:
                augs.extend(torch.load(a))
            aug = dataset.Dataset(augs)
            splits['aug'] = Model.prune_train(aug, args)[:args.aug_lim]
            print('aug upperbound')
            pprint.pprint(m.compute_upperbound(aug[:10]))
            # aug_args = copy.deepcopy(args)
            # if 'consistent' not in args.aug:
            #     aug_args.epoch = 10
            # aug_dev = dataset.Dataset(random.sample(splits['train'], 3000))
            # m.run_train(aug, aug_dev, args=aug_args)
        pprint.pprint(m.get_stats(splits, ext))
        m.run_train(dataset.Dataset(splits['train'] + splits.get('aug', [])),
                    splits['dev'],
                    args=args)

    if args.resume:
        m.load_save(fname=args.resume)
    elif args.resumes:
        m.average_saves(args.resumes)
    if args.interactive_eval:
        dev_preds = m.run_interactive_pred(splits['dev'], args, verbose=True)
    else:
        dev_preds = m.run_pred(splits['dev'], args, verbose=True)

    if args.write_test_pred:
        with open(args.write_test_pred, 'wt') as f:
            json.dump(dev_preds, f, indent=2)
        print('saved test preds to {}'.format(args.write_test_pred))

    pprint.pprint(m.compute_metrics(splits['dev'], dev_preds))
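Example #4 resolves its model class at runtime with utils.load_module(args.model). Judging from the explicit importlib call in example #6 (importlib.import_module('model.{}'.format(args.aug)).Module), a sketch of such a loader could be:

import importlib


def load_module(name):
    # Assumed to mirror the importlib pattern in example #6: import
    # model/<name>.py and return the Module class it exports.
    return importlib.import_module('model.{}'.format(name)).Module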
Code Example #5
def main():
    args = build_args()
    assert args['inference_segments'] == 1
    manual_seed(args['random_seed'])
    net = get_model(args)
    dataloaders = get_dataloaders(args)

    net.load_state_dict(torch.load(args['checkpoint']))
    for phase, dataloader in dataloaders.items():
        features, labels = run_extract_features(net, dataloader, device=args['device'])
        features = torch.squeeze(features).cpu().numpy()
        labels = labels.cpu().numpy()
        df = pd.concat([pd.DataFrame(features), pd.DataFrame(labels)], axis=1)
        fpath = osp.join(args['output_path'], '{job_name}_{phase}.csv'.format(phase=phase, **args))
        if not osp.exists(osp.dirname(fpath)):
            os.makedirs(osp.dirname(fpath))
        df.to_csv(fpath, header=False, index=False)
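Because the frame is written with header=False and index=False, the file layout is implicit: feature columns first, label column(s) last, in the order of the pd.concat call. A usage sketch for reading such a file back, assuming a single label column (the file name is hypothetical and depends on args['job_name']):

import pandas as pd

df = pd.read_csv('output/extract_train.csv', header=None)
features = df.iloc[:, :-1].to_numpy()  # everything but the last column
labels = df.iloc[:, -1].to_numpy()     # the label column appended last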
Code Example #6
File: generate.py  Project: vzhong/gazp
def main(args):
    args.gpu = torch.cuda.is_available()
    utils.manual_seed(args.seed)

    assert args.resume

    AugModel = importlib.import_module('model.{}'.format(args.aug)).Module
    args.ftrain = os.path.abspath(args.ftrain)
    args.tables = os.path.abspath(args.tables)
    args.db = os.path.abspath(args.db)
    print(args)
    gen_m = Module.load_inst(
        args.resume,
        overwrite=dict(tables=args.tables, db=args.db, dcache=args.dcache,
                       beam_size=args.beam_size, batch=args.batch,
                       fparser=args.fparser))
    fout = args.fout
    if args.beam_size:
        fout = fout.replace('.json', '.beam.json')
    print('generating to {}'.format(fout))
    fout = os.path.abspath(fout)
    gen_m.run_gen_on_split(args.num_gen, AugModel, args.db_split, fout, args=args, save=not args.skip_intermediate)
Code Example #7
def main():
    args = parse_args()
    manual_seed(args['random_seed'])
    net, criterion, optimizer = get_model(args)
    dataloaders = get_dataloaders(args)

    if args['inference_mode'] is False:
        tb_writer = SummaryWriter(args['tb_run_name'])
        args['tb_writer'] = tb_writer
        best_net = run_epochs(net, optimizer, dataloaders, criterion, args)
    else:
        net.load_state_dict(torch.load(args['checkpoint']))
        loss, acc = run_epoch(net,
                              optimizer=optimizer,
                              dataloader=dataloaders['test'],
                              criterion=criterion,
                              phase='test',
                              device=args['device'])
        print('Test loss: {:.3f}, Test acc: {:.2%}'.format(loss, acc))
Code Example #8
    def __init__(self,
                 num_in_channels,
                 num_out_channels,
                 num_dense_modules,
                 num_classes=10):
        manual_seed()
        super().__init__()

        self.preprocessing = nn.Sequential(
            nn.Conv2d(num_in_channels,
                      num_out_channels,
                      kernel_size=3,
                      padding=1), nn.BatchNorm2d(num_out_channels, eps=0.001),
            nn.ReLU(inplace=True), nn.MaxPool2d(kernel_size=(4, 1)))
        num_in_channels = num_out_channels

        self.features = nn.Sequential(
            DenseModule(num_dense_modules, num_in_channels, num_out_channels),
            TransitionModule(
                num_in_channels + num_dense_modules * 4 * num_out_channels,
                num_out_channels))
        num_in_channels = num_out_channels

        self.classifier = nn.Sequential(
            nn.BatchNorm2d(num_in_channels), nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d(1), Flatten(),
            nn.Linear(num_in_channels, num_classes))

        self.aggregate = nn.Sequential(nn.LSTM(10, 10, batch_first=True),
                                       AverageHiddenStateFromLSTM(),
                                       nn.Linear(10, 10))

        for name, module in self.named_modules():
            if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
                nn.init.xavier_normal_(module.weight)
            elif isinstance(module, nn.LSTM):
                for layer in module.all_weights:
                    for weight in layer:
                        nn.init.normal_(weight)

        self.load_state_dict(
            torch.load('checkpoints/densenet_randomized_segment_fold_2.pth'),
            strict=False)
Code Example #9
File: main.py  Project: dutlzn/MUSIC-SRC
def main():
    args = build_args()
    manual_seed(args['random_seed'])
    net, criterion, optimizer = get_model(args)
    dataloaders = get_dataloaders(args)

    if args['inference_mode'] is False:
        tb_writer = SummaryWriter(args['tb_run_name'])
        args['tb_writer'] = tb_writer
        run_epochs(net, optimizer, dataloaders, criterion, args)
    if args['checkpoint'] == '':
        args['checkpoint'] = args['checkpoint_name_format'].format(
            checkpoint_name='best_model', **args)
    net.load_state_dict(torch.load(args['checkpoint']))
    loss, acc, preds, gts = run_epoch(
        net, optimizer=optimizer, dataloader=dataloaders['test'],
        criterion=criterion, phase='test', device=args['device'],
        with_preds_and_gts=True)
    aucs = multiclass_roc_auc_score(preds, gts)
    print('{} Test loss: {:.3f}, Test acc: {:.2%}, Test AUC: {}'.format(
        args['job_name'], loss, acc, aucs))
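multiclass_roc_auc_score is another project helper whose body is not shown. A sketch under the assumption that it computes one-vs-rest AUCs from an (N, C) matrix of class scores and integer ground-truth labels, following the (preds, gts) argument order of the call above:

import numpy as np
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import label_binarize


def multiclass_roc_auc_score(preds, gts, average=None):
    # Assumed behavior: binarize the integer labels one-vs-rest and
    # score each class column; average=None yields per-class AUCs,
    # matching the array printed in the log line above.
    preds = np.asarray(preds)
    gts_bin = label_binarize(gts, classes=np.arange(preds.shape[1]))
    return roc_auc_score(gts_bin, preds, average=average)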
Code Example #10
def predict(args):
    devices = utils.get_devices(args.gpu)
    if args.seed is not None:
        utils.manual_seed(args.seed)

    logging.info("Loading data...")
    vocab_paths = [getattr(args, f"{mode}_vocab") for mode in MODES]
    vocabs = [utils.load_pkl(v) for v in vocab_paths]
    test_dataset = prepare_dataset(args, vocabs[0])
    test_dataloader = td.DataLoader(
        dataset=test_dataset,
        batch_size=args.batch_size,
        num_workers=args.data_workers,
        collate_fn=dataset.TextSequenceBatchCollator(
            pad_idxs=[len(v) for v in vocabs]))

    logging.info("Initializing generation environment...")
    model, vocabs[0] = prepare_model(args, vocabs)
    model = utils.to_device(model, devices)
    predictor = Predictor(
        model=model,
        device=devices[0],
        batch_size=args.batch_size,
        sent_vocab=vocabs[0],
        label_vocab=vocabs[1],
        intent_vocab=vocabs[2],
        bos=args.bos,
        eos=args.eos,
        unk=args.unk,
        beam_size=args.beam_size,
    )

    logging.info("Commencing prediction...")
    with torch.no_grad():
        (labels, intents), (pl, pi) = predictor.predict(test_dataloader)
    report_stats(args, labels, intents, pl, pi)
    save(args, labels, intents, pl, pi)

    logging.info("Done!")
Code Example #11
File: predict.py  Project: kaniblu/pytorch-nlu
def generate(args):
    devices = utils.get_devices(args.gpu)
    if args.seed is not None:
        utils.manual_seed(args.seed)

    logging.info("Loading data...")
    vocab_paths = [getattr(args, f"{mode}_vocab") for mode in MODES]
    vocabs = [utils.load_pkl(v) for v in vocab_paths]
    test_dataset = prepare_dataset(args, vocabs[0])
    test_dataloader = td.DataLoader(
        dataset=test_dataset,
        batch_size=args.batch_size,
        num_workers=args.data_workers,
        collate_fn=dataset.TextSequenceBatchCollator(
            pad_idxs=[len(v) for v in vocabs]))

    logging.info("Initializing generation environment...")
    model, vocabs[0] = prepare_model(args, vocabs)
    model.beam_size = args.beam_size
    model = utils.to_device(model, devices)
    predictor = PredictorWithProgress(model=model,
                                      device=devices[0],
                                      vocabs=vocabs,
                                      progress=args.show_progress,
                                      bos=args.bos,
                                      eos=args.eos,
                                      unk=args.unk,
                                      topk=args.top_k)

    logging.info("Commencing prediction...")
    with torch.no_grad():
        (labels, pl), (intents, pi) = predictor.predict(test_dataloader)
    labels, intents = [l[0] for l in labels], [i[0] for i in intents]
    pl, pi = [p[0] for p in pl], [p[0] for p in pi]
    report_stats(args, labels, intents, pl, pi)
    save(args, labels, intents, pl, pi)

    logging.info("Done!")
Code Example #12
import torch.cuda as cuda
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.model_selection import StratifiedKFold
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader
from torchvision import models

from datasets.gtzan import GTZANFF_MELSPEC as GTZAN
from model import DenseInception
from utils import manual_seed
from train_utils import run_epoch

manual_seed()

PRETRAIN_SEGMENTS = 10
PRETRAIN_BATCH_SIZE = 10
PRETRAIN_CHECKPOINT = 'checkpoints/gtzan_fault_filtered_cnn_pretrained.pt'
MIN_SEGMENTS = 10
SEGMENTS = 18
BATCH_SIZE = 8
CHECKPOINT = 'checkpoints/gtzan_fault_filtered_drnn.pt'
OVERLAP = 0.5
EPOCHS = 300
LR = 1e-3
FINE_TUNING_LR = 1e-4
LR_DECAY_RATE = 0.985
NUM_EARLY_STOPPING_PATIENCE = 50
Code Example #13
def main(cfg):
    cwd = utils.get_original_cwd()
    cfg.cwd = cwd
    cfg.pos_size = 2 * cfg.pos_limit + 2
    logger.info(f'\n{cfg.pretty()}')

    __Model__ = {
        'cnn': models.PCNN,
        'rnn': models.BiLSTM,
        'transformer': models.Transformer,
        'gcn': models.GCN,
        'capsule': models.Capsule,
        'lm': models.LM,
    }

    # device
    if cfg.use_gpu and torch.cuda.is_available():
        device = torch.device('cuda', cfg.gpu_id)
    else:
        device = torch.device('cpu')
    logger.info(f'device: {device}')

    # If the preprocessing step is unchanged, it is best to comment this
    # out so the data is not re-preprocessed on every run
    if cfg.preprocess:
        preprocess(cfg)

    train_data_path = os.path.join(cfg.cwd, cfg.out_path, 'train.pkl')
    valid_data_path = os.path.join(cfg.cwd, cfg.out_path, 'valid.pkl')
    test_data_path = os.path.join(cfg.cwd, cfg.out_path, 'test.pkl')
    vocab_path = os.path.join(cfg.cwd, cfg.out_path, 'vocab.pkl')

    if cfg.model_name == 'lm':
        vocab_size = None
    else:
        vocab = load_pkl(vocab_path)
        vocab_size = vocab.count
    cfg.vocab_size = vocab_size

    train_dataset = CustomDataset(train_data_path)
    valid_dataset = CustomDataset(valid_data_path)
    test_dataset = CustomDataset(test_data_path)

    train_dataloader = DataLoader(train_dataset,
                                  batch_size=cfg.batch_size,
                                  shuffle=True,
                                  collate_fn=collate_fn(cfg))
    valid_dataloader = DataLoader(valid_dataset,
                                  batch_size=cfg.batch_size,
                                  shuffle=True,
                                  collate_fn=collate_fn(cfg))
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=cfg.batch_size,
                                 shuffle=True,
                                 collate_fn=collate_fn(cfg))

    model = __Model__[cfg.model_name](cfg)
    model.to(device)
    logger.info(f'\n {model}')

    optimizer = optim.Adam(model.parameters(),
                           lr=cfg.learning_rate,
                           weight_decay=cfg.weight_decay)
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                     factor=cfg.lr_factor,
                                                     patience=cfg.lr_patience)
    criterion = nn.CrossEntropyLoss()

    best_f1, best_epoch = -1, 0
    es_loss, es_f1, es_epoch, es_patience = 1e8, -1, 0, 0
    best_es_epoch, best_es_f1 = 0, -1
    es_path, best_es_path = '', ''
    train_losses, valid_losses = [], []

    if cfg.show_plot and cfg.plot_utils == 'tensorboard':
        writer = SummaryWriter('tensorboard')
    else:
        writer = None

    logger.info('=' * 10 + ' Start training ' + '=' * 10)

    for epoch in range(1, cfg.epoch + 1):
        manual_seed(cfg.seed + epoch)
        train_loss = train(epoch, model, train_dataloader, optimizer,
                           criterion, device, writer, cfg)
        valid_f1, valid_loss = validate(epoch, model, valid_dataloader,
                                        criterion, device, cfg)
        scheduler.step(valid_loss)
        model_path = model.save(epoch, cfg)
        # logger.info(model_path)

        train_losses.append(train_loss)
        valid_losses.append(valid_loss)
        if best_f1 < valid_f1:
            best_f1 = valid_f1
            best_epoch = epoch
        # Use the valid loss as the early-stopping criterion
        if es_loss > valid_loss:
            es_loss = valid_loss
            es_f1 = valid_f1
            es_epoch = epoch
            es_patience = 0
            es_path = model_path
        else:
            es_patience += 1
            if es_patience >= cfg.early_stopping_patience:
                best_es_epoch = es_epoch
                best_es_f1 = es_f1
                best_es_path = es_path

    if cfg.show_plot:
        if cfg.plot_utils == 'matplot':
            plt.plot(train_losses, 'x-')
            plt.plot(valid_losses, '+-')
            plt.legend(['train', 'valid'])
            plt.title('train/valid comparison loss')
            plt.show()

        if cfg.plot_utils == 'tensorboard':
            for i in range(len(train_losses)):
                writer.add_scalars('train/valid_comparison_loss', {
                    'train': train_losses[i],
                    'valid': valid_losses[i]
                }, i)
            writer.close()

    logger.info(
        f'best early stopping epoch (by valid loss): {best_es_epoch}, '
        f'macro f1 at that epoch: {best_es_f1:0.4f}')
    logger.info(f'early-stopped model save path: {best_es_path}')
    logger.info(
        f'total {cfg.epoch} epochs, best epoch (by valid macro f1): {best_epoch}, '
        f'macro f1 at that epoch: {best_f1:.4f}')

    validate(-1, model, test_dataloader, criterion, device, cfg)
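The inline early-stopping bookkeeping above (es_loss, es_patience, and friends) is easy to misread. The same valid-loss criterion, factored into a small tracker purely for illustration (not part of the original project):

class EarlyStopping:
    # Sketch: reproduces the valid-loss bookkeeping from the loop above.
    def __init__(self, patience):
        self.best_loss = float('inf')
        self.best_epoch = 0
        self.patience = patience
        self.stale = 0

    def step(self, loss, epoch):
        # Returns True once `patience` epochs pass without improvement.
        if loss < self.best_loss:
            self.best_loss, self.best_epoch, self.stale = loss, epoch, 0
            return False
        self.stale += 1
        return self.stale >= self.patience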
Code Example #14
def train(args):
    devices = utils.get_devices(args.gpu)
    if args.seed is not None:
        utils.manual_seed(args.seed)

    logging.info("Loading data...")
    dataloader = create_dataloader(args)
    vocabs = dataloader.dataset.vocabs
    if args.validate:
        val_dataloader = create_dataloader(args, vocabs, True)
    else:
        val_dataloader = None
    fnames = [f"{mode}.vocab" for mode in MODES]
    for vocab, fname in zip(vocabs, fnames):
        utils.save_pkl(vocab, os.path.join(args.save_dir, fname))

    logging.info("Initializing training environment...")
    resume_from = dict()
    if args.resume_from is not None:
        resume_from = torch.load(args.resume_from)
    mdl = prepare_model(args, vocabs, resume_from)
    mdl = utils.to_device(mdl, devices)
    optimizer_cls = get_optimizer_cls(args)
    validator = None
    if args.validate:
        validator = Validator(
            model=mdl,
            device=devices[0],
            vocabs=vocabs,
            bos=args.bos,
            eos=args.eos,
            unk=args.unk,
            alpha=args.loss_alpha,
            beta=args.loss_beta,
            progress=args.show_progress,
            batch_stats=args.log_stats
        )
    trainer = Trainer(
        model=mdl,
        model_path=args.model_path,
        alpha=args.loss_alpha,
        beta=args.loss_beta,
        device=devices[0],
        vocabs=vocabs,
        epochs=args.epochs,
        save_dir=args.save_dir,
        save_period=args.save_period,
        optimizer_cls=optimizer_cls,
        samples=args.samples,
        tensorboard=args.tensorboard,
        progress=args.show_progress,
        validator=validator,
        batch_stats=args.log_stats,
        early_stop=args.early_stop,
        early_stop_criterion=args.early_stop_criterion,
        early_stop_patience=args.early_stop_patience
    )
    trainer.load_snapshot(resume_from)
    report_model(trainer)

    logging.info("Commencing training joint-lu...")
    trainer.train(dataloader, val_dataloader)

    logging.info("Done!")
Code Example #15
def main():
    args = parse_args()
    manual_seed(args['random_seed'])
    run_kfold(args)
Code Example #16
File: __main__.py  Project: ren98feng/pytorch-nlugen
def generate(args):
    devices = utils.get_devices(args.gpu)
    if args.seed is not None:
        utils.manual_seed(args.seed)

    logging.info("Loading data...")
    vocab_paths = [args.word_vocab, args.label_vocab, args.intent_vocab]
    vocabs = [utils.load_pkl(v) for v in vocab_paths]
    dataloader = None

    logging.info("Initializing generation environment...")
    model, vocabs[0] = prepare_model(args, vocabs)
    model = utils.to_device(model, devices)
    encoder = encode.Encoder(model=model,
                             device=devices[0],
                             batch_size=args.batch_size)
    generator = Generator(model=model,
                          device=devices[0],
                          batch_size=args.batch_size,
                          sent_vocab=vocabs[0],
                          label_vocab=vocabs[1],
                          intent_vocab=vocabs[2],
                          bos=args.bos,
                          eos=args.eos,
                          unk=args.unk,
                          max_len=args.max_length,
                          beam_size=args.beam_size,
                          beam_topk=args.beam_sample_topk,
                          validate=args.validate)

    logging.info("Commencing generation...")
    if args.generation_type in {"posterior", "uniform"}:
        if dataloader is None:
            dataloader = create_dataloader(args, vocabs)
    sampler = utils.map_val(
        args.generation_type, {
            "gaussian": lambda: None,
            "posterior": lambda: MultivariateGaussianMixtureSampler(
                *encoder.encode(dataloader),
                scale=args.posterior_sampling_scale),
            "uniform": lambda: UniformNoiseSampler(
                encoder.encode(dataloader)[0],
                pa=args.uniform_sampling_pa,
                pm=args.uniform_sampling_pm),
        },
        name="sampler")()
    with torch.no_grad():
        gens, probs = generator.generate(args.samples, sampler)
    if args.nearest_neighbors is not None:
        if dataloader is None:
            dataloader = create_dataloader(args, vocabs)
        sents = [data["string"][0] for data in dataloader.dataset]
        searcher = neighbor.PyTorchPCASearcher(
            pca_dim=100,
            sents=sents,
            num_neighbors=args.nearest_neighbors,
            batch_size=args.nearest_neighbors_batch_size,
            device=devices[0])
        neighbors = searcher.search(gens[0])
    else:
        neighbors = None
    report_stats(args, gens[0], neighbors)
    save(args, gens, probs, neighbors)

    logging.info("Done!")