Code example #1
File: train.py Project: tttamaki/simple_cnn_training
import torch
import torch.nn as nn
from tqdm import tqdm
# dataset_facory, model_factory, optimizer_factory, scheduler_factory,
# train and val are project-local helpers not shown in this excerpt.


def main():

    args = get_args()

    train_loader, val_loader, n_classes = dataset_facory(args)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model = model_factory(args, n_classes)
    model = model.to(device)
    model = nn.DataParallel(model)

    criterion = nn.CrossEntropyLoss()
    optimizer = optimizer_factory(args, model)
    scheduler = scheduler_factory(args, optimizer)

    iters = 0

    with tqdm(range(args.num_epochs)) as pbar_epoch:

        for epoch in pbar_epoch:
            pbar_epoch.set_description('[Epoch {}]'.format(epoch))

            iters = train(model, criterion, optimizer, train_loader, device,
                          iters, epoch)

            # run validation every val_epochs epochs
            if epoch % args.val_epochs == 0:
                val(model, criterion, optimizer, val_loader, device, iters,
                    epoch)

            if args.use_scheduler:
                scheduler.step()  # advance the learning-rate schedule
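
None of the snippets on this page include the factory itself, only its call sites. A rough sketch of what a model_factory(args, n_classes) like the one above might do, assuming a hypothetical args.model name field: dispatch on the architecture name and swap out the classification head.

import torch.nn as nn
import torchvision.models as models


def model_factory(args, n_classes):
    # Sketch only: args.model is a guessed attribute name, not
    # necessarily what the project above uses.
    if args.model == 'resnet18':
        model = models.resnet18(pretrained=True)
        model.fc = nn.Linear(model.fc.in_features, n_classes)
    elif args.model == 'vgg16':
        model = models.vgg16(pretrained=True)
        model.classifier[-1] = nn.Linear(
            model.classifier[-1].in_features, n_classes)
    else:
        raise ValueError('unknown model: {}'.format(args.model))
    return model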
Code example #2
File: train.py Project: Sierrra/kaggle-bowl-2018
    def _init_model(self, config):
        logger.info('Initializing model')
        model = model_factory(config.model)
        if not torch.cuda.is_available():
            raise RuntimeError(
                'The model is in CPU mode, but this code is designed for CUDA only')

        #if not isinstance(model, torch.nn.DataParallel):
        #    model = torch.nn.DataParallel(model)
        model = model.cuda()
        cudnn.benchmark = True
        self._model = model
        self.__layers_to_optimize = None
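
A note on the design choice above: cudnn.benchmark = True makes cuDNN time several convolution algorithms on the first batches and cache the fastest one, which usually speeds up training when input shapes stay fixed, but can hurt performance (and adds nondeterminism) when shapes change from batch to batch.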
Code example #3
File: gitkeeper.py Project: willbradbury/gitkeeper
import sys
# util, Model, model_factory and the module-level verbosity setting are
# project-local and not shown in this excerpt.


def main():
  if '--help' in sys.argv or '-h' in sys.argv:
    usage()
    return

  # Iterate through requested repositories
  for repo in sys.argv[1:]:
    # download it or find it locally
    rp = util.download(repo, v=verbosity)

    # build all the models
    for model in Model._registry:
      util.log(verbosity, 2, "training model " + model)
      m = model_factory(model, repo=rp, v=verbosity)
      m.train()
      util.log(verbosity, 2, "testing model " + model)
      m.test()
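
Example #3 treats the entries of Model._registry as model names (they are concatenated into log strings) and resolves them through model_factory. A minimal sketch of that registry-plus-factory pattern, with all names hypothetical:

_MODELS = {}


def register(name):
    # Class decorator that records a model class under a string key.
    def wrap(cls):
        _MODELS[name] = cls
        return cls
    return wrap


def model_factory(name, **kwargs):
    # Look up the class by name and instantiate it with the call-site kwargs.
    return _MODELS[name](**kwargs)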
Code example #4
import numpy as np
from keras.callbacks import TensorBoard
from keras.utils import plot_model
# load_dataset, inputs_factory, model_factory, save_object, filter_inputs,
# evaluate and plot_train_and_save are project-local helpers.


def train(args):
    vocabs, datasets = load_dataset()
    inputs, model_input = inputs_factory(args, vocabs)
    model = model_factory(args, len(vocabs.labels.itos), model_input, inputs)

    # save vocabularies
    save_object(vocabs, args.save_path + 'vocabs')

    # prepare model
    model.compile(optimizer="rmsprop",
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    plot_model(model, to_file=args.save_path + 'images/model_structure.png')
    model.summary()  # summary() prints the layer table itself

    tensorboard_callback = TensorBoard(log_dir=args.save_path,
                                       histogram_freq=0,
                                       write_graph=True,
                                       write_images=True)

    # get inputs based on args.inputs argument
    train, val, test = filter_inputs(args, datasets)

    history = model.fit(train,
                        np.array(datasets.train.y),
                        batch_size=args.batch_size,
                        epochs=args.max_epochs,
                        validation_data=(val, np.array(datasets.val.y)),
                        callbacks=[tensorboard_callback],
                        verbose=1)

    model.save(args.save_path + 'model_ner')

    evaluate(model, datasets.test, test, vocabs.labels, args.save_path,
             args.model_name)

    plot_train_and_save(history, args.save_path, args.model_name)
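
The factory in example #4 receives the label-set size plus prebuilt inputs and returns an uncompiled Keras model. A sketch under those assumptions (the BiLSTM tagger and all layer sizes are invented, and the split between model_input and inputs is a guess):

from keras.models import Model
from keras.layers import LSTM, Dense, Bidirectional, TimeDistributed, concatenate


def model_factory(args, n_labels, model_input, inputs):
    # Sketch only: merge the prebuilt input tensors, run a BiLSTM,
    # and emit one label distribution per token.
    merged = concatenate(inputs) if len(inputs) > 1 else inputs[0]
    hidden = Bidirectional(LSTM(128, return_sequences=True))(merged)
    outputs = TimeDistributed(Dense(n_labels, activation='softmax'))(hidden)
    return Model(inputs=model_input, outputs=outputs)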
Code example #5
File: predict.py Project: wikty/DeepLearningDemo
    assert os.path.isfile(inputs_file), msg.format(inputs_file)

    logger = Logger.set(os.path.join(exp_cfg.experiment_dir(), 
                                     'predict.log'))

    checkpoint = Checkpoint(
        checkpoint_dir=exp_cfg.experiment_dir(),
        filename=exp_cfg.checkpoint_filename(),
        best_checkpoint=exp_cfg.best_checkpoint(),
        latest_checkpoint=exp_cfg.latest_checkpoint(),
        logger=logger)

    # load params
    word_vocab = Vocab(words_file)
    tag_vocab = Vocab(tags_file)

    params = Params(exp_cfg.params_file())
    params.update(Params(dataset_cfg.params_file()))
    params.set('cuda', torch.cuda.is_available())

    # restore model
    items = model_factory(params)
    model = items['model']
    checkpoint.restore(model, None, restore_checkpoint)
    
    # predict
    predict(model, word_vocab, tag_vocab, inputs_file, 
            outputs_file, params.unk_word, params.cuda, encoding)

    print("It's done! Please check the output file:")
    print(outputs_file)
Code example #6
    checkpoint = args.checkpoint
    input_file = args.input_file
    output_file = args.output_file
    encoding = args.encoding

    msg = "Data directory not exists: {}"
    assert os.path.isdir(data_dir), msg.format(data_dir)
    msg = "Model directory not exists: {}"
    assert os.path.isdir(model_dir), msg.format(model_dir)
    msg = "Input file not exists: {}"
    assert os.path.isfile(input_file), msg.format(input_file)

    datasets_params = Params(datasets_params_file)
    word_vocab = Vocabulary(os.path.join(data_dir, words_txt))
    tag_vocab = Vocabulary(os.path.join(data_dir, tags_txt))
    unk_word = datasets_params.unk_word

    params = Params(os.path.join(model_dir, params_filename))
    params.update(datasets_params)
    params.set('cuda', torch.cuda.is_available())

    # restore model from the checkpoint
    model, *others = model_factory(params)
    Serialization(model_dir).restore(model, checkpoint=checkpoint)

    # predict
    predict(model, word_vocab, tag_vocab, unk_word, input_file, output_file,
            encoding, params.cuda)

    print("It's done! Please check the output file:")
    print(output_file)
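
Examples #5, #6, and #9 all obtain several training objects from a single model_factory(params) call: #5 receives a dict (items['model']), while #6 and #9 unpack a tuple (model, *others = ... and model, optimizer, criterion, metrics = ...). A sketch of the tuple-returning variant, where every field read from params is an assumption:

import torch
import torch.nn as nn


def model_factory(params):
    # Sketch only: vocab_size, embedding_dim, n_tags, lr and cuda are
    # guessed field names, not the projects' real Params schema.
    model = nn.Sequential(
        nn.Embedding(params.vocab_size, params.embedding_dim),
        nn.Linear(params.embedding_dim, params.n_tags),
    )
    if params.cuda:
        model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=params.lr)
    criterion = nn.CrossEntropyLoss()
    metrics = {'accuracy': lambda out, y: (out.argmax(-1) == y).float().mean()}
    return model, optimizer, criterion, metrics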
Code example #7
import matplotlib.pyplot as plt
import seaborn as sb

import torch
from torch import nn
from torch import optim
import torch.nn.functional as F
from torchvision import datasets, transforms

from PIL import Image

import model as Model
# args (parsed command-line arguments) and helper are defined elsewhere
# in the file.

model = Model.model_factory(
    arch=args.arch,
    hidden_units=args.hidden_units,
    gpu=args.gpu,
    learningrate=args.learning_rate,
)

data_dir = args.data_directory
train_dir = data_dir + '/train'
valid_dir = data_dir + '/valid'
test_dir = data_dir + '/test'

train_transforms = transforms.Compose([transforms.RandomRotation(30),
                                       transforms.RandomResizedCrop(100),
                                       transforms.RandomHorizontalFlip(),
                                       helper.standard_transforms])


train_data = datasets.ImageFolder(train_dir, transform=train_transforms)
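
Example #7 configures the factory with keyword arguments instead of an args namespace. A sketch of a factory with that signature, assuming a torchvision backbone with a VGG-style classifier head; the class count and the dropout are placeholders, and the excerpt does not show what the real function does with learningrate:

import torch
from torch import nn
from torchvision import models


def model_factory(arch, hidden_units, gpu, learningrate):
    # Sketch only: fetch a pretrained backbone by name, freeze it, and
    # attach a fresh classifier head. learningrate is accepted for
    # signature parity; its actual use is not shown in the excerpt.
    backbone = getattr(models, arch)(pretrained=True)  # e.g. arch='vgg16'
    for p in backbone.parameters():
        p.requires_grad = False
    in_features = backbone.classifier[0].in_features  # assumes a VGG-style head
    backbone.classifier = nn.Sequential(
        nn.Linear(in_features, hidden_units),
        nn.ReLU(),
        nn.Dropout(0.5),
        nn.Linear(hidden_units, 102),  # hypothetical number of classes
        nn.LogSoftmax(dim=1),
    )
    device = torch.device('cuda' if gpu and torch.cuda.is_available() else 'cpu')
    return backbone.to(device)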
Code example #8
File: train.py Project: thomas-waite/keras-ML
from utils import generate_plot, load_data, MODEL_PATH
from model import model_factory
from keras.models import load_model


def train(data, labels, model):
    history = model.fit(data, labels, epochs=150, batch_size=10)
    generate_plot(history, 'loss')
    model.save(MODEL_PATH)


X, y, input_dim = load_data()
model = model_factory(input_dim)
train(X, y, model)
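
Example #8 shows only the call site. A one-argument factory like this might build and compile a small dense network; the layer sizes and the binary-classification loss below are guesses:

from keras.models import Sequential
from keras.layers import Dense


def model_factory(input_dim):
    # Sketch only: a tiny fully connected binary classifier.
    model = Sequential([
        Dense(12, activation='relu', input_dim=input_dim),
        Dense(8, activation='relu'),
        Dense(1, activation='sigmoid'),
    ])
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model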
Code example #9
    # load datasets
    logger.info("Loading the {} dataset...".format(dataset_name))
    datasets_params = Params(datasets_params_file)
    loader = DataLoader(data_dir, datasets_params, encoding='utf8')
    dataset = loader.load(dataset_name,
                          encoding='utf8',
                          batch_size=params.batch_size,
                          to_tensor=True,
                          to_cuda=params.cuda)
    logger.info("- done.")

    # add datasets parameters into params
    params.update(datasets_params)

    # create model, optimizer and so on.
    model, optimizer, criterion, metrics = model_factory(params)

    # restore model, optimizer
    status = Serialization(checkpoint_dir=model_dir).restore(
        model=model, checkpoint=checkpoint)
    
    if not status:
        logger.error("Failed to restore model from checkpoint: {}".format(
            checkpoint))

    logger.info("Starting evaluate model on test dataset...")
    metrics_result = evaluate(model, dataset, criterion, metrics)
    logger.info("- done.")

    logger.info("Save metrics results...")
    metrics_file = os.path.join(model_dir,