Code Example #1
# Alternative (left commented out in the source): a custom exponential-decay
# schedule via torchsample's LearningRateScheduler, plus a plot of the LRs.
#save_lrs = []
#def lr_schedule(epoch, lr, **kwargs):
#    """exponential decay"""
#    new_lr = lr[0] * 1e-5**(epoch / 200)
#    save_lrs.append(new_lr)
#    return new_lr
#callbacks = [LearningRateScheduler(lr_schedule)]
#import matplotlib.pyplot as plt
#%matplotlib inline
#plt.plot(np.arange(len(save_lrs)), np.array(save_lrs))
#plt.show()
import torch.nn.functional as F
import torch.optim as optim

from torchsample.callbacks import ReduceLROnPlateau
callbacks = [
    ReduceLROnPlateau(monitor='val_loss',
                      factor=0.1,
                      patience=1,
                      cooldown=0,
                      min_lr=1e-3,
                      verbose=1)
]
# Network is defined elsewhere in the source file; the set_loss/set_optimizer/
# set_callbacks/fit interface comes from torchsample's trainer-style module API.
model = Network()
model.set_loss(F.nll_loss)
model.set_optimizer(optim.Adadelta, lr=1.0)
model.set_callbacks(callbacks)

# FIT THE MODEL
model.fit(x_train,
          y_train,
          validation_data=(x_test, y_test),
          nb_epoch=20,
          batch_size=128,
          verbose=1)
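
For comparison, the same plateau-driven decay can be written with PyTorch's
built-in torch.optim.lr_scheduler.ReduceLROnPlateau. A minimal sketch, where
Network, train_loader, and val_loader are hypothetical stand-ins for the
setup above:

import torch
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.lr_scheduler import ReduceLROnPlateau

model = Network()  # hypothetical model, as in the example above
optimizer = optim.Adadelta(model.parameters(), lr=1.0)
scheduler = ReduceLROnPlateau(optimizer, mode='min', factor=0.1,
                              patience=1, min_lr=1e-3)

for epoch in range(20):
    model.train()
    for x, y in train_loader:  # hypothetical DataLoader
        optimizer.zero_grad()
        loss = F.nll_loss(model(x), y)
        loss.backward()
        optimizer.step()

    model.eval()
    with torch.no_grad():
        val_loss = sum(F.nll_loss(model(x), y, reduction='sum').item()
                       for x, y in val_loader) / len(val_loader.dataset)
    scheduler.step(val_loss)  # reduces the LR when val_loss plateaus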
Code Example #2
import torch.nn as nn
import torch.nn.functional as F

from torchsample.modules import ModuleTrainer
from torchsample.callbacks import EarlyStopping, ReduceLROnPlateau
from torchsample.regularizers import L1Regularizer, L2Regularizer
from torchsample.constraints import UnitNorm
from torchsample.initializers import XavierUniform
from torchsample.metrics import CategoricalAccuracy


class Network(nn.Module):
    # The layer definitions are not shown in the source; these MNIST-style
    # sizes are inferred from the 1600-unit flatten in forward() below.
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)


model = Network()
trainer = ModuleTrainer(model)

callbacks = [
    EarlyStopping(patience=10),
    ReduceLROnPlateau(factor=0.5, patience=5)
]
regularizers = [
    L1Regularizer(scale=1e-3, module_filter='conv*'),
    L2Regularizer(scale=1e-5, module_filter='fc*')
]
constraints = [UnitNorm(frequency=3, unit='batch', module_filter='fc*')]
initializers = [XavierUniform(bias=False, module_filter='fc*')]
metrics = [CategoricalAccuracy(top_k=3)]

trainer.compile(loss='nll_loss',
                optimizer='adadelta',
                regularizers=regularizers,
                constraints=constraints,
                initializers=initializers,
                metrics=metrics)
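
Once compiled, the trainer is fit much as in Example #1. A minimal sketch,
assuming MNIST-style tensors x_train/y_train and x_test/y_test (argument
names follow the torchsample README and may differ between versions):

trainer.fit(x_train, y_train,
            val_data=(x_test, y_test),
            num_epoch=20,
            batch_size=128,
            verbose=1)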
Code Example #3
File: fish.py  Project: antorsae/fish
    # Fragment from inside a training-setup function; `torch` is imported
    # as `th`, and `dataset`, `valid_sampler`, `model`, and `args` are
    # defined earlier in the source file.
    valid_loader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        sampler=valid_sampler,
        num_workers=4,
        pin_memory=th.cuda.is_available())

    trainer = ModuleTrainer(model)

    suffix = '-' + args.suffix if args.suffix else ''

    if args.squeeze_excitation:
        suffix = '-se' + suffix

    callbacks = [
        ReduceLROnPlateau(factor=0.5, patience=5, verbose=1),
        ModelCheckpoint(directory='checkpoints',
                        filename='fishnet' + suffix + '_{epoch}_{loss}.pth',
                        save_best_only=True,
                        verbose=1)
    ]

    initializers = [XavierUniform(bias=False, module_filter='class*')]
    metrics = [CategoricalAccuracy()]

    def species_length_loss(input, target):
        # Combined loss over a multi-task head: the first 8 network outputs
        # are species logits, the 9th regresses the fish length.
        input_species = input[:, :8]
        input_length = input[:, 8]

        target_species = target[:, 0].long()
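
The snippet is truncated mid-function. Purely for illustration (not the
project's actual code), a completion might pair cross-entropy on the eight
species logits with a smooth-L1 term on the length output, assuming
torch.nn.functional is imported as F and target[:, 1] holds the length:

    def species_length_loss(input, target):
        input_species = input[:, :8]   # logits over 8 species classes
        input_length = input[:, 8]     # scalar length-regression output

        target_species = target[:, 0].long()
        target_length = target[:, 1]   # hypothetical length column

        # Sum of a classification term and a regression term.
        return (F.cross_entropy(input_species, target_species)
                + F.smooth_l1_loss(input_length, target_length))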
Code Example #4
File: main.py  Project: zeroows/NeuralMF-Pytorch
# Fragment from main.py; the imports (torch, time as _time, NeuralMFConfig,
# CFDataLoader, implicit_load_model, RankingModulelTrainer, and the
# callbacks/regularizers/metrics used below) appear earlier in the file.
def train(kwargs):
    """Training process: parse settings, load data, build and fit the
    model, then save the checkpoint."""
    print('[INFO] Loading Settings...')

    parser = None
    config = None

    try:
        parser = NeuralMFConfig()
        # print(kwargs)
        if '-c' in kwargs:
            # Find the value following the '-c' flag and load the JSON
            # config. (The original raised inside the loop body, which
            # failed whenever '-c' was not the first argument; the for/else
            # only raises when no '-c' value is found at all.)
            args = list(enumerate(kwargs))
            for id, arg in args:
                if arg == '-c' and len(args) >= id + 2:
                    config = parser.get_args_from_json(args[id + 1][1])
                    break
            else:
                raise AssertionError("Corresponding config arg not found")
        else:
            config = parser.parse_args(kwargs)
    except Exception as e:
        print('[Exception] Unavailable Settings, %s' % e)
        if parser:
            help(kwargs)
        if '-c' in kwargs:
            print(
                '[Exception] Please follow the format: python main.py -c configs/neuralMF_config.json'
            )
        exit(0)

    cuda_device = -1 if not config.cuda else 0
    if config.cuda:
        torch.cuda.set_device(cuda_device)
        config.cuda_device = cuda_device

    print('[INFO] Loading Data...')
    dl = CFDataLoader(config=config, only_test=False)

    print('[INFO] Build Networks...')
    nb_users, nb_items = dl.get_num_user_and_item()
    parser.args.nb_users = nb_users
    parser.args.nb_items = nb_items
    model = implicit_load_model(config.model)(config, nb_users, nb_items)
    print(model)
    callbacks = [
        EarlyStopping(patience=10),
        ReduceLROnPlateau(factor=0.5, patience=5)
    ]
    regularizers = [L2Regularizer(scale=1e-5, module_filter='fc*')]
    constraints = [UnitNorm(frequency=3, unit='batch', module_filter='fc*')]
    initializers = []
    metrics = [HitAccuracy(config.topk), NDCGAccuracy(config.topk)]

    print('[INFO] Begin Training...')
    time_str = _time.strftime('%m%d_%H:%M:%S')

    trainer = RankingModulelTrainer(model=model)
    trainer.compile(loss="binary_cross_entropy_with_logits",
                    optimizer='Adam',
                    regularizers=regularizers,
                    constraints=constraints,
                    initializers=initializers,
                    metrics=metrics,
                    callbacks=callbacks)
    trainer.fit_loader(dl.get_train_data(),
                       dl.get_test_data(),
                       num_epoch=config.epochs,
                       verbose=1,
                       cuda_device=cuda_device)
    print('[INFO] Complete Training...')
    model.save(time_str=time_str, use_onnx=config.onnx)
    parser.save(timestamp=time_str)
    print('[INFO] Saved Model into checkpoint directory')
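
The entry-point wiring is not shown in the snippet. A hypothetical
invocation passing raw argv strings (consistent with the '-c' handling
above) would be:

import sys

if __name__ == '__main__':
    # e.g. python main.py -c configs/neuralMF_config.json
    # or   python main.py --epochs 20 ...
    train(sys.argv[1:])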
Code Example #5
# Fragment from the project's training script; imports (numpy as np, torch,
# torch.nn as nn, DataLoader, ModuleTrainer, the callbacks/metrics used
# below, os, time, timeit.default_timer, and the module-level parentddir)
# appear earlier in the file.
def main(args):
    """Trains, evaluates, and optionally checkpoints the model for the
    given command-line arguments."""
    start = default_timer()

    args.cuda = not args.no_cuda and torch.cuda.is_available()

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    print("-------------------------------------------------")
    if args.verbose > 0:
        print("Ran on {}".format(time.strftime("%Y-%m-%d %H:%M")))
        print()

    print('Parameters: {}'.format(vars(args)))
    print()

    # PREPARES DATA
    if args.verbose > 1:
        print('Prepares data ...')
    train, valid, test = train_valid_test_datasets(
        args.dataset,
        validSize=args.validation_size,
        isHashingTrick=not args.dictionnary,
        nFeaturesRange=args.num_features_range,
        ngramRange=args.ngrams_range,
        seed=args.seed,
        num_words=args.num_embeding,
        specificArgs={'dictionnary': ['num_words']})

    num_classes = len(train.classes)
    train = DataLoader(dataset=train,
                       batch_size=args.batch_size,
                       shuffle=not args.no_shuffle)
    valid = DataLoader(dataset=valid,
                       batch_size=args.batch_size,
                       shuffle=not args.no_shuffle)
    test = DataLoader(dataset=test,
                      batch_size=args.batch_size,
                      shuffle=not args.no_shuffle)

    # PREPARES MODEL
    if args.verbose > 1:
        print('Prepares model ...')

    Model = ModelNoDict if args.model == 'embed-softmax' else ModelDict
    model = Model(args.num_embeding,
                  args.dim,
                  num_classes,
                  isHash=not args.no_hashembed,
                  seed=args.seed,
                  num_buckets=args.num_buckets,
                  append_weight=not args.no_append_weight,
                  aggregation_mode=args.agg_mode,
                  oldAlgorithm=args.old_hashembed)
    if args.cuda:
        model.cuda()

    if args.verbose > 1:
        model_parameters = filter(lambda p: p.requires_grad,
                                  model.parameters())
        nParams = sum([np.prod(p.size()) for p in model_parameters])
        print('Num parameters in model: {}'.format(nParams))
        print("Train on {} samples, validate on {} samples".format(
            len(train), len(valid)))

    # COMPILES
    trainer = ModuleTrainer(model)
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters())

    callbacks = []
    callbackMetric = "val_loss" if args.val_loss_callback else "val_acc_metric"
    if args.patience is not None:
        callbacks.append(
            EarlyStopping(patience=args.patience, monitor=callbackMetric))
    if args.plateau_reduce_lr is not None:
        callbacks.append(
            ReduceLROnPlateau(factor=args.plateau_reduce_lr[1],
                              patience=args.plateau_reduce_lr[0],
                              monitor=callbackMetric))
    if not args.no_checkpoint:
        modelDir = os.path.join(parentddir, 'models')
        filename = "{}.pth.tar".format(args.dataset)
        callbacks.append(
            ModelCheckpoint(modelDir,
                            filename=filename,
                            save_best_only=True,
                            max_save=1,
                            monitor=callbackMetric))

    metrics = [CategoricalAccuracy()]

    trainer.compile(loss=loss,
                    optimizer=optimizer,
                    callbacks=callbacks,
                    metrics=metrics)

    # TRAINS
    if args.verbose > 1:
        print('Trains ...')
    trainer.fit_loader(train,
                       val_loader=valid,
                       num_epoch=args.epochs,
                       verbose=args.verbose,
                       cuda_device=0 if args.cuda else -1)

    # EVALUATES
    print()
    evalTest = trainer.evaluate_loader(test,
                                       verbose=args.verbose,
                                       cuda_device=0 if args.cuda else -1)
    evalValid = trainer.evaluate_loader(valid,
                                        verbose=args.verbose,
                                        cuda_device=0 if args.cuda else -1)
    print("Last Model. Validation - Loss: {}, Accuracy: {}".format(
        evalValid['val_loss'], evalValid['val_acc_metric']))
    print("Last Model. Test - Loss: {}, Accuracy: {}".format(
        evalTest['val_loss'], evalTest['val_acc_metric']))

    if not args.no_checkpoint:
        checkpoint = torch.load(os.path.join(modelDir, filename))
        model.load_state_dict(checkpoint["state_dict"])
        evalTest = trainer.evaluate_loader(test,
                                           verbose=args.verbose,
                                           cuda_device=0 if args.cuda else -1)
        evalValid = trainer.evaluate_loader(valid,
                                            verbose=args.verbose,
                                            cuda_device=0 if args.cuda else -1)
        print("Best Model. Validation - Loss: {}, Accuracy: {}".format(
            evalValid['val_loss'], evalValid['val_acc_metric']))
        print("Best Model. Test - Loss: {}, Accuracy: {}".format(
            evalTest['val_loss'], evalTest['val_acc_metric']))

    if args.verbose > 1:
        print('Finished after {:.1f} min.'.format(
            (default_timer() - start) / 60))