Example #1
    def test_categorical_accuracy(self):
        metric = CategoricalAccuracy()
        predicted = Variable(torch.eye(10))
        expected = Variable(torch.LongTensor(list(range(10))))
        self.assertEqual(metric(predicted, expected), 100.0)

        # Set 1st column to ones
        predicted = Variable(torch.zeros(10, 10))
        predicted.data[:, 0] = torch.ones(10)
        self.assertEqual(metric(predicted, expected), 55.0)
Example #2
    def test_categorical_accuracy(self):
        metric = CategoricalAccuracy()
        predicted = torch.eye(10, requires_grad=True)
        # Integer tensors cannot require gradients, so the targets stay plain.
        expected = torch.tensor(list(range(10)), dtype=torch.long)
        self.assertEqual(metric(predicted, expected), 100.0)

        # Set 1st column to ones
        predicted = torch.zeros(10, 10)
        predicted[:, 0] = torch.ones(10)
        predicted.requires_grad_()
        # The metric accumulates across calls: (10 + 1) correct out of 20 is 55%.
        self.assertEqual(metric(predicted, expected), 55.0)
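Example #2 is the same test ported to the post-0.4 tensor API, where Variable is deprecated. Both versions rely on the metric being stateful: the second call sees 10 + 1 correct predictions out of 20 total, hence 55.0. A minimal sketch of an accumulating top-1 accuracy metric in that spirit (an illustration, not torchsample's actual implementation):

import torch

class RunningCategoricalAccuracy:
    """Minimal stand-in for CategoricalAccuracy: accumulates across calls."""

    def __init__(self):
        self.correct = 0
        self.total = 0

    def __call__(self, predicted, expected):
        # Compare the top-1 class per row against the integer targets.
        pred_classes = predicted.argmax(dim=1)
        self.correct += (pred_classes == expected).sum().item()
        self.total += expected.size(0)
        # Return the running accuracy as a percentage, like the assertions above.
        return 100.0 * self.correct / self.total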
Example #3
def run():
    device = 0 if torch.cuda.is_available() else -1
    config = BaseConfig()
    logging.info('%s_cross_entropy/ckpt.pth.tar' % config.result_dir)
    if os.path.exists('%s_cross_entropy/ckpt.pth.tar' % config.result_dir):
        return True
    logging.info("Triplet Trainer Not Return")
    create_dirs()
    tr_data_loader, val_data_loader, te_data_loader = loaders.data_loaders(
        shuffle=True)

    model = getattr(models,
                    config.network)(num_classes=len(tr_data_loader.dataset.y))

    criterion = CrossEntropyLoss()

    if device == 0:
        model.cuda()
        criterion.cuda()
    trainer = ModuleTrainer(model)
    epochs = config.epochs

    callbacks = [
        EarlyStopping(monitor='val_acc', patience=20),
        ModelCheckpoint('%s_cross_entropy' % config.result_dir,
                        save_best_only=True,
                        verbose=1),
        CSVLogger("%s_cross_entropy/logger.csv" % config.result_dir)
    ]

    metrics = [CategoricalAccuracy()]

    trainer.compile(loss=criterion, optimizer='adam', metrics=metrics)
    trainer.set_callbacks(callbacks)

    trainer.fit_loader(tr_data_loader,
                       val_loader=val_data_loader,
                       num_epoch=epochs,
                       verbose=2,
                       cuda_device=device)

    tr_loss = trainer.evaluate_loader(tr_data_loader, cuda_device=device)
    logging.info(tr_loss)
    val_loss = trainer.evaluate_loader(val_data_loader, cuda_device=device)
    logging.info(val_loss)
    te_loss = trainer.evaluate_loader(te_data_loader, cuda_device=device)
    logging.info(te_loss)
    with open('%s_cross_entropy' % config.log_path, "a") as f:
        f.write('Train:%s\nVal:%s\nTest:%s\n' %
                (str(tr_loss), str(val_loss), str(te_loss)))
Example #4
def run():
    from config import get_config
    config = get_config()

    import losses
    import models
    from utils.make_dirs import create_dirs
    from datasets import loaders
    from torchsample.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger
    from torchsample.metrics import CategoricalAccuracy
    from torchsample.modules import ModuleTrainer
    create_dirs()

    model = getattr(models, config.network).get_network()(
        channel=config.network_channel, embedding_size=config.embedding)
    criterion = getattr(losses, config.loss)()
    if config.cuda:
        model.cuda()
        criterion.cuda()
    trainer = ModuleTrainer(model)

    callbacks = [
        EarlyStopping(monitor='val_loss', patience=50),
        ModelCheckpoint(config.result_dir, save_best_only=True, verbose=1),
        CSVLogger("%s/logger.csv" % config.result_dir)
    ]
    metrics = []
    if config.loader_name == 'data_loaders':
        metrics.append(CategoricalAccuracy(top_k=1))
    trainer.compile(loss=criterion, optimizer='adam', metrics=metrics)
    trainer.set_callbacks(callbacks)

    def get_n_params(model):
        pp = 0
        for p in list(model.parameters()):
            nn = 1
            for s in list(p.size()):
                nn = nn * s
            pp += nn
        return pp

    with open("models.log", mode="a") as f:
        f.write("%s\n" % str(config.__dict__))
        f.write("%s\n" % str(model))
        f.write("%s\n" % str(get_n_params(model)))
        f.write("\n")

Example #5
model = Network()
trainer = ModuleTrainer(model)

callbacks = [
    EarlyStopping(patience=10),
    ReduceLROnPlateau(factor=0.5, patience=5)
]
regularizers = [
    L1Regularizer(scale=1e-3, module_filter='conv*'),
    L2Regularizer(scale=1e-5, module_filter='fc*')
]
constraints = [UnitNorm(frequency=3, unit='batch', module_filter='fc*')]
initializers = [XavierUniform(bias=False, module_filter='fc*')]
metrics = [CategoricalAccuracy(top_k=3)]

trainer.compile(loss='nll_loss',
                optimizer='adadelta',
                regularizers=regularizers,
                constraints=constraints,
                initializers=initializers,
                metrics=metrics)

#summary = trainer.summary([1,28,28])
#print(summary)

trainer.fit(x_train,
            y_train,
            val_data=(x_test, y_test),
            num_epoch=20,
            batch_size=128,
            verbose=1)
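Because the trainer is compiled with loss='nll_loss', the Network above must emit log-probabilities. Its definition is not shown here, but a hypothetical minimal version consistent with the commented-out [1, 28, 28] summary hint could look like this:

import torch.nn as nn
import torch.nn.functional as F

class Network(nn.Module):
    # Hypothetical stand-in; the original Network definition is not shown.
    def __init__(self, num_classes=10):
        super().__init__()
        self.fc = nn.Linear(28 * 28, num_classes)

    def forward(self, x):
        x = x.view(x.size(0), -1)
        # nll_loss expects log-probabilities, so finish with log_softmax.
        return F.log_softmax(self.fc(x), dim=1)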
Example #6
def run():
    from config import get_config
    config = get_config()

    print('%s/ckpt.pth.tar' % config.result_dir)
    if os.path.exists('%s/ckpt.pth.tar' % config.result_dir):
        return True
    print("Contrastive Trainer Not Return")
    import models
    from utils.make_dirs import create_dirs
    from datasets import loaders
    from torchsample.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger
    from torchsample.metrics import CategoricalAccuracy
    from torchsample.modules import ModuleTrainer
    create_dirs()
    cuda_device = -1
    tr_data_loader, val_data_loader, te_data_loader = loaders.online_pair_loaders()

    model = getattr(models, config.network).get_network()(channel=config.network_channel,
                                                          embedding_size=config.embedding)
    from losses.online_cosine import OnlineCosineLoss
    from datasets.data_utils import AllPositivePairSelector, HardNegativePairSelector

    margin = 0.5

    if args.selector == 'AllPositivePairSelector':
        criterion = OnlineCosineLoss(margin, AllPositivePairSelector())
    elif args.selector == 'HardNegativePairSelector':
        criterion = OnlineCosineLoss(margin, HardNegativePairSelector())
    else:
        raise ValueError('Unknown pair selector: %s' % args.selector)

    if config.cuda:
        model.cuda()
        criterion.cuda()
    trainer = ModuleTrainer(model)
    epochs = config.epochs

    callbacks = [EarlyStopping(monitor='val_loss', patience=50),
                 ModelCheckpoint(config.result_dir, save_best_only=True, verbose=1),
                 CSVLogger("%s/logger.csv" % config.result_dir)]

    metrics = []
    if config.loader_name == 'data_loaders' and 'Angle' not in config.loss:
        metrics.append(CategoricalAccuracy(top_k=1))
    trainer.compile(loss=criterion, optimizer='adam', metrics=metrics)
    trainer.set_callbacks(callbacks)
    if config.cuda:
        cuda_device = 0
    start_time = time.time()
    trainer.fit_loader(tr_data_loader, val_loader=val_data_loader, num_epoch=epochs, verbose=2,
                       cuda_device=cuda_device)
    end_time = time.time()

    with open("%s/app.log" % config.result_dir, mode="a") as f:
        f.write("%s\n" % str(model))
        f.write("%s %s\n" % (config.loss, str(end_time - start_time)))
    tr_loss = trainer.evaluate_loader(tr_data_loader, cuda_device=cuda_device)
    print(tr_loss)
    val_loss = trainer.evaluate_loader(val_data_loader, cuda_device=cuda_device)
    te_loss = trainer.evaluate_loader(te_data_loader, cuda_device=cuda_device)
    print(te_loss)
    with open(config.log_path, "a") as f:
        f.write('Train:%s\nVal:%s\nTest:%s\n' % (str(tr_loss), str(val_loss), str(te_loss)))

    tr_data_loader, val_data_loader, te_data_loader = loaders.data_loaders(train=False, val=True)

    tr_y_pred = trainer.predict_loader(tr_data_loader, cuda_device=cuda_device)
    save_embeddings(tr_y_pred, '%s/train_embeddings.csv' % config.result_dir)
    save_labels(tr_data_loader, '%s/train_labels.csv' % config.result_dir)

    val_y_pred = trainer.predict_loader(val_data_loader, cuda_device=cuda_device)
    save_embeddings(val_y_pred, '%s/val_embeddings.csv' % config.result_dir)
    save_labels(val_data_loader, '%s/val_labels.csv' % config.result_dir)

    te_y_pred = trainer.predict_loader(te_data_loader, cuda_device=cuda_device)
    save_embeddings(te_y_pred, '%s/test_embeddings.csv' % config.result_dir)
    save_labels(te_data_loader, '%s/test_labels.csv' % config.result_dir)
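save_embeddings and save_labels are helpers defined elsewhere in the source module. A minimal sketch of what they might do, assuming predict_loader returns batched embedding tensors (the names, signatures, and shapes here are assumptions):

import numpy as np
import torch

def save_embeddings(y_pred, path):
    # Hypothetical helper: stack batched predictions and write them as CSV rows.
    batches = y_pred if isinstance(y_pred, (list, tuple)) else [y_pred]
    emb = torch.cat([b.detach().cpu() for b in batches])
    np.savetxt(path, emb.numpy(), delimiter=',')

def save_labels(data_loader, path):
    # Hypothetical helper: collect the labels the loader yields, in order.
    labels = torch.cat([y for _, y in data_loader])
    np.savetxt(path, labels.numpy(), delimiter=',', fmt='%d')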
Example #7
    suffix = '-' + args.suffix if args.suffix else ''

    if args.squeeze_excitation:
        suffix = '-se' + suffix

    callbacks = [
        ReduceLROnPlateau(factor=0.5, patience=5, verbose=1),
        ModelCheckpoint(directory='checkpoints',
                        filename='fishnet' + suffix + '_{epoch}_{loss}.pth',
                        save_best_only=True,
                        verbose=1)
    ]

    initializers = [XavierUniform(bias=False, module_filter='class*')]
    metrics = [CategoricalAccuracy()]

    def species_length_loss(input, target):
        # The first 8 columns are species scores; column 8 is the length regression.
        input_species = input[:, :8]
        input_length = input[:, 8]

        target_species = target[:, 0].long()
        target_length = target[:, 1]

        # Zero the predicted length for class-7 targets before the regression term.
        input_length = input_length * (target_species != 7).float()

        # Down-weighted MSE on length plus NLL on the species scores
        # (nll_loss expects log-probabilities).
        loss = nn.MSELoss()(input_length, target_length) * 1e-5 + F.nll_loss(
            input_species, target_species)

        return loss
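A quick sanity check of species_length_loss on dummy tensors; the shapes follow the slicing above (9 outputs per sample, targets as [species, length] pairs), and the log_softmax reflects that nll_loss expects log-probabilities:

import torch
import torch.nn as nn
import torch.nn.functional as F

batch = 4
raw = torch.randn(batch, 9)
# Species scores as log-probabilities, length prediction appended as column 8.
inp = torch.cat([F.log_softmax(raw[:, :8], dim=1), raw[:, 8:]], dim=1)
tgt = torch.stack([
    torch.randint(0, 8, (batch,)).float(),  # species id in column 0
    torch.rand(batch) * 100.0,              # length in column 1
], dim=1)
print(species_length_loss(inp, tgt))  # a scalar loss tensor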
Example #8
def main(args):
    """Simply redirrcts to the correct function."""
    start = default_timer()

    args.cuda = not args.no_cuda and torch.cuda.is_available()

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    print("-------------------------------------------------")
    if args.verbose > 0:
        print("Ran on {}".format(time.strftime("%Y-%m-%d %H:%M")))
        print()

    print('Parameters: {}'.format(vars(args)))
    print()

    # PREPARES DATA
    if args.verbose > 1:
        print('Prepares data ...')
    train, valid, test = train_valid_test_datasets(
        args.dataset,
        validSize=args.validation_size,
        isHashingTrick=not args.dictionnary,
        nFeaturesRange=args.num_features_range,
        ngramRange=args.ngrams_range,
        seed=args.seed,
        num_words=args.num_embeding,
        specificArgs={'dictionnary': ['num_words']})

    num_classes = len(train.classes)
    train = DataLoader(dataset=train,
                       batch_size=args.batch_size,
                       shuffle=not args.no_shuffle)
    valid = DataLoader(dataset=valid,
                       batch_size=args.batch_size,
                       shuffle=not args.no_shuffle)
    test = DataLoader(dataset=test,
                      batch_size=args.batch_size,
                      shuffle=not args.no_shuffle)

    # PREPARES MODEL
    if args.verbose > 1:
        print('Prepares model ...')

    Model = ModelNoDict if args.model == 'embed-softmax' else ModelDict
    model = Model(args.num_embeding,
                  args.dim,
                  num_classes,
                  isHash=not args.no_hashembed,
                  seed=args.seed,
                  num_buckets=args.num_buckets,
                  append_weight=not args.no_append_weight,
                  aggregation_mode=args.agg_mode,
                  oldAlgorithm=args.old_hashembed)
    if args.cuda:
        model.cuda()

    if args.verbose > 1:
        model_parameters = filter(lambda p: p.requires_grad,
                                  model.parameters())
        nParams = sum([np.prod(p.size()) for p in model_parameters])
        print('Num parameters in model: {}'.format(nParams))
        print("Train on {} samples, validate on {} samples".format(
            len(train), len(valid)))

    # COMPILES
    trainer = ModuleTrainer(model)
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters())

    callbacks = []
    callbackMetric = "val_loss" if args.val_loss_callback else "val_acc_metric"
    if args.patience is not None:
        callbacks.append(
            EarlyStopping(patience=args.patience, monitor=callbackMetric))
    if args.plateau_reduce_lr is not None:
        callbacks.append(
            ReduceLROnPlateau(factor=args.plateau_reduce_lr[1],
                              patience=args.plateau_reduce_lr[0],
                              monitor=callbackMetric))
    if not args.no_checkpoint:
        modelDir = os.path.join(parentddir, 'models')
        filename = "{}.pth.tar".format(args.dataset)
        callbacks.append(
            ModelCheckpoint(modelDir,
                            filename=filename,
                            save_best_only=True,
                            max_save=1,
                            monitor=callbackMetric))

    metrics = [CategoricalAccuracy()]

    trainer.compile(loss=loss,
                    optimizer=optimizer,
                    callbacks=callbacks,
                    metrics=metrics)

    # TRAINS
    if args.verbose > 1:
        print('Trains ...')
    trainer.fit_loader(train,
                       val_loader=valid,
                       num_epoch=args.epochs,
                       verbose=args.verbose,
                       cuda_device=0 if args.cuda else -1)

    # EVALUATES
    print()
    evalTest = trainer.evaluate_loader(test,
                                       verbose=args.verbose,
                                       cuda_device=0 if args.cuda else -1)
    evalValid = trainer.evaluate_loader(valid,
                                        verbose=args.verbose,
                                        cuda_device=0 if args.cuda else -1)
    print("Last Model. Validation - Loss: {}, Accuracy: {}".format(
        evalValid['val_loss'], evalValid['val_acc_metric']))
    print("Last Model. Test - Loss: {}, Accuracy: {}".format(
        evalTest['val_loss'], evalTest['val_acc_metric']))

    if not args.no_checkpoint:
        checkpoint = torch.load(os.path.join(modelDir, filename))
        model.load_state_dict(checkpoint["state_dict"])
        evalTest = trainer.evaluate_loader(test,
                                           verbose=args.verbose,
                                           cuda_device=0 if args.cuda else -1)
        evalValid = trainer.evaluate_loader(valid,
                                            verbose=args.verbose,
                                            cuda_device=0 if args.cuda else -1)
        print("Best Model. Validation - Loss: {}, Accuracy: {}".format(
            evalValid['val_loss'], evalValid['val_acc_metric']))
        print("Best Model. Test - Loss: {}, Accuracy: {}".format(
            evalTest['val_loss'], evalTest['val_acc_metric']))

    if args.verbose > 1:
        print('Finished after {:.1f} min.'.format(
            (default_timer() - start) / 60))
Example #9
def run():
    from config import get_config
    config = get_config()
    result_dir = config.result_dir
    print('%s/train_embeddings.csv' % result_dir)
    if os.path.exists(
            '%s/train_embeddings.csv' % result_dir) and os.path.exists(
                '%s/test_embeddings.csv' % result_dir):
        return True
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    print("Saved Module Trainer Not Return")
    import losses
    import models
    from utils.make_dirs import create_dirs
    from datasets import loaders
    from torchsample.modules import ModuleTrainer
    create_dirs()
    cuda_device = -1

    model = getattr(models, config.network).get_network()(
        channel=config.network_channel, embedding_size=config.embedding)

    check_point = os.path.join(config.result_dir, "ckpt.pth.tar")
    if os.path.isfile(check_point):
        print("=> loading checkpoint '{}'".format(check_point))
        checkpoint = torch.load(check_point)
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            check_point, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(check_point))
    criterion = getattr(losses, config.loss)()
    if config.cuda:
        model.cuda()
        criterion.cuda()
    trainer = ModuleTrainer(model)
    metrics = []
    if config.loader_name == 'data_loaders':
        metrics.append(CategoricalAccuracy(top_k=1))
    trainer.compile(loss=criterion, optimizer='adam', metrics=metrics)

    if config.cuda:
        cuda_device = 0
    tr_data_loader, val_data_loader, te_data_loader = getattr(
        loaders, config.loader_name)(train=False, val=True)

    tr_loss = trainer.evaluate_loader(tr_data_loader, cuda_device=cuda_device)
    val_loss = trainer.evaluate_loader(val_data_loader,
                                       cuda_device=cuda_device)
    te_loss = trainer.evaluate_loader(te_data_loader, cuda_device=cuda_device)

    tr_y_pred = trainer.predict_loader(tr_data_loader, cuda_device=cuda_device)
    save_embeddings(tr_y_pred, '%s/train_embeddings.csv' % result_dir)
    save_labels(tr_data_loader, '%s/train_labels.csv' % result_dir)

    val_y_pred = trainer.predict_loader(val_data_loader,
                                        cuda_device=cuda_device)
    save_embeddings(val_y_pred, '%s/val_embeddings.csv' % result_dir)
    save_labels(val_data_loader, '%s/val_labels.csv' % result_dir)

    te_y_pred = trainer.predict_loader(te_data_loader, cuda_device=cuda_device)
    save_embeddings(te_y_pred, '%s/test_embeddings.csv' % result_dir)
    save_labels(te_data_loader, '%s/test_labels.csv' % result_dir)

    with open(config.log_path.replace("results", "best_results"), "a") as f:
        f.write('Best Train:%s\nBest Val:%s\nBest Test:%s\n' %
                (str(tr_loss), str(val_loss), str(te_loss)))