def run():
    import os
    import torch
    from torch.nn import CrossEntropyLoss
    from config import get_config
    config = get_config()
    print('%s/train_embeddings.csv' % config.result_dir)
    result_dir = config.result_dir  #.replace("results", "best_results")
    print('%s/train_embeddings.csv' % result_dir)
    if os.path.exists(
            '%s/train_embeddings.csv' % result_dir) and os.path.exists(
                '%s/test_embeddings.csv' % result_dir):
        return True
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    print("Saved Module Trainer Not Return")
    import losses
    import models
    from utils.make_dirs import create_dirs
    from datasets import loaders
    from torchsample.modules import ModuleTrainer
    create_dirs()
    cuda_device = -1

    model = getattr(models, config.network).get_network()(
        channel=config.network_channel, embedding_size=config.embedding)
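    # getattr() resolves the network module named in the config, and
    # get_network() returns the class to instantiate -- a small config-driven
    # registry pattern used throughout these examples.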

    check_point = os.path.join(config.result_dir, "ckpt.pth.tar")
    if os.path.isfile(check_point):
        print("=> loading checkpoint '{}'".format(check_point))
        checkpoint = torch.load(check_point)
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            check_point, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(check_point))
    #criterion = getattr(losses, config.loss)()
    criterion = CrossEntropyLoss()
    if config.cuda:
        model.cuda()
        criterion.cuda()
    trainer = ModuleTrainer(model)

    trainer.compile(loss=criterion, optimizer='adam')

    if config.cuda:
        cuda_device = 0
    tr_data_loader, val_data_loader, te_data_loader = getattr(
        loaders, config.loader_name)(train=False, val=True)

    tr_y_pred = trainer.predict_loader(tr_data_loader, cuda_device=cuda_device)
    save_embeddings(tr_y_pred, '%s/train_embeddings.csv' % result_dir)
    save_labels(tr_data_loader, '%s/train_labels.csv' % result_dir)

    val_y_pred = trainer.predict_loader(val_data_loader,
                                        cuda_device=cuda_device)
    save_embeddings(val_y_pred, '%s/val_embeddings.csv' % result_dir)
    save_labels(val_data_loader, '%s/val_labels.csv' % result_dir)

    te_y_pred = trainer.predict_loader(te_data_loader, cuda_device=cuda_device)
    save_embeddings(te_y_pred, '%s/test_embeddings.csv' % result_dir)
    save_labels(te_data_loader, '%s/test_labels.csv' % result_dir)
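# Note: save_embeddings and save_labels used above are project-specific helpers
# that are not shown in this listing. A minimal sketch of what they might look
# like, assuming predict_loader returns a tensor of embeddings and the dataset
# stores its labels in `dataset.y` (both assumptions, not the original code):
import numpy as np

def save_embeddings(y_pred, path):
    # One embedding vector per CSV row.
    np.savetxt(path, y_pred.detach().cpu().numpy(), delimiter=',')

def save_labels(loader, path):
    # Dump the dataset's label vector so rows line up with the embeddings.
    np.savetxt(path, np.asarray(loader.dataset.y), delimiter=',')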
Example #2
def run():
    from config import get_config
    config = get_config()

    import losses
    import models
    from utils.make_dirs import create_dirs
    from datasets import loaders
    from torchsample.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger
    from torchsample.metrics import CategoricalAccuracy
    from torchsample.modules import ModuleTrainer
    create_dirs()

    model = getattr(models, config.network).get_network()(
        channel=config.network_channel, embedding_size=config.embedding)
    criterion = getattr(losses, config.loss)()
    if config.cuda:
        model.cuda()
        criterion.cuda()
    trainer = ModuleTrainer(model)

    callbacks = [
        EarlyStopping(monitor='val_loss', patience=50),
        ModelCheckpoint(config.result_dir, save_best_only=True, verbose=1),
        CSVLogger("%s/logger.csv" % config.result_dir)
    ]
    metrics = []
    if config.loader_name == 'data_loaders':
        metrics.append(CategoricalAccuracy(top_k=1))
    trainer.compile(loss=criterion, optimizer='adam', metrics=metrics)
    trainer.set_callbacks(callbacks)

    def get_n_params(model):
        pp = 0
        for p in list(model.parameters()):
            nn = 1
            for s in list(p.size()):
                nn = nn * s
            pp += nn
        return pp
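    # Equivalent one-liner: sum(p.numel() for p in model.parameters())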

    with open("models.log", mode="a") as f:
        f.write("%s\n" % str(config.__dict__))
        f.write("%s\n" % str(model))
        f.write("%s\n" % str(get_n_params(model)))
        f.write("\n")
Example #3
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        #x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


model = Network()
trainer = ModuleTrainer(model)

trainer.compile(loss='nll_loss',
                optimizer='adadelta',
                regularizers=[reg.L1Regularizer(1e-4)])

trainer.fit(x_train, y_train, 
            val_data=(x_test, y_test),
            num_epoch=3, 
            batch_size=128,
            verbose=1)

ypred = trainer.predict(x_train)
print(ypred.size())

eval_loss = trainer.evaluate(x_train, y_train)
Example #4
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


model = Network()
trainer = ModuleTrainer(model)

callbacks = [EarlyStopping(patience=10),
             ReduceLROnPlateau(factor=0.5, patience=5)]
regularizers = [L1Regularizer(scale=1e-3, module_filter='conv*'),
                L2Regularizer(scale=1e-5, module_filter='fc*')]
constraints = [UnitNorm(frequency=3, unit='batch', module_filter='fc*')]
initializers = [XavierUniform(bias=False, module_filter='fc*')]
metrics = [CategoricalAccuracy(top_k=3)]
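# module_filter is a glob matched against layer names, so the regularizers,
# constraints, and initializers above touch only the conv* or fc* modules
# they name.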

trainer.compile(loss='nll_loss',
                optimizer='adadelta',
                regularizers=regularizers,
                constraints=constraints,
                initializers=initializers,
                metrics=metrics)
trainer.set_callbacks(callbacks)
Example #5
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        #x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


model = Network()
trainer = ModuleTrainer(model)

trainer.compile(loss='nll_loss',
                optimizer='adadelta',
                regularizers=[reg.L1Regularizer(1e-4)])

trainer.fit(x_train,
            y_train,
            val_data=(x_test, y_test),
            num_epoch=3,
            batch_size=128,
            verbose=1)

ypred = trainer.predict(x_train)
print(ypred.size())
Example #6
train_dataset = SLDetection(aimg,
                            tile=args.dim,
                            st=args.dim - 300,
                            fcount=args.fcount,
                            aug=args.aug,
                            ct=criterion)
val_dataset = SLDetection(vimg,
                          tile=args.dim,
                          st=args.dim - 300,
                          fcount=args.fcount,
                          aug=args.aug,
                          ct=criterion)
train_loader = DataLoader(train_dataset, batch_size=args.batch_size)
val_loader = DataLoader(val_dataset, batch_size=args.batch_size)

trainer = ModuleTrainer(model)

chk = ModelCheckpoint(directory="weights",
                      monitor="loss",
                      filename='trainer_' + str(args.iid) +
                      '_{epoch}_{loss}.pth.tar',
                      verbose=1)

optimizer = optim.SGD(model.parameters(),
                      lr=args.lr,
                      momentum=args.momentum,
                      weight_decay=args.weight_decay)
trainer.compile(
    loss=criterion,
    optimizer=optimizer,
    #optimizer='adadelta',
    callbacks=[chk])
Example #7
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x, y, z):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x), F.log_softmax(x), F.log_softmax(x)

# with one loss function given
model = Network()
trainer = ModuleTrainer(model)

regularizers = [regs.L1Regularizer(1e-4, 'fc*'), regs.L2Regularizer(1e-5, 'conv*')]
constraints = [cons.UnitNorm(5, 'batch', 'fc*'),
               cons.MaxNorm(5, 0, 'batch', 'conv*')]
callbacks = [cbks.ReduceLROnPlateau(monitor='loss', verbose=1)]

trainer.compile(loss='nll_loss',
                optimizer='adadelta',
                regularizers=regularizers,
                constraints=constraints,
                callbacks=callbacks)

trainer.fit([x_train, x_train, x_train], 
            [y_train, y_train, y_train],
            num_epoch=3,
            batch_size=128,
            verbose=1)
Example #8
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


model = Network()
trainer = ModuleTrainer(model)

callbacks = [EarlyStopping(patience=10),
             ReduceLROnPlateau(factor=0.5, patience=5)]
regularizers = [L1Regularizer(scale=1e-3, module_filter='conv*'),
                L2Regularizer(scale=1e-5, module_filter='fc*')]
constraints = [UnitNorm(frequency=3, unit='batch', module_filter='fc*')]
initializers = [XavierUniform(bias=False, module_filter='fc*')]
metrics = [CategoricalAccuracy(top_k=3)]

trainer.compile(loss='nll_loss',
                optimizer='adadelta',
                regularizers=regularizers,
                constraints=constraints,
                initializers=initializers,
                metrics=metrics)
trainer.set_callbacks(callbacks)
Example #9
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


model = Network()
trainer = ModuleTrainer(model)

callbacks = [
    EarlyStopping(patience=10),
    ReduceLROnPlateau(factor=0.5, patience=5)
]
regularizers = [
    L1Regularizer(scale=1e-3, module_filter='conv*'),
    L2Regularizer(scale=1e-5, module_filter='fc*')
]
constraints = [
    UnitNorm(frequency=3, unit='batch', module_filter='fc*'),
    MaxNorm(value=2., lagrangian=True, scale=1e-2, module_filter='conv*')
]
initializers = [XavierUniform(bias=False, module_filter='fc*')]
metrics = [CategoricalAccuracy(top_k=3)]
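# The compile call for this example is cut off in the source; following the
# sibling examples, a plausible completion is:
#
#     trainer.compile(loss='nll_loss',
#                     optimizer='adadelta',
#                     regularizers=regularizers,
#                     constraints=constraints,
#                     initializers=initializers,
#                     metrics=metrics)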
Example #10
def run():
    import os
    import torch
    from config import get_config
    from torchsample.metrics import CategoricalAccuracy
    config = get_config()
    print('%s/train_embeddings.csv' % config.result_dir)
    result_dir = config.result_dir
    print('%s/train_embeddings.csv' % result_dir)
    if os.path.exists(
            '%s/train_embeddings.csv' % result_dir) and os.path.exists(
                '%s/test_embeddings.csv' % result_dir):
        return True
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    print("Saved Module Trainer Not Return")
    import losses
    import models
    from utils.make_dirs import create_dirs
    from datasets import loaders
    from torchsample.modules import ModuleTrainer
    create_dirs()
    cuda_device = -1

    model = getattr(models, config.network).get_network()(
        channel=config.network_channel, embedding_size=config.embedding)

    check_point = os.path.join(config.result_dir, "ckpt.pth.tar")
    if os.path.isfile(check_point):
        print("=> loading checkpoint '{}'".format(check_point))
        checkpoint = torch.load(check_point)
        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            check_point, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(check_point))
    criterion = getattr(losses, config.loss)()
    if config.cuda:
        model.cuda()
        criterion.cuda()
    trainer = ModuleTrainer(model)
    metrics = []
    if config.loader_name == 'data_loaders':
        metrics.append(CategoricalAccuracy(top_k=1))
    trainer.compile(loss=criterion, optimizer='adam', metrics=metrics)

    if config.cuda:
        cuda_device = 0
    tr_data_loader, val_data_loader, te_data_loader = getattr(
        loaders, config.loader_name)(train=False, val=True)

    tr_loss = trainer.evaluate_loader(tr_data_loader, cuda_device=cuda_device)
    val_loss = trainer.evaluate_loader(val_data_loader,
                                       cuda_device=cuda_device)
    te_loss = trainer.evaluate_loader(te_data_loader, cuda_device=cuda_device)

    tr_y_pred = trainer.predict_loader(tr_data_loader, cuda_device=cuda_device)
    save_embeddings(tr_y_pred, '%s/train_embeddings.csv' % result_dir)
    save_labels(tr_data_loader, '%s/train_labels.csv' % result_dir)

    val_y_pred = trainer.predict_loader(val_data_loader,
                                        cuda_device=cuda_device)
    save_embeddings(val_y_pred, '%s/val_embeddings.csv' % result_dir)
    save_labels(val_data_loader, '%s/val_labels.csv' % result_dir)

    te_y_pred = trainer.predict_loader(te_data_loader, cuda_device=cuda_device)
    save_embeddings(te_y_pred, '%s/test_embeddings.csv' % result_dir)
    save_labels(te_data_loader, '%s/test_labels.csv' % result_dir)

    with open(config.log_path.replace("results", "best_results"), "a") as f:
        f.write('Best Train %s\nBest Val:%s\nBest Test:%s\n' %
                (str(tr_loss), str(val_loss), te_loss))
Example #11
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


model = Network()
trainer = ModuleTrainer(model)


callbacks = [EarlyStopping(patience=10),
             ReduceLROnPlateau(factor=0.5, patience=5)]
regularizers = [L1Regularizer(scale=1e-3, module_filter='conv*'),
                L2Regularizer(scale=1e-5, module_filter='fc*')]
constraints = [UnitNorm(frequency=3, unit='batch', module_filter='fc*')]
initializers = [XavierUniform(bias=False, module_filter='fc*')]
metrics = [CategoricalAccuracy(top_k=3)]

trainer.compile(loss='nll_loss',
                optimizer='adadelta',
                regularizers=regularizers,
                constraints=constraints,
                initializers=initializers,
                metrics=metrics)
Example #12
def main(retrain=False, test_datasets={}, vis_per_img=10):
    res_dir = os.path.join(out_dir, 'snapshots')
    if not os.path.exists(res_dir):
        os.makedirs(res_dir)

    # shutil.copyfile(os.path.join('configs', '%s.py' % exp_name), os.path.join(out_dir, 'config.py'))
    with open(os.path.join(out_dir, 'config.py'), 'w') as f:
        params_names = config.__dir__()
        for param_name in params_names:
            if param_name.startswith('__'):
                continue
            param_value = getattr(config, param_name)
            if isinstance(param_value, str):
                f.write('%s = "%s"\n' % (param_name, param_value))
            else:
                f.write('%s = %s\n' % (param_name, param_value))

    dataset = datasets.PixelLinkIC15Dataset(
        config.train_images_dir,
        config.train_labels_dir,
        all_trains=config.all_trains,
        version=config.version,
        mean=config.mean,
        use_rotate=config.use_rotate,
        use_crop=config.use_crop,
        image_size_train=config.image_size_train,
        image_size_test=config.image_size_test)
    # sampler = WeightedRandomSampler([1/len(dataset)]*len(dataset), config.batch_size, replacement=True)
    # dataloader = DataLoader(dataset, batch_size=config.batch_size, sampler=sampler)
    dataloader = DataLoader(dataset,
                            config.batch_size,
                            shuffle=True,
                            num_workers=6)
    model = net.PixelLinkNet(
        **config.net_params)  #net.Net(config.version, config.dilation)

    if config.gpu:
        device = torch.device("cuda:0")
        model = model.cuda()
        if config.multi_gpu:
            model = nn.DataParallel(model)
    else:
        device = torch.device("cpu")

    loss = PixelLinkLoss(config.pixel_weight, config.link_weight,
                         config.neg_pos_ratio)
    optimizer = optim.SGD(model.parameters(),
                          lr=config.learning_rate1,
                          momentum=config.momentum,
                          weight_decay=config.weight_decay)
    epoch_milestone = math.ceil(config.step2_start / len(dataloader))
    print('LR schedule')
    print('[%05d - %05d] : %E' % (0, epoch_milestone, config.learning_rate1))
    print('[%05d - %05d] : %E' %
          (epoch_milestone, config.epoch, config.learning_rate2))
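    # MultiStepLR multiplies the LR by gamma at each milestone, so a gamma of
    # learning_rate2 / learning_rate1 switches the optimizer from
    # learning_rate1 to learning_rate2 at epoch_milestone.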
    scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, [epoch_milestone],
        config.learning_rate2 / config.learning_rate1)

    global trainer, callbacks_cont
    tqdm = callbacks.TQDM()
    log_path = os.path.join(out_dir, 'log_train.csv')
    index = 0
    while os.path.exists(log_path):
        index += 1
        log_path = os.path.join(out_dir, 'log_train_%02d.csv' % index)

    logger = callbacks.CSVLogger(log_path)
    trainer = ModuleTrainer(model)
    trainer.compile(optimizer, loss, callbacks=[tqdm, logger])
    callbacks_cont = callbacks.CallbackContainer(trainer._callbacks)
    callbacks_cont.set_trainer(trainer)
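    # The CallbackContainer is wired up manually because training goes through
    # the custom train() loop at the bottom rather than trainer.fit_loader().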

    if retrain:
        # find latest snapshot
        snapshots_dir = os.path.join(out_dir, 'snapshots')
        model_files = glob.glob(snapshots_dir + '/epoch_*')
        if model_files:
            resume_path = sorted(model_files)[-1]
            start_epoch = 1 + int(
                os.path.basename(resume_path)[len('epoch_'):-4])
            print('Loading snapshot from : %s' % resume_path)
            checkpoint = torch.load(resume_path)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
        else:
            # couldn't find snapshots
            start_epoch = 0
    else:
        start_epoch = 0

    train(config.epoch,
          dataloader,
          model,
          loss,
          optimizer,
          scheduler,
          device,
          start_epoch=start_epoch,
          test_datasets=test_datasets,
          vis_per_img=vis_per_img)
Example #13
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x, y, z):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x), F.log_softmax(x), F.log_softmax(x)

# with one loss function given
model = Network()
trainer = ModuleTrainer(model)

trainer.compile(loss='nll_loss',
                optimizer='adadelta')

trainer.fit([x_train, x_train, x_train], 
            [y_train, y_train, y_train],
            num_epoch=3, 
            batch_size=128,
            verbose=1)

yp1, yp2, yp3 = trainer.predict([x_train, x_train, x_train])
print(yp1.size(), yp2.size(), yp3.size())

eval_loss = trainer.evaluate([x_train, x_train, x_train],
                             [y_train, y_train, y_train])
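# With a single loss function and multiple targets, ModuleTrainer applies the
# same loss to each (output, target) pair and combines them into one objective.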
Example #14
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


model = Network()
trainer = ModuleTrainer(model)

callbacks = [
    EarlyStopping(patience=20),
    ReduceLROnPlateau(factor=0.5, patience=5),
    CSVLogger('logs.csv')
]
regularizers = [
    L1Regularizer(scale=1e-3, module_filter='conv*'),
    L2Regularizer(scale=1e-5, module_filter='fc*')
]
constraints = [UnitNorm(frequency=3, unit='batch', module_filter='fc*')]
initializers = [XavierUniform(bias=False, module_filter='fc*')]
metrics = [CategoricalAccuracy(top_k=1)]

trainer.compile(loss='nll_loss',
                optimizer='adadelta',
                regularizers=regularizers,
                constraints=constraints,
                initializers=initializers,
                metrics=metrics)
trainer.set_callbacks(callbacks)
Example #15
def run():
    device = 0 if torch.cuda.is_available() else -1
    config = BaseConfig()
    logging.info('%s_cross_entropy/ckpt.pth.tar' % config.result_dir)
    if os.path.exists('%s_cross_entropy/ckpt.pth.tar' % config.result_dir):
        return True
    logging.info("Triplet Trainer Not Return")
    create_dirs()
    tr_data_loader, val_data_loader, te_data_loader = loaders.data_loaders(
        shuffle=True)

    model = getattr(models,
                    config.network)(num_classes=len(tr_data_loader.dataset.y))

    criterion = CrossEntropyLoss()

    if device == 0:
        model.cuda()
        criterion.cuda()
    trainer = ModuleTrainer(model)
    epochs = config.epochs

    callbacks = [
        EarlyStopping(monitor='val_acc', patience=20),
        ModelCheckpoint('%s_cross_entropy' % config.result_dir,
                        save_best_only=True,
                        verbose=1),
        CSVLogger("%s_cross_entropy/logger.csv" % config.result_dir)
    ]

    metrics = [CategoricalAccuracy()]

    trainer.compile(loss=criterion, optimizer='adam', metrics=metrics)
    trainer.set_callbacks(callbacks)

    trainer.fit_loader(tr_data_loader,
                       val_loader=val_data_loader,
                       num_epoch=epochs,
                       verbose=2,
                       cuda_device=device)

    tr_loss = trainer.evaluate_loader(tr_data_loader, cuda_device=device)
    logging.info(tr_loss)
    val_loss = trainer.evaluate_loader(val_data_loader, cuda_device=device)
    logging.info(val_loss)
    te_loss = trainer.evaluate_loader(te_data_loader, cuda_device=device)
    logging.info(te_loss)
    with open('%s_cross_entropy' % config.log_path, "a") as f:
        f.write('Train %s\nVal:%s\nTest:%s\n' %
                (str(tr_loss), str(val_loss), te_loss))
Example #16

train_loader = data.DataLoader(TGSSaltDataset(train_images, train_masks),
                               batch_size=25,
                               shuffle=True)
val_loader = data.DataLoader(TGSSaltDataset(validate_images, validate_masks),
                             batch_size=50,
                             shuffle=False)

learning_rate = 1e-4
loss_fn = torch.nn.BCELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

trainer = ModuleTrainer(model)

callbacks = [
    EarlyStopping(patience=30),
    ReduceLROnPlateau(factor=0.5, patience=10)
]
regularizers = [
    L1Regularizer(scale=1e-3, module_filter='*'),
    L2Regularizer(scale=1e-5, module_filter='*')
]
constraints = [UnitNorm(frequency=3, unit='batch', module_filter='*')]
initializers = [XavierUniform(bias=False, module_filter='*')]
metrics = [MyIouMetric()]

trainer.compile(
    loss=loss_fn,
    optimizer=optimizer,
    regularizers=regularizers,
    constraints=constraints,
    initializers=initializers,
    callbacks=callbacks,
    metrics=metrics)
Example #17
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


model = Network()
trainer = ModuleTrainer(model)

callbacks = [
    EarlyStopping(patience=10),
    ReduceLROnPlateau(factor=0.5, patience=5)
]
regularizers = [
    L1Regularizer(scale=1e-3, module_filter='conv*'),
    L2Regularizer(scale=1e-5, module_filter='fc*')
]
constraints = [UnitNorm(frequency=3, unit='batch', module_filter='fc*')]
initializers = [XavierUniform(bias=False, module_filter='fc*')]
metrics = [CategoricalAccuracy(top_k=3)]

trainer.compile(loss='nll_loss',
                optimizer='adadelta',
                regularizers=regularizers,
                constraints=constraints,
                initializers=initializers,
                metrics=metrics)
trainer.set_callbacks(callbacks)
Example #18
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x), F.log_softmax(x)


# one loss function for multiple targets
model = Network()
trainer = ModuleTrainer(model)
trainer.compile(loss='nll_loss',
                optimizer='adadelta')

trainer.fit_loader(train_loader,
                   num_epoch=3,
                   verbose=1)
ypred1, ypred2 = trainer.predict(x_train)
print(ypred1.size(), ypred2.size())

eval_loss = trainer.evaluate(x_train, [y_train, y_train])
print(eval_loss)
# multiple loss functions
model = Network()
trainer = ModuleTrainer(model)
trainer.compile(loss=['nll_loss', 'nll_loss'],
                optimizer='adadelta')
Example #19
        transform=transforms.Compose([
            transforms.ToPILImage(),
            transforms.CenterCrop(PATCH_SIZE),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.55803171, 0.54565148, 0.57604222],
                                 std=[0.30346842, 0.3020077, 0.30457914]),
        ]))

    test_loader = DataLoader(
        test_dataset,
        #sampler = SubsetSampler(100),
        batch_size=64,
        num_workers=1,
        pin_memory=True if th.cuda.is_available() else False)

    test_trainer = ModuleTrainer(model)

    predictions = test_trainer.predict_loader(
        test_loader,
        verbose=1,
        cuda_device=0 if th.cuda.is_available() else -1)

    species = predictions[0].data[:, :8].topk(8)

    species_softmax = predictions[0].data[:, :8].exp()
    ll = len(predictions[0])
    print(ll)

    video_filename__base_frame = test_dataset.video_index_frame.ix[range(ll), [
        test_dataset.video_index_frame.columns.get_loc('video_id'),
        test_dataset.video_index_frame.columns.get_loc('base_frame')
    ]]
Example #20
X_test = test.as_matrix()

#Normalization
X_train = X_train / 255.0
X_test = X_test / 255.0

#delete the pandas dataframes
del data, test

#Create network, criterion and optimizer
net = CNN.Net()
#criterion = torch.nn.CrossEntropyLoss()
#lr = 0.001
#optimizer = optim.Adam(net.parameters(),lr=lr)
nr_epochs = 20
trainer = ModuleTrainer(net)
metrics = [torchsample.metrics.CategoricalAccuracy(top_k=3)]
trainer.compile(optimizer=optim.Adam(net.parameters(), lr=0.001),
                loss=torch.nn.CrossEntropyLoss(),
                metrics=metrics)

batch_size = 64
X_t, X_ts, y_t, y_ts = train_test_split(X_train, y_train, test_size=0.15, random_state=42)

tf = tstf.Compose([tstf.ToPILImage(), tstf.RandomRotation(10), tstf.ToTensor()])

trainData = CNN.prepData(X_t, y_t, input_transform=tf)
trainDataL = DataLoader(trainData, batch_size=batch_size)

testData = CNN.prepData(X_ts, y_ts)
testDataL = DataLoader(testData, batch_size=batch_size)
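# The snippet is cut off before training; with the objects defined above, a
# plausible continuation (assumed, not the original code) would be:
#
#     trainer.fit_loader(trainDataL, val_loader=testDataL,
#                        num_epoch=nr_epochs, verbose=1)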

Example #21
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x), F.log_softmax(x)


# one loss function for multiple targets
model = Network()
trainer = ModuleTrainer(model)
trainer.compile(loss='nll_loss', optimizer='adadelta')

trainer.fit(x_train, [y_train, y_train],
            num_epoch=3,
            batch_size=128,
            verbose=1)
ypred1, ypred2 = trainer.predict(x_train)
print(ypred1.size(), ypred2.size())

eval_loss = trainer.evaluate(x_train, [y_train, y_train])
print(eval_loss)
# multiple loss functions
model = Network()
trainer = ModuleTrainer(model)
trainer.compile(loss=['nll_loss', 'nll_loss'], optimizer='adadelta')
Example #22
def main(args):
    """Simply redirrcts to the correct function."""
    start = default_timer()

    args.cuda = not args.no_cuda and torch.cuda.is_available()

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    print("-------------------------------------------------")
    if args.verbose > 0:
        print("Ran on {}".format(time.strftime("%Y-%m-%d %H:%M")))
        print()

    print('Parameters: {}'.format(vars(args)))
    print()

    # PREPARES DATA
    if args.verbose > 1:
        print('Prepares data ...')
    train, valid, test = train_valid_test_datasets(
        args.dataset,
        validSize=args.validation_size,
        isHashingTrick=not args.dictionnary,
        nFeaturesRange=args.num_features_range,
        ngramRange=args.ngrams_range,
        seed=args.seed,
        num_words=args.num_embeding,
        specificArgs={'dictionnary': ['num_words']})

    num_classes = len(train.classes)
    train = DataLoader(dataset=train,
                       batch_size=args.batch_size,
                       shuffle=not args.no_shuffle)
    valid = DataLoader(dataset=valid,
                       batch_size=args.batch_size,
                       shuffle=not args.no_shuffle)
    test = DataLoader(dataset=test,
                      batch_size=args.batch_size,
                      shuffle=not args.no_shuffle)

    # PREPARES MODEL
    if args.verbose > 1:
        print('Prepares model ...')

    Model = ModelNoDict if args.model == 'embed-softmax' else ModelDict
    model = Model(args.num_embeding,
                  args.dim,
                  num_classes,
                  isHash=not args.no_hashembed,
                  seed=args.seed,
                  num_buckets=args.num_buckets,
                  append_weight=not args.no_append_weight,
                  aggregation_mode=args.agg_mode,
                  oldAlgorithm=args.old_hashembed)
    if args.cuda:
        model.cuda()

    if args.verbose > 1:
        model_parameters = filter(lambda p: p.requires_grad,
                                  model.parameters())
        nParams = sum([np.prod(p.size()) for p in model_parameters])
        print('Num parameters in model: {}'.format(nParams))
        print("Train on {} samples, validate on {} samples".format(
            len(train), len(valid)))

    # COMPILES
    trainer = ModuleTrainer(model)
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters())

    callbacks = []
    callbackMetric = "val_loss" if args.val_loss_callback else "val_acc_metric"
    if args.patience is not None:
        callbacks.append(
            EarlyStopping(patience=args.patience, monitor=callbackMetric))
    if args.plateau_reduce_lr is not None:
        callbacks.append(
            ReduceLROnPlateau(factor=args.plateau_reduce_lr[1],
                              patience=args.plateau_reduce_lr[0],
                              monitor=callbackMetric))
    if not args.no_checkpoint:
        modelDir = os.path.join(parentddir, 'models')
        filename = "{}.pth.tar".format(args.dataset)
        callbacks.append(
            ModelCheckpoint(modelDir,
                            filename=filename,
                            save_best_only=True,
                            max_save=1,
                            monitor=callbackMetric))

    metrics = [CategoricalAccuracy()]

    trainer.compile(loss=loss,
                    optimizer=optimizer,
                    callbacks=callbacks,
                    metrics=metrics)

    # TRAINS
    if args.verbose > 1:
        print('Trains ...')
    trainer.fit_loader(train,
                       val_loader=valid,
                       num_epoch=args.epochs,
                       verbose=args.verbose,
                       cuda_device=0 if args.cuda else -1)

    # EVALUATES
    print()
    evalTest = trainer.evaluate_loader(test,
                                       verbose=args.verbose,
                                       cuda_device=0 if args.cuda else -1)
    evalValid = trainer.evaluate_loader(valid,
                                        verbose=args.verbose,
                                        cuda_device=0 if args.cuda else -1)
    print("Last Model. Validation - Loss: {}, Accuracy: {}".format(
        evalValid['val_loss'], evalValid['val_acc_metric']))
    print("Last Model. Test - Loss: {}, Accuracy: {}".format(
        evalTest['val_loss'], evalTest['val_acc_metric']))

    if not args.no_checkpoint:
        checkpoint = torch.load(os.path.join(modelDir, filename))
        model.load_state_dict(checkpoint["state_dict"])
        evalTest = trainer.evaluate_loader(test,
                                           verbose=args.verbose,
                                           cuda_device=0 if args.cuda else -1)
        evalValid = trainer.evaluate_loader(valid,
                                            verbose=args.verbose,
                                            cuda_device=0 if args.cuda else -1)
        print("Best Model. Validation - Loss: {}, Accuracy: {}".format(
            evalValid['val_loss'], evalValid['val_acc_metric']))
        print("Best Model. Test - Loss: {}, Accuracy: {}".format(
            evalTest['val_loss'], evalTest['val_acc_metric']))

    if args.verbose > 1:
        print('Finished after {:.1f} min.'.format(
            (default_timer() - start) / 60))
Example #23
def run():
    from config import get_config
    config = get_config()
    print("Timer")
    import time
    import losses
    import models
    from utils.make_dirs import create_dirs
    from datasets import loaders
    from torchsample.modules import ModuleTrainer
    create_dirs()
    cuda_device = -1
    tr_data_loader, val_data_loader, te_data_loader = getattr(
        loaders, config.loader_name)(train=True)

    model = getattr(models, config.network).get_network()(
        channel=config.network_channel, embedding_size=config.embedding)
    criterion = getattr(losses, config.loss)()
    if config.cuda:
        model.cuda()
        criterion.cuda()
    trainer = ModuleTrainer(model)

    trainer.compile(loss=criterion, optimizer='adam')
    if config.cuda:
        cuda_device = 0
    trainer.evaluate_loader(tr_data_loader, verbose=2, cuda_device=cuda_device)
    trainer.evaluate_loader(val_data_loader,
                            verbose=2,
                            cuda_device=cuda_device)
    trainer.evaluate_loader(te_data_loader, verbose=2, cuda_device=cuda_device)
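    # Presumably a warm-up pass over each split (data pipeline, CUDA context)
    # before the single timed training epoch below.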

    start_time = time.time()
    trainer.fit_loader(tr_data_loader,
                       val_loader=val_data_loader,
                       num_epoch=1,
                       verbose=2,
                       cuda_device=cuda_device)
    end_time = time.time()
    with open("./times.log", mode="a") as f:
        f.write("%s %s\n" % (config.result_dir, str(end_time - start_time)))
Example #24
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        #x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


model = Network()
trainer = ModuleTrainer(model)

trainer.compile(loss='nll_loss', optimizer='adadelta')

trainer.fit_loader(train_loader, num_epoch=3, verbose=1)

ypred = trainer.predict(x_train)
print(ypred.size())

eval_loss = trainer.evaluate(x_train, y_train)
print(eval_loss)

print(trainer.history)
#print(trainer.history['loss'])
Example #25
class Keras_Cifar(nn.Module):
    def __init__(self):
        super(Keras_Cifar, self).__init__()
        # conv/pool sizes assumed to match the 16 * 5 * 5 flatten in forward()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x



net = Keras_Cifar() 
trainer = ModuleTrainer(net) 

from torchsample.callbacks import *

callbacks = [EarlyStopping(patience=10),
             ReduceLROnPlateau(factor=0.5, patience=5),
             CSVLogger('culo.log', ",", False)]

trainer.compile(loss='cross_entropy',
                callbacks=callbacks,
                optimizer='adam',
                metrics=metrics)

Example #26
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x, y, z):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


model = Network()
trainer = ModuleTrainer(model)

trainer.compile(loss='nll_loss', optimizer='adadelta')

trainer.fit([x_train, x_train, x_train],
            y_train,
            val_data=([x_test, x_test, x_test], y_test),
            num_epoch=3,
            batch_size=128,
            verbose=1)

ypred = trainer.predict([x_train, x_train, x_train])
print(ypred.size())

eval_loss = trainer.evaluate([x_train, x_train, x_train], y_train)
print(eval_loss)
Example #27
def train_net(train, val, model, name):
    transformations_train = transforms.apply_chain([
        transforms.random_fliplr(),
        transforms.random_flipud(),
        transforms.augment(),
        torchvision.transforms.ToTensor()
    ])

    transformations_val = transforms.apply_chain([
        torchvision.transforms.ToTensor(),
    ])

    dset_train = KaggleAmazonJPGDataset(train, paths.train_jpg, transformations_train, divide=False)
    train_loader = DataLoader(dset_train,
                              batch_size=64,
                              shuffle=True,
                              num_workers=10,
                              pin_memory=True)

    dset_val = KaggleAmazonJPGDataset(val, paths.train_jpg, transformations_val, divide=False)
    val_loader = DataLoader(dset_val,
                            batch_size=64,
                            num_workers=10,
                            pin_memory=True)

    ignored_params = list(map(id, chain(
        model.classifier.parameters(),
        model.layer1.parameters(),
        model.layer2.parameters(),
        model.layer3.parameters(),
        model.layer4.parameters()
    )))
    base_params = filter(lambda p: id(p) not in ignored_params,
                         model.parameters())

    optimizer = optim.Adam([
        {'params': base_params},
        {'params': model.layer1.parameters()},
        {'params': model.layer2.parameters()},
        {'params': model.layer3.parameters()},
        {'params': model.layer4.parameters()},
        {'params': model.classifier.parameters()}
    ], lr=0, weight_decay=0.0005)

    trainer = ModuleTrainer(model)
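    # The Adam optimizer above has six parameter groups (base params plus
    # layer1-4 and the classifier); schedule() below assigns each group index
    # its own learning rate, giving earlier layers progressively smaller rates.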

    def schedule(current_epoch, current_lrs, **logs):
        lrs = [1e-3, 1e-4, 1e-5]
        epochs = [0, 2, 10]

        for lr, epoch in zip(lrs, epochs):
            if current_epoch >= epoch:
                current_lrs[5] = lr
                if current_epoch >= 1:
                    current_lrs[4] = lr * 0.4
                    current_lrs[3] = lr * 0.2
                    current_lrs[2] = lr * 0.1
                    current_lrs[1] = lr * 0.05
                    current_lrs[0] = lr * 0.01

        return current_lrs

    trainer.set_callbacks([
        callbacks.ModelCheckpoint(
            paths.models,
            name,
            save_best_only=False,
            saving_strategy=lambda epoch: True
        ),
        CSVLogger('./logs/' + name),
        LearningRateScheduler(schedule)
    ])

    trainer.compile(loss=nn.BCELoss(),
                    optimizer=optimizer)

    trainer.fit_loader(train_loader,
                       val_loader,
                       nb_epoch=35,
                       verbose=1,
                       cuda_device=0)
Example #28
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x, y, z):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


model = Network()
trainer = ModuleTrainer(model)

trainer.compile(loss='nll_loss',
                optimizer='adadelta')

trainer.fit([x_train, x_train, x_train], y_train,
            val_data=([x_test, x_test, x_test], y_test),
            num_epoch=3, 
            batch_size=128,
            verbose=1)

ypred = trainer.predict([x_train, x_train, x_train])
print(ypred.size())

eval_loss = trainer.evaluate([x_train, x_train, x_train], y_train)
print(eval_loss)
Example #29
def run():
    from config import get_config
    config = get_config()
    load_model_epoch.run()
    return True
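    # NOTE: the early return above makes everything below unreachable as written.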
    print("Timer")
    import time
    import models
    from utils.make_dirs import create_dirs
    from datasets import loaders
    from torchsample.modules import ModuleTrainer
    create_dirs()
    cuda_device = -1
    tr_data_loader, val_data_loader, te_data_loader = loaders.online_pair_loaders(
    )

    model = getattr(models, config.network).get_network()(
        channel=config.network_channel, embedding_size=config.embedding)
    from losses.online_cosine import OnlineCosineLoss
    from datasets.data_utils import AllPositivePairSelector, HardNegativePairSelector

    margin = 0.5

    if args.selector == 'AllPositivePairSelector':
        criterion = OnlineCosineLoss(margin, AllPositivePairSelector())
    elif args.selector == 'HardNegativePairSelector':
        criterion = OnlineCosineLoss(margin, HardNegativePairSelector())

    if config.cuda:
        model.cuda()
        criterion.cuda()
    trainer = ModuleTrainer(model)
    trainer.compile(loss=criterion, optimizer='adam')
    if config.cuda:
        cuda_device = 0
    trainer.evaluate_loader(tr_data_loader, verbose=2, cuda_device=cuda_device)
    trainer.evaluate_loader(val_data_loader,
                            verbose=2,
                            cuda_device=cuda_device)
    trainer.evaluate_loader(te_data_loader, verbose=2, cuda_device=cuda_device)

    start_time = time.time()
    trainer.fit_loader(tr_data_loader,
                       val_loader=val_data_loader,
                       num_epoch=1,
                       verbose=2,
                       cuda_device=cuda_device)
    end_time = time.time()
    with open("./times.log", mode="a") as f:
        f.write("%s %s\n" % (config.result_dir, str(end_time - start_time)))
Example #30
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return F.log_softmax(x)


model = Network()
trainer = ModuleTrainer(model)

callbacks = [
    EarlyStopping(patience=10),
    ReduceLROnPlateau(factor=0.5, patience=5)
]
regularizers = [
    L1Regularizer(scale=1e-3, module_filter='conv*'),
    L2Regularizer(scale=1e-5, module_filter='fc*')
]
constraints = [
    UnitNorm(frequency=3, unit='batch', module_filter='fc*'),
    MaxNorm(value=2., lagrangian=True, scale=1e-2, module_filter='conv*')
]
initializers = [XavierUniform(bias=False, module_filter='fc*')]
metrics = [CategoricalAccuracy(top_k=3)]
Example #31
def run():
    from config import get_config
    config = get_config()

    print('%s/ckpt.pth.tar' % config.result_dir)
    if os.path.exists('%s/ckpt.pth.tar' % config.result_dir):
        return True
    print("Contrastive Trainer Not Return")
    import os
    import time
    import models
    from utils.make_dirs import create_dirs
    from datasets import loaders
    from torchsample.callbacks import EarlyStopping, ModelCheckpoint, CSVLogger
    from torchsample.metrics import CategoricalAccuracy
    from torchsample.modules import ModuleTrainer
    create_dirs()
    cuda_device = -1
    tr_data_loader, val_data_loader, te_data_loader = loaders.online_pair_loaders()

    model = getattr(models, config.network).get_network()(channel=config.network_channel,
                                                          embedding_size=config.embedding)
    from losses.online_cosine import OnlineCosineLoss
    from datasets.data_utils import AllPositivePairSelector, HardNegativePairSelector

    margin = 0.5

    if args.selector == 'AllPositivePairSelector':
        criterion = OnlineCosineLoss(margin, AllPositivePairSelector())
    elif args.selector == 'HardNegativePairSelector':
        criterion = OnlineCosineLoss(margin, HardNegativePairSelector())

    if config.cuda:
        model.cuda()
        criterion.cuda()
    trainer = ModuleTrainer(model)
    epochs = config.epochs

    callbacks = [EarlyStopping(monitor='val_loss', patience=50),
                 ModelCheckpoint(config.result_dir, save_best_only=True, verbose=1),
                 CSVLogger("%s/logger.csv" % config.result_dir)]

    metrics = []
    if config.loader_name == 'data_loaders' and 'Angle' not in config.loss:
        metrics.append(CategoricalAccuracy(top_k=1))
    trainer.compile(loss=criterion, optimizer='adam', metrics=metrics)
    trainer.set_callbacks(callbacks)
    if config.cuda:
        cuda_device = 0
    start_time = time.time()
    trainer.fit_loader(tr_data_loader, val_loader=val_data_loader, num_epoch=epochs, verbose=2,
                       cuda_device=cuda_device)
    end_time = time.time()

    with open("%s/app.log" % config.result_dir, mode="a") as f:
        f.write("%s\n" % str(model))
        f.write("%s %s\n" % (config.loss, str(end_time - start_time)))
    tr_loss = trainer.evaluate_loader(tr_data_loader, cuda_device=cuda_device)
    print(tr_loss)
    val_loss = trainer.evaluate_loader(val_data_loader, cuda_device=cuda_device)
    te_loss = trainer.evaluate_loader(te_data_loader, cuda_device=cuda_device)
    print(te_loss)
    with open(config.log_path, "a") as f:
        f.write('Train %s\nVal:%s\nTest:%s\n' % (str(tr_loss), str(val_loss), te_loss))

    tr_data_loader, val_data_loader, te_data_loader = loaders.data_loaders(train=False, val=True)

    tr_y_pred = trainer.predict_loader(tr_data_loader, cuda_device=cuda_device)
    save_embeddings(tr_y_pred, '%s/train_embeddings.csv' % config.result_dir)
    save_labels(tr_data_loader, '%s/train_labels.csv' % config.result_dir)

    val_y_pred = trainer.predict_loader(val_data_loader, cuda_device=cuda_device)
    save_embeddings(val_y_pred, '%s/val_embeddings.csv' % config.result_dir)
    save_labels(val_data_loader, '%s/val_labels.csv' % config.result_dir)

    te_y_pred = trainer.predict_loader(te_data_loader, cuda_device=cuda_device)
    save_embeddings(te_y_pred, '%s/test_embeddings.csv' % config.result_dir)
    save_labels(te_data_loader, '%s/test_labels.csv' % config.result_dir)
Example #32
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 1)

    def forward(self, x, y, z):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return th.abs(10 - x)


model = Network()
trainer = ModuleTrainer(model)

trainer.compile(loss='unconstrained_sum',
                optimizer='adadelta')

trainer.fit([x_train, x_train, x_train],
            num_epoch=3, 
            batch_size=128,
            verbose=1)

ypred = trainer.predict([x_train, x_train, x_train])
print(ypred.size())

eval_loss = trainer.evaluate([x_train, x_train, x_train])
print(eval_loss)
Example #33
from torchsample.modules import ModuleTrainer
from amazonds import AmazonDataset, CLASS_NAMES, BinaryAccuracy
from torchvision.models import resnet18, resnet50
import torch.nn as nn
from torch.autograd import Variable
from torchsample.callbacks import ModelCheckpoint

batch_size = 16
epochs = 30
num_classes = len(CLASS_NAMES)

model = resnet50(pretrained=True)
model.fc = nn.Sequential(nn.Linear(model.fc.in_features, num_classes),
                         nn.Sigmoid())

trainer = ModuleTrainer(model.cuda())

trainer.compile(loss=nn.BCELoss().cuda(),
                optimizer='adam',
                metrics=[BinaryAccuracy()],
                callbacks=[
                    ModelCheckpoint(directory="../input/torch/",
                                    filename='torch{epoch}.{loss}.pth.tar',
                                    monitor='val_loss')
                ])

from torchsample import TensorDataset
from torch.utils.data import DataLoader

train_dataset = AmazonDataset("train35.csv")
x, y = train_dataset[0]
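# The example is cut off here; a plausible continuation with the pieces above
# (loader settings assumed, not from the original):
#
#     train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
#     trainer.fit_loader(train_loader, num_epoch=epochs, cuda_device=0)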
Example #34
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.40056697,  0.39674244,  0.42981134],
                std=[0.27938687,  0.28158916,  0.29005027]
                #mean=[0.485, 0.456, 0.406],
                #std=[0.229, 0.224, 0.225]
                ),
            ])
        )

    test_loader = DataLoader(test_dataset,
                      batch_size=2,
                      num_workers=1,
                      pin_memory=True if th.cuda.is_available() else False)

    test_trainer = ModuleTrainer(model)

    predictions = test_trainer.predict_loader(
        test_loader,
        verbose=1, 
        cuda_device=0 if th.cuda.is_available() else -1)

    boat_angles_frame = pd.read_csv(BOAT_ANGLES_CSV).set_index('boat_id')  

    for video_id, xy, boat_id in tqdm(zip(test_dataset.test_video_ids, predictions[0].cpu().data.numpy().squeeze(), predictions[1].cpu().data.numpy())):

        boat_id = np.argmax(boat_id)
        angle = boat_angles_frame.get_value(boat_id, 'angle')[0]
        cx =    boat_angles_frame.get_value(boat_id, 'cx')[0]
        cy =    boat_angles_frame.get_value(boat_id, 'cy')[0]
Example #35
class Network(nn.Module):
    def __init__(self):
        super(Network, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, kernel_size=3)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3)
        self.fc1 = nn.Linear(1600, 128)
        self.fc2 = nn.Linear(128, 1)

    def forward(self, x, y, z):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 1600)
        x = F.relu(self.fc1(x))
        x = F.dropout(x, training=self.training)
        x = self.fc2(x)
        return th.abs(10 - x)


model = Network()
trainer = ModuleTrainer(model)

trainer.compile(loss='unconstrained_sum', optimizer='adadelta')

trainer.fit([x_train, x_train, x_train],
            num_epoch=3,
            batch_size=128,
            verbose=1)

ypred = trainer.predict([x_train, x_train, x_train])
print(ypred.size())

eval_loss = trainer.evaluate([x_train, x_train, x_train])
print(eval_loss)
Example #36
def run():
    config = BaseConfig()
    logging.info('%s/train_embeddings.csv' % config.result_dir)
    result_dir = config.result_dir
    logging.info('%s/train_embeddings.csv' % result_dir)
    if os.path.exists(
            '%s/train_embeddings.csv' % result_dir) and os.path.exists(
                '%s/test_embeddings.csv' % result_dir):
        return True
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    logging.info("Saved Module Trainer Not Return")
    create_dirs()
    device = 0 if torch.cuda.is_available() else -1

    tr_data_loader, val_data_loader, te_data_loader = loaders.data_loaders()

    model = getattr(
        models, config.network).get_network()(embedding_size=config.embedding)

    check_point = os.path.join(config.result_dir, "ckpt.pth.tar")
    if os.path.isfile(check_point):
        logging.info("=> loading checkpoint '{}'".format(check_point))
        checkpoint = torch.load(check_point)
        model.load_state_dict(checkpoint['state_dict'])
        logging.info("=> loaded checkpoint '{}' (epoch {})".format(
            check_point, checkpoint['epoch']))
    else:
        logging.info("=> no checkpoint found at '{}'".format(check_point))
        return
    margin = 1.
    criterion = OnlineTripletLoss(margin,
                                  SemihardNegativeTripletSelector(margin))
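    # Online triplet mining: the selector draws semi-hard triplets from each
    # batch on the fly, so the model only has to produce raw embeddings.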
    if device == 0:
        model.cuda()
        criterion.cuda()
    trainer = ModuleTrainer(model)

    trainer.compile(loss=criterion, optimizer='adam')

    logging.info('Train Prediction')
    tr_y_pred = trainer.predict_loader(tr_data_loader, cuda_device=device)
    logging.info('Train Save Embeddings')
    save_embeddings(tr_y_pred, '%s/train_embeddings.csv' % config.result_dir)
    logging.info('Train Save Labels')
    save_labels(tr_data_loader, '%s/train_labels.csv' % config.result_dir)
    tr_data_loader.dataset.id_list.to_csv('%s/train_ids.csv' %
                                          config.result_dir,
                                          header=None,
                                          index=None)

    logging.info('Validation Prediction')
    val_y_pred = trainer.predict_loader(val_data_loader, cuda_device=device)
    logging.info('Validation Save Embeddings')
    save_embeddings(val_y_pred, '%s/val_embeddings.csv' % config.result_dir)
    logging.info('Validation Save Labels')
    save_labels(val_data_loader, '%s/val_labels.csv' % config.result_dir)
    val_data_loader.dataset.id_list.to_csv('%s/val_ids.csv' %
                                           config.result_dir,
                                           header=None,
                                           index=None)

    logging.info('Test Prediction')
    te_y_pred = trainer.predict_loader(te_data_loader, cuda_device=device)
    logging.info('Test Save Embeddings')
    save_embeddings(te_y_pred, '%s/test_embeddings.csv' % config.result_dir)
    logging.info('Test Save Labels')
    save_labels(te_data_loader, '%s/test_labels.csv' % config.result_dir)
    te_data_loader.dataset.id_list.to_csv('%s/test_ids.csv' %
                                          config.result_dir,
                                          header=None,
                                          index=None)