# Example 1
def main():
    """Train a depth-estimation model on NYUv2, then evaluate it.

    NOTE(review): the model is never moved to a CUDA device here even
    though cudnn.benchmark is enabled -- compare with the multi-GPU
    variant of this script and confirm whether .cuda() was intended.
    """
    # Backbone: ResNet-50 pretrained on ImageNet; the four input_channels
    # values match the channel widths of ResNet-50's four residual stages.
    model = network.Model(resnet.resnet50(pretrained=True),
                          num_features=2048,
                          input_channels=[256, 512, 1024, 2048])
    batch_size = 8
    epochs = 5
    lr = 0.0001
    wd = 1e-4

    # Let cuDNN auto-tune conv algorithms (input sizes are fixed).
    torch.backends.cudnn.benchmark = True

    # Fix: use the lr/wd hyperparameters declared above instead of
    # repeating the literals -- they were previously unused locals.
    optimizer = torch.optim.Adam(model.parameters(), lr, weight_decay=wd)

    ## training

    train_file_url = './data/nyu2_train.csv'
    train_loader = data.training_data(train_file_url, batch_size)

    for epoch in range(0, epochs):
        update_lr(optimizer, epoch)
        train(train_loader, model, optimizer, epoch)

    ## testing

    test_file_url = './data/nyu2_test.csv'
    test_loader = data.testing_data(test_file_url, 1)

    test(test_loader, model, 0.25)
# Example 2
def test_image(
        result_folder,
        epoch=None,
        image_idx=0,
        use_cpu=False):
    """Run a trained network on one test image and log its prediction."""
    # The compute backend must be selected before the network is built.
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()

    _LOGGER.info("Loading data...")
    train_images, _ = training_data()
    test_images, _ = test_data()
    from data import _MEAN

    _LOGGER.info("Loading network...")
    # Restore the model (training configuration) at the requested epoch.
    model, _, _, _ = _model(
        result_folder, train_images.shape[0], epoch=epoch)

    _LOGGER.info("Predicting...")
    monitors = [
        RandCropMonitor('data', _MEAN),
        mnt.ProgressIndicator(),
    ]
    # Predict on the prefix up to and including the requested image only.
    results = model.predict(test_images[:image_idx + 1],
                            test_callbacks=monitors,
                            out_blob_names=['score'])
    _LOGGER.info("Prediction for image %d: %s.",
                 image_idx, str(results[image_idx]))
# Example 3
def score(
        result_folder,
        epoch=None,
        use_cpu=False):
    """Evaluate a trained network on the whole test set and log accuracy."""
    # Pick the compute backend before the network is instantiated.
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()

    _LOGGER.info("Loading data...")
    train_images, _ = training_data()
    test_images, test_labels = test_data()
    from data import _MEAN

    _LOGGER.info("Loading network...")
    # no_solver=True: inference only, no optimizer state is restored.
    model, _, _, _ = _model(result_folder,
                            train_images.shape[0],
                            epoch=epoch,
                            no_solver=True)

    _LOGGER.info("Predicting...")
    callbacks = [
        RandCropMonitor('data', _MEAN),
        mnt.ProgressIndicator(),
    ]
    predictions = model.predict(test_images,
                                test_callbacks=callbacks,
                                out_blob_names=['score'])
    # Hard-decide each sample via argmax over scores, then compare to labels.
    predicted_classes = np.argmax(np.array(predictions), axis=1)
    _LOGGER.info("Accuracy: %f.",
                 accuracy_score(test_labels, predicted_classes))
# Example 4
def test_image(result_folder, epoch=None, image_idx=0, use_cpu=False):
    """Predict on the test set and log the result for one image."""
    # Backend selection has to happen before the network is created.
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()

    _LOGGER.info("Loading data...")
    train_arr, _ = training_data()
    test_arr, _ = test_data()

    _LOGGER.info("Loading network...")
    # Restore the model (training configuration) at the requested epoch.
    model, _, _, _ = _model(result_folder, train_arr.shape[0], epoch=epoch)

    _LOGGER.info("Predicting...")
    # NOTE: prediction runs over the full test set even though only a
    # single image's result is reported below.
    results = model.predict(
        test_arr, test_callbacks=[mnt.ProgressIndicator()])
    _LOGGER.info("Prediction for image %d: %s.", image_idx,
                 str(results[image_idx]))
def main():
    """Train a depth-estimation model on NYUv2 (multi-GPU aware), then test.

    The effective batch size scales with the number of visible GPUs:
    64 for 8 GPUs, 32 for 4 GPUs, 8 otherwise (single device).
    """
    # Backbone: ResNet-50 pretrained on ImageNet; the four input_channels
    # values match the channel widths of ResNet-50's four residual stages.
    model = network.Model(resnet.resnet50(pretrained=True),
                          num_features=2048,
                          input_channels=[256, 512, 1024, 2048])
    batch_size = 8
    epochs = 5
    lr = 0.0001
    wd = 1e-4

    # Wrap in DataParallel for the two supported multi-GPU configurations;
    # any other GPU count falls back to single-device training.
    if torch.cuda.device_count() == 8:
        model = torch.nn.DataParallel(model,
                                      device_ids=[0, 1, 2, 3, 4, 5, 6,
                                                  7]).cuda()
        batch_size = 64
    elif torch.cuda.device_count() == 4:
        model = torch.nn.DataParallel(model, device_ids=[0, 1, 2, 3]).cuda()
        batch_size = 32
    else:
        model = model.cuda()
        batch_size = 8

    # Let cuDNN auto-tune conv algorithms (input sizes are fixed).
    torch.backends.cudnn.benchmark = True

    # Fix: use the lr/wd hyperparameters declared above instead of
    # repeating the literals -- they were previously unused locals.
    optimizer = torch.optim.Adam(model.parameters(), lr, weight_decay=wd)

    ## training

    train_file_url = './data/nyu2_train.csv'
    train_loader = data.training_data(train_file_url, batch_size)

    for epoch in range(0, epochs):
        update_lr(optimizer, epoch)
        train(train_loader, model, optimizer, epoch)

    ## testing

    test_file_url = './data/nyu2_test.csv'
    test_loader = data.testing_data(test_file_url, 1)

    test(test_loader, model, 0.25)
# Example 6
def test_image(
        result_folder,
        epoch=None,
        image_idx=0,
        use_cpu=False):
    """Run a trained network on the test set and report one image's result."""
    # The backend must be selected before the model is built.
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()

    _LOGGER.info("Loading data...")
    train_split, _ = training_data()
    test_split, _ = test_data()

    _LOGGER.info("Loading network...")
    # Restore the model (training configuration) at the requested epoch.
    model, _, _, _ = _model(
        result_folder, train_split.shape[0], epoch=epoch)

    _LOGGER.info("Predicting...")
    # Predicts over the entire test set; only image_idx is reported.
    results = model.predict(
        test_split, test_callbacks=[mnt.ProgressIndicator()])
    _LOGGER.info("Prediction for image %d: %s.",
                 image_idx, str(results[image_idx]))
# Example 7 (fragment: the enclosing train() definition starts before this excerpt)
            current_loss = 0

        # Train until 100% training accuracy
        if best_accuracy == 100:
            print(f"100% training accuracy achieved after {epoch+1} epochs")
            break

    # Save the results for further analysis
    with open("stats/losses.pkl", "wb+") as losses_f:
        pickle.dump(all_losses, losses_f)

    with open("stats/train_acc.pkl", "wb+") as train_acc_f:
        pickle.dump(train_acc, train_acc_f)

    print(f"runtime: {time.time() - time_start} seconds")
    print(f"Best model achieved a training accuracy of {best_accuracy}%")

    return all_losses


if __name__ == "__main__":

    # Imports deferred to script entry so importing this module for its
    # definitions does not pull in the data/model packages.
    from data import training_data
    from model import RNN

    # presumably (clean_X, clean_y) is a cleaned variant of the dataset;
    # only the train_X/train_y pair is used below -- verify in data module.
    clean_X, clean_y, train_X, train_y = training_data()

    model = RNN()

    losses = train(model, train_X, train_y)
# Example 8
def cli(result_folder,
        model_name=None,
        epoch=None,
        num_epoch=150,
        optimizer_name='sgd',
        lr_param=0.1,
        lr_decay_sched='90,135',
        lr_decay_ratio=0.1,
        mom_param=0.9,
        wd_param=0.0001,
        monitor=False,
        allow_overwrite=False,
        use_cpu=False):
    """Train a model.

    Args:
        result_folder: Output folder for logs, checkpoints and plots.
        model_name: Model identifier forwarded to ``_model``; None keeps
            the default/restored model.
        epoch: If given, resume training from this checkpoint epoch.
        num_epoch: Number of epochs to train; the iteration math below
            assumes 50000 samples per epoch.
        optimizer_name: Optimizer identifier (e.g. 'sgd').
        lr_param: Base learning rate.
        lr_decay_sched: Comma-separated epoch list for LR decay steps.
        lr_decay_ratio: Multiplicative LR decay factor.
        mom_param: Momentum.
        wd_param: Weight decay.
        monitor: If True, also write activation/filter/gradient
            visualizations and create live plots.
        allow_overwrite: Permit reusing an existing result folder.
        use_cpu: Run on CPU instead of GPU.
    """
    print("Parameters: ", sys.argv)
    # Backend must be chosen before any network is constructed.
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()
    # Load the data.
    tr_data, tr_labels = training_data()
    te_data, te_labels = test_data()
    from data import _MEAN
    # Setup the output folder, including logging.
    model, optimizer, out_folder, base_iter = _model(
        result_folder, tr_data.shape[0], model_name, epoch, 10, optimizer_name,
        lr_param, lr_decay_sched, lr_decay_ratio, mom_param, wd_param, False,
        allow_overwrite)
    # The network's input blob fixes the minibatch size.
    batch_size = model.blobs['data'].shape[0]
    # JSON logger records train/test loss and accuracy, flushed roughly
    # every 10000 samples (rounded to whole minibatches).
    logger = mnt.JSONLogger(str(out_folder),
                            'model', {
                                'train': ['train_loss', 'train_accuracy'],
                                'test': ['test_loss', 'test_accuracy']
                            },
                            base_iter=base_iter,
                            write_every=round_to_mbsize(10000, batch_size),
                            create_plot=monitor)
    progr_ind = mnt.ProgressIndicator()
    # Random-crop augmentation monitor; shared between train and test.
    cropper = RandCropMonitor('data', _MEAN)
    if monitor:
        # Optional visualization monitors, each writing every ~10000
        # samples into out_folder/visualizations/.
        extra_monitors = [
            mnt.ActivationMonitor(round_to_mbsize(10000, batch_size),
                                  os.path.join(str(out_folder),
                                               'visualizations' + os.sep),
                                  selected_blobs=['resblock3_out', 'avpool'],
                                  sample={'data': tr_data[0]}),
            mnt.FilterMonitor(round_to_mbsize(10000, batch_size),
                              os.path.join(str(out_folder),
                                           'visualizations' + os.sep),
                              selected_parameters={
                                  'resblock1_conv1': [0],
                                  'resblock3_conv1': [0],
                                  'resblock7_conv1': [0]
                              }),
            mnt.GradientMonitor(round_to_mbsize(10000, batch_size),
                                os.path.join(str(out_folder),
                                             'visualizations' + os.sep),
                                relative=True,
                                selected_parameters={
                                    'resblock1_conv1': [0, 1],
                                    'resblock3_conv1': [0, 1],
                                    'resblock7_conv1': [0, 1]
                                }),
        ]
    else:
        extra_monitors = []
    # Train for num_epoch * 50000 samples; validate every 50000 samples,
    # checkpoint every 10 epochs, with mirroring + random-crop augmentation.
    model.fit(round_to_mbsize(num_epoch * 50000, batch_size),
              optimizer,
              X={
                  'data': tr_data,
                  'labels': tr_labels
              },
              X_val={
                  'data': te_data,
                  'labels': te_labels
              },
              test_interval=round_to_mbsize(50000, batch_size),
              train_callbacks=[
                  progr_ind,
                  logger,
                  mnt.RotatingMirroringMonitor({'data': 0}, 0, 0.5),
                  cropper,
                  mnt.Checkpointer(os.path.join(str(out_folder), 'model'),
                                   round_to_mbsize(50000 * 10, batch_size),
                                   base_iterations=base_iter),
              ] + extra_monitors,
              test_callbacks=[progr_ind, cropper, logger],
              shuffle=True)
# Example 9
def cli(result_folder,
        model_name=None,
        epoch=None,
        num_epoch=3,
        optimizer_name='sgd',
        lr_param=0.001,
        lr_decay_sched='90,135',
        lr_decay_ratio=0.1,
        mom_param=0.9,
        wd_param=0.0001,
        monitor=False,
        allow_overwrite=False,
        use_cpu=False):
    """Train a model.

    Args:
        result_folder: Output folder for logs, checkpoints and plots.
        model_name: Model identifier forwarded to ``_model``; None keeps
            the default/restored model.
        epoch: If given, resume training from this checkpoint epoch.
        num_epoch: Number of epochs to train; one epoch here is the full
            training set (tr_data.shape[0] samples).
        optimizer_name: Optimizer identifier (e.g. 'sgd').
        lr_param: Base learning rate.
        lr_decay_sched: Comma-separated epoch list for LR decay steps.
        lr_decay_ratio: Multiplicative LR decay factor.
        mom_param: Momentum.
        wd_param: Weight decay.
        monitor: If True, also write activation/filter/gradient
            visualizations and create live plots.
        allow_overwrite: Permit reusing an existing result folder.
        use_cpu: Run on CPU instead of GPU.
    """
    print("Parameters: ", sys.argv)
    # Backend must be chosen before any network is constructed.
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()
    # Load the data.
    tr_data, tr_labels = training_data()
    te_data, te_labels = test_data()
    # Setup the output folder, including logging.
    model, optimizer, out_folder, base_iter = _model(
        result_folder,
        tr_data.shape[0],
        model_name,
        epoch,
        1,
        optimizer_name,
        lr_param,
        lr_decay_sched,
        lr_decay_ratio,
        mom_param,
        wd_param,
        False,
        allow_overwrite)
    # The network's input blob fixes the minibatch size.
    batch_size = model.blobs['data'].shape[0]
    # JSON logger records train/test loss and accuracy, flushed roughly
    # every 50000 samples (rounded to whole minibatches).
    logger = mnt.JSONLogger(str(out_folder),
                            'model',
                            {'train': ['train_loss', 'train_accuracy'],
                             'test': ['test_loss', 'test_accuracy']},
                            base_iter=base_iter,
                            write_every=round_to_mbsize(50000, batch_size),
                            create_plot=monitor)
    progr_ind = mnt.ProgressIndicator()

    if monitor:
        # Optional visualization monitors, each writing every ~10000
        # samples into out_folder/visualizations/.
        extra_monitors = [
            mnt.ActivationMonitor(round_to_mbsize(10000, batch_size),
                                  os.path.join(str(out_folder),
                                               'visualizations' + os.sep),
                                  sample={'data': tr_data[0]}),
            mnt.FilterMonitor(round_to_mbsize(10000, batch_size),
                              os.path.join(str(out_folder),
                                           'visualizations' + os.sep)),
            mnt.GradientMonitor(round_to_mbsize(10000, batch_size),
                                os.path.join(str(out_folder),
                                             'visualizations' + os.sep),
                                relative=True),
        ]
    else:
        extra_monitors = []
    # Train for num_epoch full passes; validate and checkpoint once per
    # epoch (every tr_data.shape[0] samples).
    model.fit(round_to_mbsize(num_epoch * tr_data.shape[0], batch_size),
              optimizer,
              X={'data': tr_data, 'labels': tr_labels},
              X_val={'data': te_data, 'labels': te_labels},
              test_interval=round_to_mbsize(tr_data.shape[0], batch_size),
              train_callbacks=[
                  progr_ind,
                  logger,
                  mnt.Checkpointer(os.path.join(str(out_folder),
                                                'model'),
                                   round_to_mbsize(tr_data.shape[0], batch_size),
                                   base_iterations=base_iter),
                  ] + extra_monitors,
              test_callbacks=[
                  progr_ind,
                  logger])
# Example 10
def cli(
    result_folder,
    model_name=None,
    epoch=None,
    num_epoch=150,
    optimizer_name="sgd",
    lr_param=0.1,
    lr_decay_sched="90,135",
    lr_decay_ratio=0.1,
    mom_param=0.9,
    wd_param=0.0001,
    monitor=False,
    allow_overwrite=False,
    use_cpu=False,
):
    """Train a model.

    Args:
        result_folder: Output folder for logs, checkpoints and plots.
        model_name: Model identifier forwarded to ``_model``; None keeps
            the default/restored model.
        epoch: If given, resume training from this checkpoint epoch.
        num_epoch: Number of epochs to train; the iteration math below
            assumes 50000 samples per epoch.
        optimizer_name: Optimizer identifier (e.g. 'sgd').
        lr_param: Base learning rate.
        lr_decay_sched: Comma-separated epoch list for LR decay steps.
        lr_decay_ratio: Multiplicative LR decay factor.
        mom_param: Momentum.
        wd_param: Weight decay.
        monitor: If True, also write activation/filter/gradient
            visualizations and create live plots.
        allow_overwrite: Permit reusing an existing result folder.
        use_cpu: Run on CPU instead of GPU.
    """
    print("Parameters: ", sys.argv)
    # Backend must be chosen before any network is constructed.
    if use_cpu:
        bnet.set_mode_cpu()
    else:
        bnet.set_mode_gpu()
    # Load the data.
    tr_data, tr_labels = training_data()
    te_data, te_labels = test_data()
    from data import _MEAN

    # Setup the output folder, including logging.
    model, optimizer, out_folder, base_iter = _model(
        result_folder,
        tr_data.shape[0],
        model_name,
        epoch,
        10,
        optimizer_name,
        lr_param,
        lr_decay_sched,
        lr_decay_ratio,
        mom_param,
        wd_param,
        False,
        allow_overwrite,
    )
    # The network's input blob fixes the minibatch size.
    batch_size = model.blobs["data"].shape[0]
    # JSON logger records train/test loss and accuracy, flushed roughly
    # every 10000 samples (rounded to whole minibatches).
    logger = mnt.JSONLogger(
        str(out_folder),
        "model",
        {"train": ["train_loss", "train_accuracy"], "test": ["test_loss", "test_accuracy"]},
        base_iter=base_iter,
        write_every=round_to_mbsize(10000, batch_size),
        create_plot=monitor,
    )
    progr_ind = mnt.ProgressIndicator()
    # Random-crop augmentation monitor; shared between train and test.
    cropper = RandCropMonitor("data", _MEAN)
    if monitor:
        # Optional visualization monitors, each writing every ~10000
        # samples into out_folder/visualizations/.
        extra_monitors = [
            mnt.ActivationMonitor(
                round_to_mbsize(10000, batch_size),
                os.path.join(str(out_folder), "visualizations" + os.sep),
                selected_blobs=["resblock3_out", "avpool"],
                sample={"data": tr_data[0]},
            ),
            mnt.FilterMonitor(
                round_to_mbsize(10000, batch_size),
                os.path.join(str(out_folder), "visualizations" + os.sep),
                selected_parameters={"resblock1_conv1": [0], "resblock3_conv1": [0], "resblock7_conv1": [0]},
            ),
            mnt.GradientMonitor(
                round_to_mbsize(10000, batch_size),
                os.path.join(str(out_folder), "visualizations" + os.sep),
                relative=True,
                selected_parameters={"resblock1_conv1": [0, 1], "resblock3_conv1": [0, 1], "resblock7_conv1": [0, 1]},
            ),
        ]
    else:
        extra_monitors = []
    # Train for num_epoch * 50000 samples; validate every 50000 samples,
    # checkpoint every 10 epochs, with mirroring + random-crop augmentation.
    model.fit(
        round_to_mbsize(num_epoch * 50000, batch_size),
        optimizer,
        X={"data": tr_data, "labels": tr_labels},
        X_val={"data": te_data, "labels": te_labels},
        test_interval=round_to_mbsize(50000, batch_size),
        train_callbacks=[
            progr_ind,
            logger,
            mnt.RotatingMirroringMonitor({"data": 0}, 0, 0.5),
            cropper,
            mnt.Checkpointer(
                os.path.join(str(out_folder), "model"),
                round_to_mbsize(50000 * 10, batch_size),
                base_iterations=base_iter,
            ),
        ]
        + extra_monitors,
        test_callbacks=[progr_ind, cropper, logger],
        shuffle=True,
    )