Example #1
def main():
    # model = Model(cf.segment_class, cf.level_class, cf.image_scale)
    # batch_size = 16
    dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    weights_file = "NYU_ResNet-UpProj.npy"
    print("Loading model......")
    model = Model()
    #resnet = torchvision.models.resnet50(pretrained=True)
    resnet = torchvision.models.resnet50()
    resnet.load_state_dict(torch.load('/home/xuqingyao/Multi_CNN/model/resnet50.pth'))
    #resnet.load_state_dict(torch.load('/home/xpfly/nets/ResNet/resnet50-19c8e357.pth'))
    print("resnet50 loaded.")
    resnet50_pretrained_dict = resnet.state_dict()

    model.load_state_dict(load_weights(model, weights_file, dtype))
    print("NYU_ResNet-UpProj weights loaded.")

    # Optionally initialise the encoder from the ResNet-50 state dict instead:
    # model_dict = model.state_dict()
    # resnet50_pretrained_dict = {k: v for k, v in resnet50_pretrained_dict.items()
    #                             if k in model_dict}
    # model_dict.update(resnet50_pretrained_dict)
    # model.load_state_dict(model_dict)
    if torch.cuda.is_available():
        model.cuda()
    else:
        print("No cuda QAQ")
    trainer = Trainer(model,
                      torch.optim.Adam(model.parameters(), lr=0.001),
                      epoch=600,
                      use_cuda=torch.cuda.is_available())
    trainer.train()
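The weight-merging path that this example keeps commented out follows a standard PyTorch pattern: filter the pretrained state dict down to matching keys, merge, and reload. A minimal sketch, assuming the custom model's encoder layers share names and shapes with torchvision's ResNet-50:

import torch
import torchvision

def init_encoder_from_resnet50(model: torch.nn.Module) -> torch.nn.Module:
    """Sketch: copy matching ResNet-50 weights into a custom model."""
    resnet = torchvision.models.resnet50(pretrained=True)
    pretrained_dict = resnet.state_dict()
    model_dict = model.state_dict()
    # keep only keys present in the target model with identical shapes
    pretrained_dict = {k: v for k, v in pretrained_dict.items()
                       if k in model_dict and v.size() == model_dict[k].size()}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)
    return model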
Example #2
def predict(X, model_id, weights_id):
    """Output predictions for input samples using selected trained model.

    Args:
        X (numpy.ndarray): Input data to use for predictions.
        model_id (str): Model identifier in string format.
        weights_id (tuple): Weights file identifier in tuple of strings format.
            The tuple should be of the form: (model_id, iteration_id).

    Returns:
        numpy.ndarray: Predictions.

    """
    model = models.load_model(model_id)
    model_nn = weights.load_weights(weights_id[0], weights_id[1])

    single_image = False
    if len(X.shape) == 2:
        single_image = True
        X = np.array([X])

    X = model.pack_data(X)
    logger.debug(
        "after pack: min(X)={0}, max(X)={1}, avg(X)={2}, var(X)={3}".format(
            np.min(X), np.max(X), np.average(X), np.var(X)
        )
    )

    pred = model_nn.predict(X)
    logger.debug(
        "after predict: min(pred)={0}, max(pred)={1}, avg(pred)={2}, var(pred)={3}".format(
            np.min(pred), np.max(pred), np.average(pred), np.var(pred)
        )
    )

    pred = model.unpack_data(pred)

    if single_image:
        pred = np.squeeze(pred)

    return pred
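A hypothetical call, following the docstring's (model_id, iteration_id) convention for weights_id; the identifiers and image size below are illustrative, not real ones:

import numpy as np

X = np.random.rand(128, 128).astype(np.float32)  # one 2-D input image
pred = predict(X, model_id='unet', weights_id=('unet', '42'))
print(pred.shape)  # squeezed back to 2-D since a single image was passed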
Example #3
train_loader = data_utils.DataLoader(train_dataset,
                                     batch_size,
                                     shuffle=True,
                                     drop_last=True)
val_loader = data_utils.DataLoader(val_dataset,
                                   batch_size,
                                   shuffle=True,
                                   drop_last=True)
test_loader = data_utils.DataLoader(test_dataset,
                                    batch_size,
                                    shuffle=True,
                                    drop_last=True)

model = Model(ResidualBlock, UpProj_Block, batch_size)
model.type(dtype)

# Loading pretrained weights
model.load_state_dict(load_weights(model, weights_file, dtype))

loss_fn = torch.nn.NLLLoss().type(dtype)  # NLLLoss2d is deprecated; NLLLoss handles 2-D targets

# Uncomment when transfer-learning only the semantic-segmentation branch
# (a runnable sketch of this pattern follows this example)
# for name, param in model.named_parameters():
#     if name.startswith('up_conv5') or\
#        name.startswith('conv4') or\
#        name.startswith('bn4') or\
#        name.startswith('conv5') or\
#        name.startswith('bn5'):
#         param.requires_grad = True
#     else:
#         param.requires_grad = False

# Uncomment when fine-tuning the model by allowing backpropagation through all layers of the model
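The commented block above freezes everything except the segmentation head. A minimal sketch of that pattern under the same prefix names, with an optimizer that only receives the trainable parameters (the learning rate is an arbitrary placeholder):

# Freeze all layers except the semantic-segmentation branch.
head_prefixes = ('up_conv5', 'conv4', 'bn4', 'conv5', 'bn5')
for name, param in model.named_parameters():
    param.requires_grad = name.startswith(head_prefixes)

# Optimize only the parameters that still require gradients.
optimizer = torch.optim.Adam(
    (p for p in model.parameters() if p.requires_grad), lr=1e-4)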
Example #4
def train(
    model_id, ds, ds_test, ds_val=None,
    revision_id=None, seed=None, flags=[], options={}
):
    """Train and save neural network model.

    Args:
        model_id (str): Model identifier.
        ds (datasets.Dataset): Dataset object with loaded data.
        ds_test (datasets.Dataset): Dataset object to be used for testing.
        ds_val (datasets.Dataset): Dataset object to be used for validation.
            If None is passed, validation is not performed.
        revision_id (str): Revision identifier of the model to be updated.
            If None is passed, a new model will be created.
        seed (int): Random number generator seed.
        flags (list): A flag is set if it is present in the passed list.
            List of possible flags:
                sanity-test: Flag for testing mode (model not saved).
                no-save: Do not save this model.
                no-metrics: Do not evaluate this model.
                no-early-stopping: Always train the specified number of
                    epochs. Otherwise training stops when the loss begins
                    to stagnate.
        options (dict): Additional training options. Merged with, and
            overriding, the defaults in the train_def_options dictionary.

    Returns:
        tuple: The trained tensorflow.keras.Model and the list of
            evaluation metrics (empty if the no-metrics flag is set).
    """
    options = {**train_def_options, **options}

    # Load model definition
    model = models.load_model(model_id)

    if seed is None:
        seed = set_seed()

    # Set up dataset properties
    for i in [ds, ds_test, ds_val]:
        if i is None:
            continue
        i.rs = np.random.default_rng(seed=seed)
        i.batch_size = options['batch_size']
        i.shuffle_on_epoch_end = True
        i.apply(model.pack_data)

    # Save some dataset statistics to debug
    batch0 = ds[0]
    logger.debug("Statistics of first data batch:")
    logger.debug("X.shape={0}".format(batch0[0].shape))
    logger.debug("min(X)={0}, max(X)={1}, avg(X)={2}, var(X)={3}".format(
        np.min(batch0[0]), np.max(batch0[0]),
        np.average(batch0[0]), np.var(batch0[0])
    ))
    logger.debug("Y.shape={0}".format(batch0[1].shape))
    logger.debug("min(Y)={0}, max(Y)={1}, avg(Y)={2}, var(Y)={3}".format(
        np.min(batch0[1]), np.max(batch0[1]),
        np.average(batch0[1]), np.var(batch0[1])
    ))

    # Load a model to add to or set up a new one
    if ('sanity-test' not in flags and 'no-save' not in flags
            and revision_id is not None
            and weights.weights_exist(model_id, revision_id)):
        logger.info(
            'Pre-trained weights found. Loading iteration {0}.'.format(
                revision_id
            )
        )
        model_nn = weights.load_weights(model_id, revision_id)
    else:
        logger.info(
            'Pre-trained weights not used. Building model from scratch.')
        input_shape = batch0[0][0].shape
        logger.info(
            'Using input shape: {0}.'.format(input_shape))
        model_nn = model.build(options['learning_rate'], input_shape)

    # Define callbacks
    callbacks = []
    callbacks.append(tf.keras.callbacks.TensorBoard(
        log_dir=lib.logger.tensorboard_log_path(model_id),
        write_graph=True,
        write_images=True
    ))
    if 'no-early-stopping' not in flags and hasattr(model, 'es_callback'):
        callbacks.append(model.es_callback)
    # if 'sanity-test' not in flags:
    #     callbacks.append(tf.keras.callbacks.ModelCheckpoint(
    #         os.path.join(
    #             options['checkpoint_dir'], options['checkpoint_file']),
    #         monitor='val_loss',
    #         save_best_only=True,  # checkpoint only when `val_loss` improves
    #         save_freq='epoch',
    #         verbose=1
    #     ))

    # Train the autoencoder model
    model_nn.fit(
        x=ds,
        validation_data=ds_val,
        epochs=options['epochs'],
        callbacks=callbacks,
        verbose=2
    )

    # Evaluate
    metrics = []
    if 'no-metrics' not in flags:
        logger.info('Evaluating model.')
        metrics = model_nn.evaluate(
            x=ds_test,
            verbose=0
        )
        model.metrics(metrics, logger)

    # Save model to weights directory
    if 'sanity-test' not in flags and 'no-save' not in flags:
        if revision_id is None:
            weights_id = weights.available(model_id, str(seed))
        else:
            weights_id = weights.available(model_id, revision_id)

        weights_path = weights.path(weights_id[0], weights_id[1])
        logger.info('Saving model to `{0}`.'.format(weights_path))
        model_nn.save(weights_path)

    return model_nn, metrics
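The es_callback appended above is defined on the model object and not shown here. A typical Keras early-stopping callback might look like the following; the monitored metric and patience are assumptions:

import tensorflow as tf

es_callback = tf.keras.callbacks.EarlyStopping(
    monitor='val_loss',         # stop once validation loss stagnates
    patience=10,                # epochs without improvement to tolerate
    restore_best_weights=True)  # roll back to the best epoch's weights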
Example #5
def main():
    batch_size = 32
    data_path = 'nyu_depth_v2_labeled.mat'
    learning_rate = 1.0e-5
    momentum = 0.9
    weight_decay = 0.0005
    num_epochs = 50
    resume_from_file = False
    dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    weights_file = "NYU_ResNet-UpProj.npy"  # pretrained NYU weights, as in Example #1

    # 1.Load data
    train_lists, val_lists, test_lists = load_split()
    print("Loading data......")
    train_loader = torch.utils.data.DataLoader(NyuDepthLoader(
        data_path, train_lists),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               drop_last=True)
    val_loader = torch.utils.data.DataLoader(NyuDepthLoader(
        data_path, val_lists),
                                             batch_size=batch_size,
                                             shuffle=True,
                                             drop_last=True)
    test_loader = torch.utils.data.DataLoader(NyuDepthLoader(
        data_path, test_lists),
                                              batch_size=batch_size,
                                              shuffle=True,
                                              drop_last=True)
    print("Data loaded.")
    # 2.Load model
    print("Loading model......")
    model = FCRN(batch_size)
    resnet = torchvision.models.resnet50(pretrained=True)
    #resnet = torchvision.models.resnet50()
    #resnet.load_state_dict(torch.load('/home/pengfei/data/nets/ResNet/resnet50-19c8e357.pth'))
    #resnet.load_state_dict(torch.load('/home/xpfly/nets/ResNet/resnet50-19c8e357.pth'))
    print("resnet50 loaded.")
    resnet50_pretrained_dict = resnet.state_dict()

    model.load_state_dict(load_weights(model, weights_file, dtype))
    print("NYU_ResNet-UpProj weights loaded.")

    # Optionally initialise the encoder from the ResNet-50 state dict instead:
    # model_dict = model.state_dict()
    # resnet50_pretrained_dict = {k: v for k, v in resnet50_pretrained_dict.items()
    #                             if k in model_dict}
    # model_dict.update(resnet50_pretrained_dict)
    # model.load_state_dict(model_dict)
    model = model.cuda()

    # 3.Loss
    loss_fn = torch.nn.MSELoss().cuda()
    print("loss_fn set.")

    # 5.Train
    best_val_err = 1.0e3

    # validate
    model.eval()
    num_correct, num_samples = 0, 0
    loss_local = 0
    with torch.no_grad():
        for input, depth in val_loader:
            input_var = Variable(input.type(dtype))
            depth_var = Variable(depth.type(dtype))

            output = model(input_var)

            input_rgb_image = input_var[0].data.permute(
                1, 2, 0).cpu().numpy().astype(np.uint8)
            input_gt_depth_image = depth_var[0][0].data.cpu().numpy().astype(
                np.float32)
            pred_depth_image = output[0].data.squeeze().cpu().numpy().astype(
                np.float32)

            input_gt_depth_image /= np.max(input_gt_depth_image)
            pred_depth_image /= np.max(pred_depth_image)

            plot.imsave('input_rgb_epoch_0.png', input_rgb_image)
            plot.imsave('gt_depth_epoch_0.png',
                        input_gt_depth_image,
                        cmap="viridis")
            plot.imsave('pred_depth_epoch_0.png',
                        pred_depth_image,
                        cmap="viridis")

            # depth_var = depth_var[:, 0, :, :]
            # loss_fn_local = torch.nn.MSELoss()

            loss_local += loss_fn(output, depth_var)

            num_samples += 1

    err = float(loss_local) / num_samples
    print('val_error before train:', err)

    start_epoch = 0

    resume_file = 'checkpoint.pth.tar'
    if resume_from_file:
        if os.path.isfile(resume_file):
            print("=> loading checkpoint '{}'".format(resume_file))
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                resume_file, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(resume_file))

    df_loss = pd.DataFrame(columns=['train_loss', 'val_loss'])
    for epoch in range(num_epochs):

        # 4.Optim
        # NOTE: recreating the optimizer each epoch applies the decayed
        # learning rate but also resets Adam's moment estimates; a
        # lr_scheduler (see the sketch after this example) is preferable.
        optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
        # optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum)
        # optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=weight_decay)
        print("optimizer set.")

        print('Starting train epoch %d / %d' %
              (start_epoch + epoch + 1, num_epochs))
        model.train()
        running_loss = 0
        count = 0
        epoch_loss = 0
        epoch_df_loss = []

        #for i, (input, depth) in enumerate(train_loader):
        for input, depth in train_loader:
            # input, depth = data
            #input_var = input.cuda()
            #depth_var = depth.cuda()
            input_var = Variable(input.type(dtype))
            depth_var = Variable(depth.type(dtype))

            output = model(input_var)
            loss = loss_fn(output, depth_var)
            print('loss:', loss.item())
            count += 1
            running_loss += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        epoch_loss = running_loss / count
        epoch_df_loss += [epoch_loss]
        print('epoch loss:', epoch_loss)

        # validate
        model.eval()
        num_correct, num_samples = 0, 0
        loss_local = 0
        with torch.no_grad():
            for input, depth in val_loader:
                input_var = Variable(input.type(dtype))
                depth_var = Variable(depth.type(dtype))

                output = model(input_var)

                input_rgb_image = input_var[0].data.permute(
                    1, 2, 0).cpu().numpy().astype(np.uint8)
                input_gt_depth_image = depth_var[0][0].data.cpu().numpy(
                ).astype(np.float32)
                pred_depth_image = output[0].data.squeeze().cpu().numpy(
                ).astype(np.float32)

                input_gt_depth_image /= np.max(input_gt_depth_image)
                pred_depth_image /= np.max(pred_depth_image)

                plot.imsave(
                    'input_rgb_epoch_{}.png'.format(start_epoch + epoch + 1),
                    input_rgb_image)
                plot.imsave('gt_depth_epoch_{}.png'.format(start_epoch +
                                                           epoch + 1),
                            input_gt_depth_image,
                            cmap="viridis")
                plot.imsave('pred_depth_epoch_{}.png'.format(start_epoch +
                                                             epoch + 1),
                            pred_depth_image,
                            cmap="viridis")

                # depth_var = depth_var[:, 0, :, :]
                # loss_fn_local = torch.nn.MSELoss()

                loss_local += loss_fn(output, depth_var)

                num_samples += 1

        err = float(loss_local) / num_samples
        epoch_df_loss += [err]
        print('val_error:', err)

        df_loss.loc[epoch] = epoch_df_loss

        if err < best_val_err:
            best_val_err = err
            torch.save(
                {
                    'epoch': start_epoch + epoch + 1,
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }, 'checkpoint.pth.tar')

        if epoch % 10 == 0:
            learning_rate = learning_rate * 0.6
    df_loss.to_csv('./loss.csv')
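Rebuilding the Adam optimizer every epoch (as this example does) applies the decayed learning rate but also discards Adam's moment estimates. A sketch of the same decay expressed with a scheduler, so the optimizer is created once:

import torch

optimizer = torch.optim.Adam(model.parameters(), lr=1.0e-5)
# multiply the learning rate by 0.6 every 10 epochs
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.6)

for epoch in range(num_epochs):
    # ... run one training epoch as above ...
    scheduler.step()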
Example #6
def main():
    batch_size = 16
    data_path = './data/nyu_depth_v2_labeled.mat'
    learning_rate = 1.0e-4
    momentum = 0.9
    weight_decay = 0.0005
    num_epochs = 100
    dtype = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor
    weights_file = "NYU_ResNet-UpProj.npy"  # pretrained NYU weights, as in Example #1


    # 1.Load data
    train_lists, val_lists, test_lists = load_split()
    print("Loading data...")
    train_loader = torch.utils.data.DataLoader(NyuDepthLoader(data_path, train_lists),
                                               batch_size=batch_size, shuffle=False, drop_last=True)
    val_loader = torch.utils.data.DataLoader(NyuDepthLoader(data_path, val_lists),
                                               batch_size=batch_size, shuffle=True, drop_last=True)
    test_loader = torch.utils.data.DataLoader(NyuDepthLoader(data_path, test_lists),
                                             batch_size=batch_size, shuffle=True, drop_last=True)
    print("Data loaded.")
    # 2.Load model
    print("Loading model...")
    model = FCRN(batch_size)
    model.load_state_dict(load_weights(model, weights_file, dtype))  # load the official weights converted from TensorFlow
    # optionally resume from a previously trained model
    resume_from_file = False
    resume_file = './model/model_300.pth'
    start_epoch = 0
    if resume_from_file:
        if os.path.isfile(resume_file):
            checkpoint = torch.load(resume_file)
            start_epoch = checkpoint['epoch']
            model.load_state_dict(checkpoint['state_dict'])
            print("loaded checkpoint '{}' (epoch {})"
                  .format(resume_file, checkpoint['epoch']))
        else:
            print("no checkpoint found at '{}'".format(resume_file))
    model = model.cuda()

    # 3.Loss
    # official MSE:
    # loss_fn = torch.nn.MSELoss()
    # custom MSE:
    # loss_fn = loss_mse()
    # the loss from the paper, the reverse Huber (berHu):
    loss_fn = loss_huber()
    print("loss_fn set...")

    # 4.Optim
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    print("optimizer set...")

    # 5.Train
    best_val_err = 1.0e3  # start large so the first validation error replaces it

    for epoch in range(num_epochs):
        print('Starting train epoch %d / %d' % (start_epoch + epoch + 1, num_epochs + start_epoch))
        model.train()
        running_loss = 0
        count = 0
        epoch_loss = 0
        for input, depth in train_loader:

            input_var = Variable(input.type(dtype))
            depth_var = Variable(depth.type(dtype))

            output = model(input_var)
            loss = loss_fn(output, depth_var)
            print('loss: %f' % loss.item())
            count += 1
            running_loss += loss.item()

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        epoch_loss = running_loss / count
        print('epoch loss:', epoch_loss)

        # validate
        model.eval()
        num_correct, num_samples = 0, 0
        loss_local = 0
        with torch.no_grad():
            for input, depth in val_loader:
                input_var = Variable(input.type(dtype))
                depth_var = Variable(depth.type(dtype))

                output = model(input_var)
                if num_epochs == epoch + 1:
                    # for the format of the saved test images, see how the loader arranges its data
                    # input_rgb_image = input_var[0].data.permute(1, 2, 0).cpu().numpy().astype(np.uint8)
                    input_rgb_image = input[0].data.permute(1, 2, 0)
                    input_gt_depth_image = depth_var[0][0].data.cpu().numpy().astype(np.float32)
                    pred_depth_image = output[0].data.squeeze().cpu().numpy().astype(np.float32)

                    input_gt_depth_image /= np.max(input_gt_depth_image)
                    pred_depth_image /= np.max(pred_depth_image)

                    plot.imsave('./result/input_rgb_epoch_{}.png'.format(start_epoch + epoch + 1), input_rgb_image)
                    plot.imsave('./result/gt_depth_epoch_{}.png'.format(start_epoch + epoch + 1), input_gt_depth_image, cmap="viridis")
                    plot.imsave('./result/pred_depth_epoch_{}.png'.format(start_epoch + epoch + 1), pred_depth_image, cmap="viridis")

                loss_local += loss_fn(output, depth_var)

                num_samples += 1

        err = float(loss_local) / num_samples
        print('val_error: %f' % err)

        if err < best_val_err or epoch == num_epochs - 1:
            best_val_err = err
            torch.save({
                'epoch': start_epoch + epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, './model/model_' + str(start_epoch + epoch + 1) + '.pth')

        if epoch % 10 == 0:
            # decay the learning rate and push it into the existing optimizer;
            # updating the local variable alone would have no effect here
            learning_rate = learning_rate * 0.8
            for param_group in optimizer.param_groups:
                param_group['lr'] = learning_rate
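loss_huber() is used above but not defined in the snippet. The reverse Huber (berHu) loss from the FCRN paper can be sketched as follows, with the threshold c set to one fifth of the maximum absolute residual in the batch; the class name and the clamp guard are assumptions:

import torch

class BerHuLoss(torch.nn.Module):
    """Sketch of the reverse Huber (berHu) loss (Laina et al., 2016)."""
    def forward(self, pred, target):
        diff = (pred - target).abs()
        c = (0.2 * diff.max()).clamp(min=1e-6)   # per-batch threshold
        l2 = (diff ** 2 + c ** 2) / (2 * c)      # (x^2 + c^2) / 2c beyond c
        return torch.where(diff <= c, diff, l2).mean()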
Example #7
def get_tf_implementation(weights_path, all_layer_outs=False):
    print("Making Structure")
    tf_model = model.make_deng_tf_test(verbose=True, all_layer_outs=all_layer_outs)
    print("Loading Weights")
    tf_model = weights.load_weights(tf_model, weights_path, verbose=True)
    return tf_model
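A hypothetical call; the weights path is illustrative, and the summary() call assumes the returned object is a Keras model:

# Illustrative path only.
tf_model = get_tf_implementation('weights/deng_tf_weights.npy')
tf_model.summary()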