Example #1
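All five examples below are `main()` functions taken from the same depth-estimation training script, so they rely on module-level imports and constants that are not shown. A minimal preamble they appear to assume is sketched here; the names are taken from the code, but the concrete values are placeholders, not from the source:

# Assumed preamble (reconstructed from the identifiers used below; values are placeholders)
import os

import h5py
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.callbacks import CSVLogger
from PIL import Image

import models   # defines coarse_network_model, refined_network_model and the losses
import metrics  # defines rmse, abs_relative_diff, etc.

RUN_REFINE = False                    # train the refine network instead of the coarse one
IMAGE_WIDTH, IMAGE_HEIGHT = 304, 228  # network input size (placeholder values)
TARGET_WIDTH, TARGET_HEIGHT = 74, 55  # predicted depth-map size (placeholder values)
NYU_FILE_PATH = 'data/nyu_depth_v2_labeled.mat'
COARSE_CHECKPOINT_DIR = 'checkpoints/coarse'
COARSE_CHECKPOINT_PATH = COARSE_CHECKPOINT_DIR + '/cp-{epoch:04d}.ckpt'
REFINED_CHECKPOINT_DIR = 'checkpoints/refine'
REFINED_CHECKPOINT_PATH = REFINED_CHECKPOINT_DIR + '/cp-{epoch:04d}.ckpt'
PREDICT_FILE_PATH = 'predictions'
TRAIN_PREDICT_FILE_PATH = 'train_predictions'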
def main():
    print(tf.__version__)

    cp_callback_coarse = tf.keras.callbacks.ModelCheckpoint(
        filepath=COARSE_CHECKPOINT_PATH, save_weights_only=True, verbose=1)
    cp_callback_refine = tf.keras.callbacks.ModelCheckpoint(
        filepath=REFINED_CHECKPOINT_PATH, save_weights_only=True, verbose=1)

    csv_logger = CSVLogger('log.csv', append=False, separator=',')

    nyu_data_generator = NyuDepthGenerator(batch_size=10,
                                           csv_path='data/train.csv')
    # eval_data_generator = NyuDepthGenerator(batch_size=1, csv_path='data/dev.csv')

    latest_checkpoint_refine = tf.train.latest_checkpoint(
        REFINED_CHECKPOINT_DIR)
    latest_checkpoint_coarse = tf.train.latest_checkpoint(
        COARSE_CHECKPOINT_DIR)
    if RUN_REFINE:
        refine_model, coarse_model = models.refined_network_model()
        model = refine_model
        if latest_checkpoint_refine:
            print("\nRestored refine model from checkpoint")
            refine_model.load_weights(latest_checkpoint_refine)
        elif latest_checkpoint_coarse:
            print("\nRestored coarse model from checkpoint")
            coarse_model.load_weights(latest_checkpoint_coarse)
        else:
            print(
                "\nCoarse model not restored. Please exit and run coarse model first"
            )
            print("\nStarting one pass training")
    else:
        coarse_model, _, _ = models.coarse_network_model()
        model = coarse_model
        if latest_checkpoint_coarse:
            print("\nRestored coarse model from checkpoint")
            coarse_model.load_weights(latest_checkpoint_coarse)
        else:
            print("\nNo coarse checkpoint saved")

    model.compile(
        optimizer=keras.optimizers.Adam(),  # Optimizer
        # Loss function to minimize
        loss=models.depth_loss,
        # List of metrics to monitor
        metrics=[
            metrics.rmse, metrics.abs_relative_diff,
            metrics.squared_relative_diff
        ])

    predict_while_train = PredictWhileTrain(nyu_data_generator)
    if not os.path.isdir(TRAIN_PREDICT_FILE_PATH):
        os.mkdir(TRAIN_PREDICT_FILE_PATH)
    print('Fit model on training data')
    if RUN_REFINE:
        history = model.fit(
            x=nyu_data_generator,
            epochs=30,
            callbacks=[cp_callback_refine, csv_logger, predict_while_train])
    else:
        history = model.fit(
            x=nyu_data_generator,
            epochs=30,
            callbacks=[cp_callback_coarse, csv_logger, predict_while_train])

    print('\nHistory dict:', history.history)

    result = model.evaluate(x=nyu_data_generator, steps=1)
    print("Final eval loss: ", result)

    if not os.path.isdir(PREDICT_FILE_PATH):
        os.mkdir(PREDICT_FILE_PATH)

    predictions = model.predict(x=nyu_data_generator, steps=1)
    print("Prediction dim: " + str(predictions.shape))

    for i in range(predictions.shape[0]):
        # Scale each prediction by its own max so it fills the 8-bit range
        predictions[i] = (predictions[i] / np.max(predictions[i])) * 255.0
        image_name = os.path.join(PREDICT_FILE_PATH, '%05d_d.png' % i)
        image_im = Image.fromarray(np.uint8(predictions[i].reshape(
            TARGET_HEIGHT, TARGET_WIDTH)),
                                   mode="L")
        image_im.save(image_name)
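Example #1 feeds `model.fit` a `NyuDepthGenerator`, whose implementation is not shown. A plausible minimal version is a `keras.utils.Sequence` that reads `image_path,depth_path` rows from the CSV; the following is a sketch under that assumption, not the original class:

class NyuDepthGenerator(tf.keras.utils.Sequence):
    """Yields (image, depth) batches from a CSV of 'image_path,depth_path' rows.

    A sketch of the generator the examples assume; the real class may differ
    (e.g. in depth encoding or shuffling).
    """

    def __init__(self, batch_size, csv_path):
        self.batch_size = batch_size
        with open(csv_path) as f:
            self.pairs = [line.strip().split(',') for line in f if line.strip()]

    def __len__(self):
        return len(self.pairs) // self.batch_size

    def __getitem__(self, idx):
        batch = self.pairs[idx * self.batch_size:(idx + 1) * self.batch_size]
        images, depths = [], []
        for image_path, depth_path in batch:
            image = Image.open(image_path).resize((IMAGE_WIDTH, IMAGE_HEIGHT))
            depth = Image.open(depth_path).resize((TARGET_WIDTH, TARGET_HEIGHT))
            images.append(np.asarray(image) / 255.0)   # RGB scaled to [0, 1]
            depths.append(np.asarray(depth, dtype=np.float32))
        return np.asarray(images), np.asarray(depths)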
Example #2
def main():
    print(tf.__version__)

    x_train = []
    y_train = []
    x_eval = []
    y_eval = []
    h5file = h5py.File(NYU_FILE_PATH, 'r')
    file_count = h5file['images'].shape[0]

    train_split = 0.9  # fraction of samples used for training; the remainder is the dev split
    train_count = int(file_count * train_split)
    for i in range(file_count):
        if i % 10 == 0:
            print("processing file " + str(i))

        # .mat arrays are stored column-major; transpose to height x width (x channels)
        image = np.transpose(h5file['images'][i], (2, 1, 0))
        depth = np.transpose(h5file['depths'][i], (1, 0))

        image_im = Image.fromarray(np.uint8(image))
        image_im = image_im.resize((IMAGE_WIDTH, IMAGE_HEIGHT))
        image_np_arr = np.array(image_im)

        # Squeeze depths (0-10 m) into 8 bits so PIL can resize, then map back to metres
        depth_scaled = (depth / 10.0) * 255.0
        depth_im = Image.fromarray(np.uint8(depth_scaled))
        depth_im = depth_im.resize((TARGET_WIDTH, TARGET_HEIGHT))
        depth_np_arr = np.array(depth_im)
        depth_np_arr = depth_np_arr / 255.0 * 10.0
        if i < train_count:
            x_train.append(image_np_arr)
            y_train.append(depth_np_arr)
        else:
            x_eval.append(image_np_arr)
            y_eval.append(depth_np_arr)

    x_train = np.array(x_train) / 255.0
    y_train = np.array(y_train)
    x_eval = np.array(x_eval) / 255.0
    y_eval = np.array(y_eval)
    print(x_train.shape)
    print(y_train.shape)
    print(x_eval.shape)
    print(y_eval.shape)

    cp_callback_coarse = tf.keras.callbacks.ModelCheckpoint(
        filepath=COARSE_CHECKPOINT_PATH,
        save_weights_only=True,
        verbose=1,
        period=10)  # save every 10 epochs; 'period' is deprecated in newer TF in favour of save_freq
    cp_callback_refine = tf.keras.callbacks.ModelCheckpoint(
        filepath=REFINED_CHECKPOINT_PATH,
        save_weights_only=True,
        verbose=1,
        period=10)

    csv_logger = CSVLogger('log.csv', append=False, separator=',')

    latest_checkpoint_refine = tf.train.latest_checkpoint(
        REFINED_CHECKPOINT_DIR)
    latest_checkpoint_coarse = tf.train.latest_checkpoint(
        COARSE_CHECKPOINT_DIR)
    if RUN_REFINE:
        refine_model, coarse_model = models.refined_network_model()
        model = refine_model
        if latest_checkpoint_refine:
            print("\nRestored refine model from checkpoint")
            refine_model.load_weights(latest_checkpoint_refine)
        elif latest_checkpoint_coarse:
            print("\nRestored coarse model from checkpoint")
            coarse_model.load_weights(latest_checkpoint_coarse)
        else:
            print(
                "\nCoarse model not restored. Please exit and run coarse model first"
            )
            print("\nStarting one pass training")
    else:
        coarse_model, _, _ = models.coarse_network_model()
        model = coarse_model
        if latest_checkpoint_coarse:
            print("\nRestored coarse model from checkpoint")
            coarse_model.load_weights(latest_checkpoint_coarse)
        else:
            print("\nNo coarse checkpoint saved")

    model.compile(
        optimizer=keras.optimizers.Adam(),  # Optimizer
        # Loss function to minimize
        loss=models.depth_loss_2,
        metrics=[
            metrics.abs_relative_diff, metrics.squared_relative_diff,
            metrics.rmse, metrics.rmse_log, metrics.rmse_scale_invariance_log
        ])

    # Note: neither callback below is passed to fit(), so they currently have no effect.
    predict_while_train = PredictWhileTrain(x_train)
    early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  patience=5)
    if not os.path.isdir(TRAIN_PREDICT_FILE_PATH):
        os.mkdir(TRAIN_PREDICT_FILE_PATH)
    print('Fit model on training data')
    if RUN_REFINE:
        history = model.fit(x=x_train,
                            y=y_train,
                            validation_data=(x_eval, y_eval),
                            epochs=300,
                            callbacks=[cp_callback_refine, csv_logger])
    else:
        history = model.fit(x=x_train,
                            y=y_train,
                            validation_data=(x_eval, y_eval),
                            epochs=300,
                            callbacks=[cp_callback_coarse, csv_logger])

    print('\nHistory dict:', history.history)

    result = model.evaluate(x=x_eval, y=y_eval)
    print("Final eval loss on validation: ", result)
Example #3
def main():
    print(tf.__version__)

    cp_callback_coarse = tf.keras.callbacks.ModelCheckpoint(
        filepath=COARSE_CHECKPOINT_PATH,
        save_weights_only=True,
        verbose=1,
        period=10)
    cp_callback_refine = tf.keras.callbacks.ModelCheckpoint(
        filepath=REFINED_CHECKPOINT_PATH,
        save_weights_only=True,
        verbose=1,
        period=10)

    csv_logger = CSVLogger('log.csv', append=False, separator=',')

    x_train, y_train = csv_inputs(csv_file_path='data/train.csv')
    x_eval, y_eval = csv_inputs(csv_file_path='data/dev.csv')

    latest_checkpoint_refine = tf.train.latest_checkpoint(
        REFINED_CHECKPOINT_DIR)
    latest_checkpoint_coarse = tf.train.latest_checkpoint(
        COARSE_CHECKPOINT_DIR)
    if RUN_REFINE:
        refine_model, coarse_model = models.refined_network_model()
        model = refine_model
        if latest_checkpoint_refine:
            print("\nRestored refine model from checkpoint")
            refine_model.load_weights(latest_checkpoint_refine)
        elif latest_checkpoint_coarse:
            print("\nRestored coarse model from checkpoint")
            coarse_model.load_weights(latest_checkpoint_coarse)
        else:
            print(
                "\nCoarse model not restored. Please exit and run coarse model first"
            )
            print("\nStarting one pass training")
    else:
        coarse_model, _, _ = models.coarse_network_model()
        model = coarse_model
        if latest_checkpoint_coarse:
            print("\nRestored coarse model from checkpoint")
            coarse_model.load_weights(latest_checkpoint_coarse)
        else:
            print("\nNo coarse checkpoint saved")

    model.compile(
        optimizer=keras.optimizers.Adam(),  # Optimizer
        # Loss function to minimize
        loss=models.rmse_scale_invariance_log_loss,
        metrics=[
            metrics.abs_relative_diff, metrics.squared_relative_diff,
            metrics.rmse, metrics.rmse_log, metrics.rmse_scale_invariance_log
        ])

    predict_while_train = PredictWhileTrain(x_train)
    early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  patience=5)  # defined but not passed to fit() below
    if not os.path.isdir(TRAIN_PREDICT_FILE_PATH):
        os.mkdir(TRAIN_PREDICT_FILE_PATH)
    print('Fit model on training data')
    if RUN_REFINE:
        history = model.fit(
            x=x_train,
            y=y_train,
            validation_data=(x_eval, y_eval),
            epochs=2005,
            callbacks=[cp_callback_refine, csv_logger, predict_while_train])
    else:
        history = model.fit(
            x=x_train,
            y=y_train,
            validation_data=(x_eval, y_eval),
            epochs=2005,
            callbacks=[cp_callback_coarse, csv_logger, predict_while_train])

    print('\nHistory dict:', history.history)

    result = model.evaluate(x=x_eval, y=y_eval)
    print("Final eval loss on validation: ", result)
Example #4
def main():
    print(tf.__version__)

    latest_checkpoint_refine = tf.train.latest_checkpoint(
        REFINED_CHECKPOINT_DIR)
    latest_checkpoint_coarse = tf.train.latest_checkpoint(
        COARSE_CHECKPOINT_DIR)
    if RUN_REFINE:
        refine_model, coarse_model = models.refined_network_model()
        model = refine_model
        if latest_checkpoint_refine:
            print("\nRestored refine model from checkpoint")
            refine_model.load_weights(latest_checkpoint_refine)
        elif latest_checkpoint_coarse:
            print("\nRestored coarse model from checkpoint")
            coarse_model.load_weights(latest_checkpoint_coarse)
        else:
            print(
                "\nCoarse model not restored. Please exit and run coarse model first"
            )
            print("\nStarting one pass training")
    else:
        coarse_model, _, _ = models.coarse_network_model()
        model = coarse_model
        if latest_checkpoint_coarse:
            print("\nRestored coarse model from checkpoint")
            coarse_model.load_weights(latest_checkpoint_coarse)
        else:
            print("\nNo coarse checkpoint saved")

    model.compile(
        optimizer=keras.optimizers.Adam(),  # Optimizer
        # Loss function to minimize
        loss=models.depth_loss,
        # No extra metrics: evaluate() will report the loss only
        metrics=None)

    x_eval, y_eval = csv_inputs(csv_file_path='data/dev.csv')
    result = model.evaluate(x=x_eval, y=y_eval)
    print("Final eval loss on validation: ", result)

    if not os.path.isdir(PREDICT_FILE_PATH):
        os.mkdir(PREDICT_FILE_PATH)

    predictions = model.predict(x=x_eval)
    print("Prediction dim: " + str(predictions.shape))

    for i in range(predictions.shape[0]):
        # Normalize each map by its own max; brightness is therefore not comparable across images
        predictions[i] = (predictions[i] / np.max(predictions[i])) * 255.0
        prediction_name = os.path.join(PREDICT_FILE_PATH,
                                       '%05d_predict.png' % i)
        prediction_im = Image.fromarray(
            np.uint8(predictions[i].reshape(TARGET_HEIGHT, TARGET_WIDTH)))
        prediction_im.save(prediction_name)

        color_name = os.path.join(PREDICT_FILE_PATH, '%05d_c.png' % i)
        color_im = Image.fromarray(np.uint8(x_eval[i] * 255.0))
        color_im.save(color_name)

        depth_name = os.path.join(PREDICT_FILE_PATH, '%05d_d.png' % i)
        depth_im = Image.fromarray(np.uint8(y_eval[i] * 255.0))
        depth_im.save(depth_name)
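Examples #3 and #4 load their arrays through `csv_inputs`, which is also not shown. Consistent with how the outputs are used here (x scaled to [0, 1], and y saved via `y_eval[i] * 255.0`), a minimal sketch could be the following; the image sizes and depth encoding are assumptions:

def csv_inputs(csv_file_path):
    """Loads (image, depth) arrays from a CSV of 'image_path,depth_path' rows.

    A sketch of the unshown helper, assuming RGB inputs scaled to [0, 1] and
    8-bit depth PNGs scaled to [0, 1], matching how Example #4 saves them.
    """
    images, depths = [], []
    with open(csv_file_path) as f:
        for line in f:
            if not line.strip():
                continue
            image_path, depth_path = line.strip().split(',')
            image = Image.open(image_path).resize((IMAGE_WIDTH, IMAGE_HEIGHT))
            depth = Image.open(depth_path).convert('L').resize(
                (TARGET_WIDTH, TARGET_HEIGHT))
            images.append(np.asarray(image) / 255.0)
            depths.append(np.asarray(depth) / 255.0)
    return np.asarray(images), np.asarray(depths)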
Example #5
def main():
    print(tf.__version__)

    x_eval = []
    y_eval = []
    h5file = h5py.File(NYU_FILE_PATH, 'r')
    file_count = h5file['images'].shape[0]

    # file_count = 100  # uncomment to smoke-test on a small subset
    train_split = 0.9  # must match the split used during training
    train_count = int(file_count * train_split)
    for i in range(file_count):
        if i < train_count:
            continue  # skip the training portion; only the dev split is evaluated
        print("processing image " + str(i))
        # .mat arrays are stored column-major; transpose to height x width (x channels)
        image = np.transpose(h5file['images'][i], (2, 1, 0))
        depth = np.transpose(h5file['depths'][i], (1, 0))

        image_im = Image.fromarray(np.uint8(image))
        image_im = image_im.resize((IMAGE_WIDTH, IMAGE_HEIGHT))
        image_np_arr = np.array(image_im)

        # Squeeze depths (0-10 m) into 8 bits so PIL can resize, then map back to metres
        depth_scaled = (depth / 10.0) * 255.0
        depth_im = Image.fromarray(np.uint8(depth_scaled))
        depth_im = depth_im.resize((TARGET_WIDTH, TARGET_HEIGHT))
        depth_np_arr = np.array(depth_im)
        depth_np_arr = depth_np_arr / 255.0 * 10.0
        x_eval.append(image_np_arr)
        y_eval.append(depth_np_arr)

    x_eval = np.array(x_eval) / 255.0
    y_eval = np.array(y_eval)

    latest_checkpoint_refine = tf.train.latest_checkpoint(
        REFINED_CHECKPOINT_DIR)
    latest_checkpoint_coarse = tf.train.latest_checkpoint(
        COARSE_CHECKPOINT_DIR)
    if RUN_REFINE:
        refine_model, coarse_model = models.refined_network_model()
        model = refine_model
        if latest_checkpoint_refine:
            print("\nRestored refine model from checkpoint")
            refine_model.load_weights(latest_checkpoint_refine)
        elif latest_checkpoint_coarse:
            print("\nRestored coarse model from checkpoint")
            coarse_model.load_weights(latest_checkpoint_coarse)
        else:
            print(
                "\nCoarse model not restored. Please exit and run coarse model first"
            )
            print("\nStarting one pass training")
    else:
        coarse_model, _, _ = models.coarse_network_model()
        model = coarse_model
        if latest_checkpoint_coarse:
            print("\nRestored coarse model from checkpoint")
            coarse_model.load_weights(latest_checkpoint_coarse)
        else:
            print("\nNo coarse checkpoint saved")

    model.compile(
        optimizer=keras.optimizers.Adam(),  # Optimizer
        # Loss function to minimize
        loss=models.depth_loss_2,
        # List of metrics to monitor
        metrics=[
            metrics.abs_relative_diff, metrics.squared_relative_diff,
            metrics.rmse, metrics.rmse_log, metrics.rmse_scale_invariance_log
        ])

    result = model.evaluate(x=x_eval, y=y_eval, batch_size=32)
    print("Final eval loss on validation: ", result)

    # print("truth: " + str(y_eval))

    # return
    if not os.path.isdir(PREDICT_FILE_PATH):
        os.mkdir(PREDICT_FILE_PATH)

    predictions = model.predict(x=x_eval)
    print("Prediction dim: " + str(predictions.shape))

    for i in range(predictions.shape[0]):
        # Map metres (0-10) to 8-bit with a fixed scale, unlike Example #4's per-image max
        predictions[i] = (predictions[i] / 10.0) * 255.0
        prediction_name = os.path.join(PREDICT_FILE_PATH,
                                       '%05d_predict.png' % i)
        prediction_im = Image.fromarray(
            np.uint8(predictions[i].reshape(TARGET_HEIGHT, TARGET_WIDTH)))
        prediction_im.save(prediction_name)

        color_name = os.path.join(PREDICT_FILE_PATH, '%05d_c.png' % i)
        color_im = Image.fromarray(np.uint8(x_eval[i] * 255.0))
        color_im.save(color_name)

        depth_name = os.path.join(PREDICT_FILE_PATH, '%05d_d.png' % i)
        depth_im = Image.fromarray(np.uint8((y_eval[i] / 10.0) * 255.0))
        depth_im.save(depth_name)
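Example #5 monitors the standard single-image depth metrics popularized by Eigen et al. (2014). The project's `metrics` module is not shown; these are sketches of the usual definitions (the epsilon guard against log(0) and division by zero is an assumption, not from the source):

EPS = 1e-6  # assumed guard value

def abs_relative_diff(y_true, y_pred):
    # mean(|y_pred - y_true| / y_true)
    return tf.reduce_mean(tf.abs(y_pred - y_true) / tf.maximum(y_true, EPS))

def squared_relative_diff(y_true, y_pred):
    # mean((y_pred - y_true)^2 / y_true)
    return tf.reduce_mean(tf.square(y_pred - y_true) / tf.maximum(y_true, EPS))

def rmse(y_true, y_pred):
    # sqrt(mean((y_pred - y_true)^2))
    return tf.sqrt(tf.reduce_mean(tf.square(y_pred - y_true)))

def rmse_log(y_true, y_pred):
    # sqrt(mean((log y_pred - log y_true)^2))
    d = tf.math.log(tf.maximum(y_pred, EPS)) - tf.math.log(tf.maximum(y_true, EPS))
    return tf.sqrt(tf.reduce_mean(tf.square(d)))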