def evaluate():
    batch_dataset = build_features.get_test_batches(batch_size=1,
                                                    infinite=False)
    itr_dataset = batch_dataset.make_one_shot_iterator()

    # Create the network
    *inputs, labels = itr_dataset.get_next()
    net = regnet.Regnet(hasPipeline=True,
                        pipeline_inputs=inputs,
                        pipeline_labels=labels)

    # Load pretrained model
    model_path = paths.checkpoints.regnet()
    net.model.load_weights(str(model_path))

    # Configure the model for evaluation
    metrics = [tf.losses.huber_loss]
    metrics.extend(net.metrics)
    net.model.compile(optimizer=net.train_opt,
                      loss=net.model_loss,
                      target_tensors=[net.label],
                      metrics=metrics)

    # Evaluate
    test_files = build_features.get_dataset_tensors('test')
    print('feed forward on {} test frames'.format(len(test_files)))

    loss = net.model.evaluate(steps=len(test_files))
    loss = zip(net.model.metrics_names, loss)

    for label, value in loss:
        print('{}: {}'.format(label, value))
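
In TF 1.x, tf.losses.huber_loss is declared as (labels, predictions), which happens to line up positionally with the Keras metric signature (y_true, y_pred). A thin wrapper (a sketch, not part of the original code) makes the mapping explicit:

import tensorflow as tf

def huber_metric(y_true, y_pred):
    # Map the Keras metric arguments onto the TF loss keywords explicitly
    return tf.losses.huber_loss(labels=y_true, predictions=y_pred)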
def feed_forward(input_rgb,
                 input_depth,
                 h_init,
                 drive_date,
                 h_gt=None,
                 verbose=VERBOSE):
    print_args = dict()

    # Create the network
    net = regnet.Regnet()

    # Load pretrained model
    model_path = str(paths.checkpoints.regnet(
        rot=ROT, disp=DISP))  # ./checkpoints/regnet/train
    net.model.load_weights(model_path)

    # Predict
    pred = net.model.predict([[input_rgb], [input_depth]])  # lists add the batch dimension

    # Transform prediction
    if DUAL_QUATERNIONS:
        pred = quaternion.dualqt_op.mat4_dualqt(pred)
    else:
        pred = np.reshape(pred, (4, 4))

    # Log loss
    label = h_init
    if DUAL_QUATERNIONS:
        label = quaternion.dualqt_op.mat4_dualqt(label)

    total_loss = euclidean_loss_np(logits=pred, labels=label)
    total_loss = np.array([total_loss])
    print_args['loss'] = total_loss

    rot_loss = euclidean_loss_np(logits=quaternion.helpers.mat3_mat4(pred),
                                 labels=quaternion.helpers.mat3_mat4(label))
    rot_loss = np.array([rot_loss])
    print_args['rot_loss'] = rot_loss

    disp_loss = euclidean_loss_np(logits=quaternion.helpers.vec_mat4(pred),
                                  labels=quaternion.helpers.vec_mat4(label))
    disp_loss = np.array([disp_loss])
    print_args['disp_loss'] = disp_loss

    # Make H^ for projection
    if h_gt is None:
        h_gt = make_decalib.get_Hgt(drive_date)
    h_init = np.dot(h_init, h_gt)  # use the raw 4x4 matrix; label may be in dual-quaternion form here
    h_predict = phi_decal(h_init, pred)  # H_init . H_gt^-1
    save_path = paths.ROOT_PATH.joinpath('pred.txt')
    make_decalib.save_as_txt(h_predict, save_path)

    if verbose == Verbose.DEBUG:
        diff = np.subtract(label, pred)
        diff = np.absolute(diff)

        print_args['prediction'] = pred
        print_args['actual'] = label
        print_args['difference'] = diff
    print_results(**print_args)
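
The helper euclidean_loss_np is not shown in these examples; a minimal sketch, assuming it computes the L2 norm of the element-wise difference between prediction and label:

import numpy as np

def euclidean_loss_np(logits, labels):
    # Euclidean (L2) distance between flattened prediction and label
    return np.linalg.norm(np.asarray(logits) - np.asarray(labels))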
Example 3
def evaluate(batch_size=1, dataset='valid'):
    assert dataset in ('valid', 'test'), \
        "dataset has to be one of 'valid' or 'test'"

    # Destroy old graph
    K.clear_session()

    # Initialize validation batch generator
    if dataset == 'valid':
        batch_dataset = build_features.get_valid_batches(batch_size=batch_size,
                                                         infinite=False)
    elif dataset == 'test':
        batch_dataset = build_features.get_test_batches(batch_size=batch_size,
                                                        infinite=False)
    itr_dataset = batch_dataset.make_one_shot_iterator()

    # Create the network
    *inputs, labels = itr_dataset.get_next()
    net = regnet.Regnet(hasPipeline=True,
                        pipeline_inputs=inputs,
                        pipeline_labels=labels)

    # Load pretrained model
    model_path = paths.checkpoints.regnet(
        rot=ROT, disp=DISP)  # ./checkpoints/regnet/train
    net.model.load_weights(str(model_path))

    # Configure the model for evaluation
    net.model.compile(optimizer=net.train_opt,
                      loss=net.model_loss,
                      target_tensors=[net.label])

    # Evaluate
    valid_files = build_features.get_dataset_tensors(dataset)
    print('feed forward on {} {} frames'.format(len(valid_files), dataset))

    loss = net.model.evaluate(steps=len(valid_files))
    loss = zip(net.model.metrics_names, loss)

    for label, value in loss:
        value = np.array([value])
        print('{} {}: {}'.format(dataset, label, value))
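
A possible driver (a usage sketch, not in the original source) that evaluates both splits back to back; the K.clear_session() call inside evaluate rebuilds the graph on each iteration:

if __name__ == '__main__':
    for split in ('valid', 'test'):
        evaluate(batch_size=1, dataset=split)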
Example 4
def run(epochs,
        num_batches,
        batch_size=1,
        learning_rate=0.001,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-08,
        save_every=10,
        patience=5,
        baseline=2e-5,
        resume=False):
    # Destroy old graph
    K.clear_session()

    # Initialize batch generators
    batch_train = build_features.get_train_batches(batch_size=batch_size)
    batch_valid = build_features.get_valid_batches(batch_size=batch_size)

    # Create TensorFlow Iterator object
    itr_train = build_features.make_iterator(batch_train)
    itr_valid = build_features.make_iterator(batch_valid)

    # Init callbacks
    cbs = list()

    # EarlyStopping callback: stops when the monitored loss stops improving
    # cbs.append(early_stopping.EarlyStopping(monitor='val_loss', mode='min', patience=patience,
    #                                         verbose=1, baseline=baseline))

    # ModelCheckpoint callback: saves the best model, checking every save_every epochs
    save_path = paths.checkpoints.regnet(
        rot=ROT, disp=DISP)  # ./checkpoints/regnet/train
    save_path.parent.mkdir(exist_ok=True, parents=True)
    if save_path.exists() and not resume:
        save_path.unlink()  # deletes file before training
    cbs.append(
        callbacks.ModelCheckpoint(str(save_path),
                                  save_best_only=True,
                                  period=save_every))

    # TensorBoard callback: saves logs for tensorboard
    log_path = str(paths.logs.regnet())  # ./logs/regnet/train
    cbs.append(
        callbacks.TensorBoard(log_dir=log_path,
                              batch_size=batch_size,
                              write_graph=True))

    # CSVLogger callback: appends all per-epoch losses to a CSV log
    cbs.append(
        callbacks.CSVLogger(str(save_path.with_suffix('.csv')),
                            append=True,
                            separator=','))

    # Create the network
    net = regnet.Regnet(learning_rate, beta1, beta2, epsilon)

    # Configure the model for training
    net.model.compile(optimizer=net.train_opt,
                      loss=net.model_loss,
                      metrics=net.metrics)

    # Load the pretrained ImageNet weights
    load_weights.imagenet_weights(net.model)

    if resume:
        # Resuming replaces the freshly built model with the saved checkpoint
        net.model = keras.models.load_model(str(save_path),
                                            custom_objects=CUSTOM_LAYERS,
                                            compile=True)

    # Train network
    net.model.fit_generator(generator=itr_train,
                            validation_data=itr_valid,
                            validation_steps=batch_size,
                            epochs=epochs,
                            steps_per_epoch=num_batches,
                            callbacks=cbs,
                            verbose=1,
                            workers=0)
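
A usage sketch with illustrative hyperparameter values (the numbers below are assumptions, not defaults from the original project):

if __name__ == '__main__':
    run(epochs=100,       # assumed value
        num_batches=500,  # assumed value
        batch_size=1,
        save_every=10,
        resume=False)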
Example 5
def zip_weights_biases(weights, biases):  # header missing in the source; name assumed
    # Pair each weight tensor with the bias tensor from the same layer
    zipped_array = np.column_stack((weights, biases))
    for weight, bias in zipped_array:
        weights_layer_name = Path(weight.name).parent
        biases_layer_name = Path(bias.name).parent
        assert weights_layer_name == biases_layer_name

    return zipped_array


def assign_layer_var(model, checkpoint_path, weight, biases, sess_regnet):
    # Copy one (weight, bias) pair from the TF session into the matching Keras layer
    layer_name = Path(weight.name).parent.name

    weights_var = sess_regnet.run(weight)
    biases_var = sess_regnet.run(biases)

    model.get_layer(layer_name).set_weights([weights_var, biases_var])


def get_variable_by_name(checkpoint_path, name):
    return tf.train.load_variable(str(checkpoint_path), name)


if __name__ == '__main__':
    net = regnet.Regnet(LEARNING_RATE, BETA1, BETA2, EPSILON)
    net.model.compile(optimizer=net.train_opt,
                      loss=net.model_loss,
                      metrics=net.metrics)
    load_tensorflow_weights(net.model, PRETRIAN_MODEL_PATH)
    net.model.save('training.h5')  # creates an HDF5 file 'training.h5'
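
Once saved, the HDF5 file can be restored with the same custom objects; a minimal sketch, assuming CUSTOM_LAYERS (seen in the training script above) maps Regnet's custom layer names:

from keras.models import load_model

restored = load_model('training.h5',
                      custom_objects=CUSTOM_LAYERS,
                      compile=True)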