import numpy

# get_training_data / get_testing_data are assumed to be provided by the
# surrounding project (e.g. a data-preparation module); they are not defined here.


def load_data():
    """Get data with labels, split into a training set and a test set."""
    training_data = get_training_data()
    testing_data = get_testing_data()

    # Each helper is assumed to return a (features, labels) pair.
    X_train, y_train = training_data[0], training_data[1]
    X_test, y_test = testing_data[0], testing_data[1]

    # Cast the labels to int32 so they can be used directly as class indices.
    y_train = numpy.asarray(y_train, dtype=numpy.int32)
    y_test = numpy.asarray(y_test, dtype=numpy.int32)

    return dict(
        X_train=X_train,
        y_train=y_train,
        X_test=X_test,
        y_test=y_test,
        num_examples_train=len(X_train),
        num_examples_test=len(X_test),
        input_dim=len(X_train[0]),  # features per example (assumes flat feature vectors)
        output_dim=10,
    )
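
# A minimal usage sketch (not part of the original example), assuming load_data()
# is importable from this module; it just reports the size of each split and the
# input/output dimensions.
if __name__ == "__main__":
    dataset = load_data()
    print("training examples:", dataset["num_examples_train"])
    print("test examples:", dataset["num_examples_test"])
    print("input dim:", dataset["input_dim"], "output dim:", dataset["output_dim"])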
Example #2
from prepare_data import get_training_data
from prepare_plots import plot_results
from build_cnn_encoderdecoder_model import build_cnn_encoderdecoder_model

import numpy as np

if __name__ == "__main__":
    profile_pngs_objs, midcurve_pngs_objs = get_training_data(size=(128, 128))

    profile_pngs_objs = np.asarray(profile_pngs_objs)
    midcurve_pngs_objs = np.asarray(midcurve_pngs_objs)

    profile_pngs_objs = np.expand_dims(profile_pngs_objs, axis=-1)
    midcurve_pngs_objs = np.expand_dims(midcurve_pngs_objs, axis=-1)

    profile_pngs_objs = (profile_pngs_objs - 127.5) / 127.5  # normalize to [-1, 1]
    midcurve_pngs_objs = (midcurve_pngs_objs - 127.5) / 127.5  # normalize to [-1, 1]
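
    # (For reference, not in the original example: the inverse mapping back to
    #  8-bit pixel values would be ((x * 127.5) + 127.5).astype(np.uint8).)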

    # Build two 128x128 coordinate channels: x_coord holds the column index of
    # each pixel, y_coord the row index (a positional encoding for the images).
    x_coord = np.zeros(shape=(128, 128, 1))
    y_coord = np.zeros(shape=(128, 128, 1))
    for i in range(0, 128):
        x_coord[:, i, 0] = i
        y_coord[i, :, 0] = i
    coords = np.append(x_coord, y_coord, axis=-1)
    coords = (coords - 63.5) / 63.5  # normalize to [-1, 1]
    #coords = np.expand_dims(coords, axis=0)
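
    # An equivalent vectorized construction (a sketch, not in the original
    # example): np.meshgrid yields the same column/row index grids in one call.
    #     xs, ys = np.meshgrid(np.arange(128), np.arange(128))
    #     coords = np.stack([xs, ys], axis=-1)
    #     coords = (coords - 63.5) / 63.5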

    profile_pngs = []

    for i in range(len(profile_pngs_objs)):
        # The original example is truncated at this point. A plausible (assumed)
        # continuation stacks the coordinate channels onto each profile image:
        profile_pngs.append(np.concatenate([profile_pngs_objs[i], coords], axis=-1))
Example #3
                
            # (This example begins part-way through simple_encoderdecoder.train():
            #  the branch below trains the autoencoder and saves the models, while
            #  the else-branch further down reloads previously saved ones. Imports
            #  such as load_model, e.g. from keras.models, are assumed to appear
            #  above the truncated portion.)
            self.x = profile_pngs_objs
            self.y = midcurve_pngs_objs
            self.autoencoder.fit(self.x, self.y,
                                 epochs=self.epochs,
                                 batch_size=5,
                                 shuffle=True)
            # Save models
            self.autoencoder.save(self.autoencoder_model_pkl)
            self.encoder.save(self.encoder_model_pkl)
            self.decoder.save(self.decoder_model_pkl)  
        else:
            # Load previously saved models
            self.autoencoder = load_model(self.autoencoder_model_pkl)
            self.encoder = load_model(self.encoder_model_pkl)
            self.decoder = load_model(self.decoder_model_pkl)
    
    def predict(self, test_profile_images):
        png_profile_images = self.process_images(test_profile_images)
        encoded_imgs = self.encoder.predict(png_profile_images)
        decoded_imgs = self.decoder.predict(encoded_imgs)    
        return test_profile_images, decoded_imgs
           
if __name__ == "__main__":
    profile_gray_objs, midcurve_gray_objs = get_training_data()
    endec = simple_encoderdecoder()
    endec.train(profile_gray_objs, midcurve_gray_objs)
    
    test_gray_images = random.sample(profile_gray_objs, 5)
    original_profile_imgs, predicted_midcurve_imgs = endec.predict(test_gray_images)
    plot_results(original_profile_imgs, predicted_midcurve_imgs)
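
    # A hedged sanity-check sketch (not in the original example): if the
    # autoencoder is simply the encoder followed by the decoder, predicting with
    # it directly should roughly reproduce the two-step prediction in predict().
    direct_midcurve_imgs = endec.autoencoder.predict(
        endec.process_images(test_gray_images))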
Example #4

# (This example begins mid-call: the dangling "data_dir)" below is the tail of a
#  truncated statement that is assumed to load train/validation filenames and
#  labels from data_dir.)
    data_dir)
num_classes = len(set(train_labels))

print(num_classes)

# Build the graph using the appropriate class from slim
graph = tf.Graph()
ckpt = tf.train.latest_checkpoint(os.getcwd())

with graph.as_default():
    if ckpt is None:
        # Indicates whether we are in training or in test mode
        is_training = tf.placeholder(tf.bool, name="is_training")

        # Get data iterators
        images_train, labels_train, train_init_op, iterator = pd.get_training_data(
            train_filenames, train_labels)
        images_val, labels_val, val_init_op, _ = pd.get_validation_data(
            val_filenames, val_labels, iterator_val=iterator)

        # Get the pretrained model, specifying the num_classes argument to create a
        # new fully connected layer that replaces the last one.
        # Each model has a different architecture, so the final layer differs from
        # model to model.
        # Here, logits gives us directly the predicted scores we want for the images.

        logits = dm.predict(model_type, images_train, num_classes,
                            weight_decay, drop_out, True)

        # Specify where the model checkpoint is (pretrained weights).
        assert os.path.isfile(model_path)

        # Restore layers that we want
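
        # A hedged sketch (not from the original example) of the usual
        # tf.contrib.slim restore step that the comment above refers to; the
        # excluded scope name 'vgg_16/fc8' is an assumed placeholder for the
        # newly created final layer.
        variables_to_restore = tf.contrib.slim.get_variables_to_restore(
            exclude=['vgg_16/fc8'])
        restorer = tf.train.Saver(variables_to_restore)
        # Later, inside a tf.Session: restorer.restore(sess, model_path)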
Example #5

from prepare_data import get_training_data
from prepare_plots import plot_results
from build_simple_encoderdecoder_w_denoiser_model import simple_encoderdecoder_w_denoiser

if __name__ == "__main__":
    profile_pngs_objs, midcurve_pngs_objs = get_training_data()
    endec = simple_encoderdecoder_w_denoiser()
    original_imgs, decoded_imgs = endec.simple_encoderdecoder_w_denoiser(
        profile_pngs_objs, midcurve_pngs_objs)
    plot_results(original_imgs, decoded_imgs)