Example No. 1
    def __init__(self):
        self.image, self.rect = utils.load_images("mainmenu/title.png")
        self.rect.right = 0
        self.tweener = pytweener.Tweener()
        self.x = -self.rect.width
        # Slide the title in from off-screen left to x = 200 with an elastic ease-out
        self.tweener.addTween(self, x=200, tweenTime=1,
                              tweenType=pytweener.Easing.Elastic.easeOut)
Example No. 2
    def __init__(self, game):
        GameSceneMessage.__init__(self, game)
        self.graphic_message, self.rect = utils.load_images('gamescene/pause.png')
        self.x = -100
        self.rect.y = 200
        self.tweener.addTween(self, x=200, tweenTime=0.3,
                              tweenType=pytweener.Easing.Elastic.easeOut)
Example No. 3
    def __init__(self, director):
        scene.Scene.__init__(self, director)
        self.background, rect = utils.load_images("options/background.png")
        self.menu = menu.Menu(
            [
                ("Fullscreen", self.on_toggle),
                ("Regresar", self.on_return),
            ])
Example No. 4
    def __init__(self, start_y, item_height, initial_selected):
        self.image, self.rect = utils.load_images("cursor.png")
        self.rect.centerx = 320
        self.start_y = start_y
        self.y = 0
        self.item_height = item_height
        self.tweener = pytweener.Tweener()

        self.set_position(initial_selected)
Example No. 5
    def __init__(self, director):
        scene.Scene.__init__(self, director)

        self.background, rect = utils.load_images("creditscene/background.png")
        self.font = utils.load_font("FreeSans.ttf", 20)
        
        self.program = ["Hugo Ruscitti", "Juanxo", "Dokan", "lacabra25", 
                        "Juan Carlos", "thepoi", "joksnet"]
        self.art = ["Walter Velazquez"]
                        
        self.rendered_program = self.render_authors(self.program)
        self.rendered_art = self.render_authors(self.art)
Example No. 6
    def __init__(self, director):
        scene.Scene.__init__(self, director)
        self.graphic_message = None
        self.running = True
        self.delay_showing_line_animation = 0
        self.current_message = None
        self.current_message_rect = None
        self.board = board.Board(self)
        self.display = display.Display()
        self.background, tmp = utils.load_images("gamescene/background.png")
        self.pieces = piece.Group()
        self.game_speed = 0
        self.create_return_message()
        self.show_graphic_message(game_scene_messages.AreYouReadyMessage(self))
        self.line_animation = None
        self.delay_showing_line_animation = 0
Example No. 7
try:
    encoder.load_weights("models/encoder.h5")
    decoder_A.load_weights("models/decoder_A.h5")
    decoder_B.load_weights("models/decoder_B.h5")
except:
    pass


def save_model_weights():
    encoder.save_weights("models/encoder.h5")
    decoder_A.save_weights("models/decoder_A.h5")
    decoder_B.save_weights("models/decoder_B.h5")
    print("save model weights")


images_A = get_image_paths("data/trump")
images_B = get_image_paths("data/cage")
images_A = load_images(images_A) / 255.0
images_B = load_images(images_B) / 255.0

# Shift set A's per-channel mean to match set B (rough colour matching between the two face sets)
images_A += images_B.mean(axis=(0, 1, 2)) - images_A.mean(axis=(0, 1, 2))

print("press 'q' to stop training and save model")

for epoch in range(1000000):
    batch_size = 64
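    # Sample a fresh batch of (warped, target) image pairs for each face set
    # and take one training step per autoencoder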
    warped_A, target_A = get_training_data(images_A, batch_size)
    warped_B, target_B = get_training_data(images_B, batch_size)

    loss_A = autoencoder_A.train_on_batch(warped_A, target_A)
    loss_B = autoencoder_B.train_on_batch(warped_B, target_B)
    print(loss_A, loss_B)
Example No. 8
#!/usr/bin/env python3

import numpy as np
import tensorflow as tf
from train_model import TrainModel
from utils import load_images, load_csv, generate_triplets

images, filenames = load_images('HBTNaligned', as_array=True)
triplet_names = load_csv('FVTriplets.csv')
A, P, N = generate_triplets(images, filenames, triplet_names)
triplets = [A[:-2], P[:-2], N[:-2]] # to make all batches divisible by 32

tm = TrainModel('models/face_verification.h5', 0.2)
history = tm.train(triplets, epochs=1)
print(history.history)
Example No. 9
import pickle
import utils
import sac
import sys

if len(sys.argv) != 2:
  print "Usage: ./display_saved_network.py somefile.pickle"
  sys.exit(1)

fname = sys.argv[1]
f = open(fname, "r")
solution = pickle.load(f)

utils.save_as_figure((solution.W1 + solution.b1).T, "loadedW1.png")
utils.save_as_figure(solution.W2, "loadedW2.png")


images = utils.load_images("data/train-images-idx3-ubyte")
labels = utils.load_labels("data/train-labels-idx1-ubyte")
utils.save_as_figure(images[:, 0:100], "output/input.png")

patches = images[:, 0:10000]
visible_size = 28*28
hidden_size = 196


options = sac.SparseAutoEncoderOptions(visible_size,
                                       hidden_size,
                                       output_dir="output",
                                       max_iterations = 400)

network = sac.SparseAutoEncoder(options, patches)
Example No. 10
    def load_data(self):
        # load and prepare names
        input_image_names = load_image_names(self._config.data_dir,
                                             self._config.match_pattern)
        target_image_names = input_image_names if not self._config.target_data_dir else \
            load_image_names(self._config.target_data_dir, self._config.match_pattern)
        name_size = min(len(input_image_names), len(target_image_names))
        if len(input_image_names) != len(target_image_names):
            tf.logging.warning(
                "Reducing data set to {} (input: {}, target: {})".format(
                    name_size, len(input_image_names),
                    len(target_image_names)))
            input_image_names = input_image_names[:name_size]
            assert self._config.data_dir == self._config.target_data_dir, "should shuffle target images before reducing"
            target_image_names = target_image_names[:name_size]
        assert not self._config.train_disc_on_extra_targets, "not implemented"

        # load images
        input_images = load_images(input_image_names,
                                   self._config.data_dir,
                                   self._config.input_type,
                                   flip_lr=self._config.augmentation_flip_lr,
                                   flip_ud=self._config.augmentation_flip_ud)
        target_images = load_images(target_image_names,
                                    self._config.target_data_dir
                                    or self._config.data_dir,
                                    self._config.target_type,
                                    flip_lr=self._config.augmentation_flip_lr,
                                    flip_ud=self._config.augmentation_flip_ud)
        assert len(input_images) == len(target_images)

        # load/prepare test data
        test_input_images = None
        test_target_images = None
        if self._config.test_data_dir:
            assert not self._config.target_data_dir, "alternative test target data currently isn't supported"
            tf.logging.info("Loading test data")
            test_input_image_names = load_image_names(
                self._config.test_data_dir, self._config.match_pattern)
            test_input_images = load_images(test_input_image_names,
                                            self._config.test_data_dir,
                                            self._config.input_type,
                                            flip_lr=False,
                                            flip_ud=False)
            test_target_images = load_images(test_input_image_names,
                                             self._config.test_data_dir,
                                             self._config.target_type,
                                             flip_lr=False,
                                             flip_ud=False)
            assert len(test_input_images) == len(test_target_images)
        if self._config.test_data_percentage:
            tf.logging.warning(
                "Using the first {}% of the training data for testing".format(
                    100 * self._config.test_data_percentage))
            split = int(len(target_images) * self._config.test_data_percentage)
            test_input_images = input_images[:split]
            input_images = input_images[split:]
            test_target_images = target_images[:split]
            target_images = target_images[split:]
        data_set_size = len(target_images)
        test_set_size = len(
            [] if test_target_images is None else test_target_images)

        # set up epoch samples
        sample_indexes = np.random.choice(data_set_size,
                                          np.prod(self._epoch_images_shape),
                                          replace=False)
        self._epoch_sample_input = tf.convert_to_tensor(
            input_images[sample_indexes])
        self._epoch_sample_targets = target_images[sample_indexes]

        # build data sets
        self._data_set = tf.data.Dataset.from_tensor_slices((input_images, target_images))\
            .shuffle(data_set_size).batch(self._config.batch_size)
        del input_images
        del target_images
        if test_target_images is not None:
            self._test_data_set = tf.data.Dataset.from_tensor_slices((test_input_images, test_target_images))\
                .shuffle(test_set_size).batch(self._config.batch_size)
            del test_input_images
            del test_target_images
        else:
            tf.logging.warning("Running evaluation without test data!")

        return data_set_size, test_set_size
Example No. 11
# Custom object needed for inference and training
custom_objects = {
    'BilinearUpSampling2D': BilinearUpSampling2D,
    'depth_loss_function': depth_loss_function
}

print('Loading model...')

# Load model into GPU / CPU
model = load_model(args.model, custom_objects=custom_objects, compile=False)

print('\nModel loaded ({0}).'.format(args.model))

# Input images
inputs = load_images(glob.glob(args.input))
# ipdb.set_trace()  # optional debugging breakpoint
print('\nLoaded ({0}) images of size {1}.'.format(inputs.shape[0],
                                                  inputs.shape[1:]))

for input_image in range(inputs.shape[0]):
    img = np.expand_dims(inputs[input_image], axis=0)
    outputs = predict(model, img)
    save_images('image_' + str(input_image), outputs)

# Compute results

# Display results
# viz = display_images(outputs.copy(), inputs.copy())
# plt.figure(figsize=(10,5))
# plt.imsave('depth_pred.png', viz)
Example No. 12
    def load_data(self):
        # load and prepare names
        input_image_names = load_image_names(self._config.data_dir,
                                             self._config.match_pattern)
        second_input_image_names = input_image_names if not self._config.second_data_dir else \
            load_image_names(self._config.second_data_dir, self._config.match_pattern)
        if self._config.use_extra_first_inputs:
            assert len(second_input_image_names) < len(input_image_names)
            second_input_times = 2
            tf.logging.warning(
                "Using each second input {} times".format(second_input_times))
            second_input_image_names = second_input_image_names * second_input_times
        if len(second_input_image_names) < len(input_image_names):
            tf.logging.warning(
                "There are fewer second input images; shuffling and reducing input images"
            )
            np.random.shuffle(input_image_names)
            input_image_names = input_image_names[:len(second_input_image_names
                                                       )]
        target_image_names = input_image_names if not self._config.target_data_dir else \
            second_input_image_names if self._config.target_data_dir == self._config.second_data_dir else \
            load_image_names(self._config.target_data_dir, self._config.match_pattern)
        assert len(target_image_names) >= len(input_image_names)
        name_size = min(
            min(len(input_image_names), len(second_input_image_names)),
            len(target_image_names))
        if len(input_image_names) != len(target_image_names) or len(
                second_input_image_names) != len(target_image_names):
            tf.logging.warning(
                "Reducing data set to {} (input: {}, second input: {}, target: {})"
                .format(name_size, len(input_image_names),
                        len(second_input_image_names),
                        len(target_image_names)))
            input_image_names = input_image_names[:name_size]
            second_input_image_names = second_input_image_names[:name_size]
            tf.logging.info(
                "Input and target data are different; shuffling targets before reducing"
            )
            np.random.shuffle(target_image_names)
            if self._config.train_disc_on_extra_targets:
                extra_target_image_names = target_image_names[name_size:]
                extra_target_image_names = extra_target_image_names[:2 * len(
                    input_image_names)]  # select the first X extra images
            else:
                extra_target_image_names = None
            target_image_names = target_image_names[:name_size]
        else:
            assert not self._config.train_disc_on_extra_targets, "there are no extra targets to train on"
            extra_target_image_names = None

        # load images
        input_images = load_images(input_image_names,
                                   self._config.data_dir,
                                   self._config.input_type,
                                   flip_lr=self._config.augmentation_flip_lr,
                                   flip_ud=self._config.augmentation_flip_ud)
        second_input_images = load_images(
            second_input_image_names,
            self._config.second_data_dir or self._config.data_dir,
            self._config.second_input_type,
            flip_lr=self._config.augmentation_flip_lr,
            flip_ud=self._config.augmentation_flip_ud)
        combined_input_images = np.concatenate(
            [input_images, second_input_images], axis=-1)
        del input_images
        del second_input_images
        target_images = load_images(target_image_names,
                                    self._config.target_data_dir
                                    or self._config.data_dir,
                                    self._config.target_type,
                                    flip_lr=self._config.augmentation_flip_lr,
                                    flip_ud=self._config.augmentation_flip_ud)
        if extra_target_image_names:
            extra_target_images = load_images(
                extra_target_image_names,
                self._config.target_data_dir or self._config.data_dir,
                self._config.target_type,
                flip_lr=self._config.augmentation_flip_lr,
                flip_ud=self._config.augmentation_flip_ud)
            tf.logging.warning(
                "Adding {} extra targets for the discriminator!".format(
                    len(extra_target_images)))
            self._extra_discriminator_data_set = tf.data.Dataset.from_tensor_slices(extra_target_images)\
                .shuffle(len(extra_target_images)).batch(self._config.batch_size)
            del extra_target_images
        assert len(combined_input_images) == len(target_images)

        # load/prepare test data
        test_input_images = None
        test_target_images = None
        if self._config.test_data_dir:
            tf.logging.info("Loading test data")
            test_input_image_names = load_image_names(
                self._config.test_data_dir, self._config.match_pattern)
            test_second_input_image_names = test_input_image_names if not self._config.test_second_data_dir else \
                load_image_names(self._config.test_second_data_dir, self._config.match_pattern)
            if len(test_second_input_image_names) < len(
                    test_input_image_names):
                tf.logging.warning(
                    "TEST: There are fewer second input images; shuffling and reducing input images"
                )
                np.random.shuffle(test_input_image_names)
                test_input_image_names = test_input_image_names[:len(
                    test_second_input_image_names)]
            test_target_image_names = test_input_image_names if not self._config.test_target_data_dir else \
                test_second_input_image_names if self._config.test_target_data_dir == self._config.test_second_data_dir else \
                load_image_names(self._config.test_target_data_dir, self._config.match_pattern)
            assert len(test_target_image_names) >= len(test_input_image_names)
            name_size = min(
                min(len(test_input_image_names),
                    len(test_second_input_image_names)),
                len(test_target_image_names))
            if len(test_input_image_names) != len(
                    test_target_image_names) or len(
                        test_second_input_image_names) != len(
                            test_target_image_names):
                tf.logging.warning(
                    "Reducing data set to {} (input: {}, second input: {}, target: {})"
                    .format(name_size, len(test_input_image_names),
                            len(test_second_input_image_names),
                            len(test_target_image_names)))
                test_input_image_names = test_input_image_names[:name_size]
                test_second_input_image_names = test_second_input_image_names[:
                                                                              name_size]
                tf.logging.info(
                    "Input and target data are different; shuffling targets before reducing"
                )
                np.random.shuffle(test_target_image_names)
                test_target_image_names = test_target_image_names[:name_size]

            test_input_images = load_images(test_input_image_names,
                                            self._config.test_data_dir,
                                            self._config.input_type,
                                            flip_lr=False,
                                            flip_ud=False)
            test_second_input_images = load_images(
                test_second_input_image_names,
                self._config.test_second_data_dir
                or self._config.test_data_dir,
                self._config.second_input_type,
                flip_lr=False,
                flip_ud=False)
            test_combined_input_images = np.concatenate(
                [test_input_images, test_second_input_images], axis=-1)
            del test_input_images
            del test_second_input_images
            test_target_images = load_images(test_target_image_names,
                                             self._config.test_target_data_dir
                                             or self._config.test_data_dir,
                                             self._config.target_type,
                                             flip_lr=False,
                                             flip_ud=False)
            assert len(test_combined_input_images) == len(test_target_images)

        if self._config.test_data_percentage:
            tf.logging.warning(
                "Using the first {}% of the training data for testing".format(
                    100 * self._config.test_data_percentage))
            split = int(len(target_images) * self._config.test_data_percentage)
            test_combined_input_images = combined_input_images[:split]
            combined_input_images = combined_input_images[split:]
            test_target_images = target_images[:split]
            target_images = target_images[split:]
            assert False, "not implemented"
        data_set_size = len(target_images)
        test_set_size = len(
            [] if test_target_images is None else test_target_images)

        # set up epoch samples
        sample_indexes = np.random.choice(data_set_size,
                                          np.prod(self._epoch_images_shape),
                                          replace=False)
        self._epoch_sample_input = tf.convert_to_tensor(
            combined_input_images[sample_indexes])

        # build data sets
        self._data_set = tf.data.Dataset.from_tensor_slices((combined_input_images, target_images))\
            .shuffle(data_set_size).batch(self._config.batch_size)
        del combined_input_images
        del target_images
        if test_set_size:
            self._test_data_set = tf.data.Dataset.from_tensor_slices((test_combined_input_images, test_target_images))\
                .shuffle(test_set_size).batch(self._config.batch_size)
            del test_combined_input_images
            del test_target_images
        else:
            tf.logging.warning("Running evaluation without test data!")

        return data_set_size, test_set_size
Example No. 13
        # Get the command line parameter value.
        arg_index = sys.argv.index('--patches_file')
        patches_file = sys.argv[arg_index + 1]

    patches = None
    if patches_file is not None:
        # If the patches_file command line parameter was provided,
        # read it from disk.
        pprint("Reading patches file ...")
        with open(patches_file, 'rb') as fp:
            patches = pickle.load(fp)
    else:
        # If the patches_file command line parameter was not provided,
        # load the images from disk, ...
        pprint("Loading images ...")
        images, image_files = load_images(images_path, extensions=['.png'],
                                          img_shape=(256, 256))
        if create_patches:
            # and if the --create_patches was provided generate the patches.
            pprint("Extracting patches ...")
            patches = extract_patches(images, patch_shape=(16, 16))
            if patches_file is not None:
                pprint("Saving patches to disk ...")
                with open(patches_file, 'wb') as fp:
                    pickle.dump(patches, fp)
        else:
            # or, if the --create_patches was not provided, use the images
            # themselves as the patches.
            patches = images

    # Finally, start the learning procedure.
    pprint("Starting training ...")
Example No. 14
#!/usr/local/bin/python
from website import app
from utils import clear, load, load_images, add_zips
import sys


if __name__ == '__main__':
    if len(sys.argv) == 1:
        app.debug = True
        app.run(host='0.0.0.0', port=5000)

    elif len(sys.argv) == 2:
        if sys.argv[1] == 'clear':
            clear()
        elif sys.argv[1] == 'load':
            load()
        elif sys.argv[1] == 'load_images':
            load_images()
        elif sys.argv[1] == 'add_zips':
            add_zips()
        else:
            print "error starting server: could not interpret argument"
    else:
        print "error starting server: too many arguments"
Example No. 15
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from utils import load_images
from skimage.feature import hog
from sklearn.model_selection import GridSearchCV

# load images
images, labels = load_images()
images = images[0:10000]
labels = labels[0:10000]
hogs = []
for image in images:
    hogs.append(hog(image, pixels_per_cell=(7, 7), cells_per_block=(4, 4)))
print("Done")
X_train, X_test, y_train, y_test = train_test_split(hogs,
                                                    labels,
                                                    test_size=.15)

grid_params = {'n_neighbors': [3, 5, 7, 9], 'weights': ['uniform', 'distance']}

# Grid-search KNN hyper-parameters and fit on the training set
search = GridSearchCV(KNeighborsClassifier(), grid_params)
search.fit(X_train, y_train)
total = 0
right = 0
print(search.best_params_)
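# Score the held-out split by hand: count correct predictions image by image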
for image, label in zip(X_test, y_test):
    if search.predict(image.reshape(1, -1)) == label:
        right += 1
    total += 1
Example No. 16
def main(type,
         input_names,
         save_folder='./detections',
         iou_threshold=0.5,
         confidence_threshold=0.5,
         class_names_file=_CLASS_NAMES_FILE,
         create_csv=False):
    # Get class names and number
    class_names = load_class_names(class_names_file)
    n_classes = len(class_names)

    # Tensorflow prep
    tf.compat.v1.reset_default_graph()

    # Load Yolo_v3 model
    model = Yolo_v3(n_classes=n_classes,
                    model_size=_MODEL_SIZE,
                    max_output_size=_MAX_OUTPUT_SIZE,
                    iou_threshold=iou_threshold,
                    confidence_threshold=confidence_threshold)

    if type == 'images':

        # Load pictures and set up detection inputs
        batch_size = len(input_names)
        batch = load_images(input_names, model_size=_MODEL_SIZE)
        inputs = tf.compat.v1.placeholder(tf.float32,
                                          [batch_size, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)

        saver = tf.compat.v1.train.Saver(
            tf.compat.v1.global_variables(scope='yolo_v3_model'))

        # Load the weights model.ckpt and run detection on inputs
        with tf.compat.v1.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')
            detection_result = sess.run(detections, feed_dict={inputs: batch})

        # Using detection results, draw detection boxes on input pictures and save them
        draw_boxes(input_names, detection_result, class_names, _MODEL_SIZE,
                   save_folder)

        print('Detections have been saved successfully.')

    elif type == 'video':

        # Build the detection graph for single-frame (video) input
        inputs = tf.compat.v1.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(
            tf.compat.v1.global_variables(scope='yolo_v3_model'))

        # Run tensorflow session
        with tf.compat.v1.Session() as sess:
            # Load model
            saver.restore(sess, './weights/model.ckpt')

            # Create window for output video
            win_name = 'Video detection'
            cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
            cv2.resizeWindow(win_name, 1280, 720)

            # Create OpenCV capture and get video metadata
            cap = cv2.VideoCapture(input_names[0])
            frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'X264')
            fps = cap.get(cv2.CAP_PROP_FPS)

            # Set name and save destination for output video
            input_name_base = os.path.basename(input_names[0])
            video_save_path = save_folder + '/' + os.path.splitext(input_name_base)[0] + '_analysed' + \
                              os.path.splitext(input_name_base)[1]
            if os.path.splitext(input_name_base)[1] in _TO_MP4_FORMAT_LIST:
                video_save_path = save_folder + '/' + os.path.splitext(
                    input_name_base)[0] + '_analysed.mp4'

            # Create output video
            out = cv2.VideoWriter(video_save_path, fourcc, fps,
                                  (int(frame_size[0]), int(frame_size[1])))

            # Create csv file and insert row of time, frame, and classes if create_csv is set to True
            if create_csv:
                csv_save_path = save_folder + '/' + os.path.splitext(
                    input_name_base)[0] + '_statistics.csv'
                csv_field_names = class_names[:]
                csv_field_names.insert(0, "time")
                csv_field_names.insert(0, "frame")
                print(fps)
                sec_counter = 0
                with open(csv_save_path, 'w', newline='') as csv_file:
                    csv_writer = csv.writer(csv_file)
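                    # "sep=," header line tells Excel which column separator to use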
                    csv_file.write("sep=,")
                    csv_file.write('\n')
                    csv_writer.writerow(csv_field_names)
                csv_input_dict = {"frame": cap.get(cv2.CAP_PROP_POS_FRAMES)}
            try:
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break

                    # Resize frame to fit model and run detection
                    resized_frame = cv2.resize(frame,
                                               dsize=_MODEL_SIZE[::-1],
                                               interpolation=cv2.INTER_NEAREST)
                    detection_result = sess.run(
                        detections, feed_dict={inputs: [resized_frame]})
                    # In one second intervals, insert the maximum number of detections per class to a new row in csv
                    if create_csv:

                        csv_input_dict["frame"] = cap.get(
                            cv2.CAP_PROP_POS_FRAMES)
                        csv_input_dict["time"] = cap.get(cv2.CAP_PROP_POS_MSEC)
                        for cls in range(len(class_names)):
                            number_of_obj = len(detection_result[0][cls])
                            if number_of_obj != 0:
                                print(class_names[cls] + str(number_of_obj))
                                if class_names[cls] in csv_input_dict:
                                    csv_input_dict[class_names[cls]] = max(
                                        number_of_obj,
                                        csv_input_dict[class_names[cls]])
                                else:
                                    csv_input_dict[
                                        class_names[cls]] = number_of_obj
                        if cap.get(
                                cv2.CAP_PROP_POS_MSEC) / 1000 >= sec_counter:
                            with open(csv_save_path, 'a',
                                      newline='') as csv_file:
                                csv_writer = csv.DictWriter(
                                    csv_file, fieldnames=csv_field_names)
                                csv_writer.writerow(csv_input_dict)
                            sec_counter += 1
                            for cls in range(len(class_names)):
                                csv_input_dict.pop(class_names[cls], None)
                            print(sec_counter)

                    # Draw detection boxes on the frame being handled
                    draw_frame(frame, frame_size, detection_result,
                               class_names, _MODEL_SIZE)

                    # Show the current output frame on window
                    cv2.imshow(win_name, frame)

                    # Poll for key inputs, if 'q' is pressed, break to end processing video
                    key = cv2.waitKey(1) & 0xFF

                    if key == ord('q'):
                        break

                    # Write the current frame to the output file
                    out.write(frame)
            finally:
                cv2.destroyAllWindows()
                cap.release()
                print('Detections have been saved successfully.')

    # Not in use currently
    elif type == 'webcam':
        inputs = tf.compat.v1.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(
            tf.compat.v1.global_variables(scope='yolo_v3_model'))

        with tf.compat.v1.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')

            win_name = 'Webcam detection'
            cv2.namedWindow(win_name)
            cap = cv2.VideoCapture(0)
            frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'X264')
            fps = cap.get(cv2.CAP_PROP_FPS)
            out = cv2.VideoWriter('./detections/detections.mp4', fourcc, fps,
                                  (int(frame_size[0]), int(frame_size[1])))

            try:
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break
                    resized_frame = cv2.resize(frame,
                                               dsize=_MODEL_SIZE[::-1],
                                               interpolation=cv2.INTER_NEAREST)
                    detection_result = sess.run(
                        detections, feed_dict={inputs: [resized_frame]})

                    draw_frame(frame, frame_size, detection_result,
                               class_names, _MODEL_SIZE)

                    cv2.imshow(win_name, frame)

                    key = cv2.waitKey(1) & 0xFF

                    if key == ord('q'):
                        break

                    out.write(frame)
            finally:
                cv2.destroyAllWindows()
                cap.release()
                print('Detections have been saved successfully.')

    else:
        raise ValueError(
            "Inappropriate data type. Please choose either 'video' or 'images'."
        )
Example No. 17
parser.add_argument('--mindepth', type=float, default=10.0, help='Minimum of input depths')
parser.add_argument('--maxdepth', type=float, default=1000.0, help='Maximum of input depths')
args = parser.parse_args()

# Custom object needed for inference and training
custom_objects = {'BilinearUpSampling2D': BilinearUpSampling2D, 'depth_loss_function': None}

print('Loading model...')

# Load model into GPU / CPU
model = load_model(args.model, custom_objects=custom_objects, compile=False)

print('\nModel loaded ({0}).'.format(args.model))

# Input images
inputs = load_images( glob.glob(args.input) )
print('\nLoaded ({0}) images of size {1}.'.format(inputs.shape[0], inputs.shape[1:]))

# Compute results
outputs = predict(model, inputs, minDepth=args.mindepth, maxDepth=args.maxdepth, batch_size=args.bs)

#matplotlib problem on ubuntu terminal fix
#matplotlib.use('TkAgg')   

# Display results
if args.input_depth=='examples/eye/*.png':
    inputs_depth = load_images( glob.glob(args.input_depth))    
    viz = display_images(outputs.copy(), inputs.copy(), inputs_depth.copy())
else:
    viz = display_images(outputs.copy(), inputs.copy())
Example No. 18
try:
    encoder  .load_weights( "models/encoder.h5"   )
    decoder_A.load_weights( "models/decoder_A.h5" )
    decoder_B.load_weights( "models/decoder_B.h5" )
except:
    pass

def save_model_weights():
    encoder  .save_weights( "models/encoder.h5"   )
    decoder_A.save_weights( "models/decoder_A.h5" )
    decoder_B.save_weights( "models/decoder_B.h5" )
    print( "save model weights" )

images_A = get_image_paths( "data/trump" )
images_B = get_image_paths( "data/cage"  )
images_A = load_images( images_A ) / 255.0
images_B = load_images( images_B ) / 255.0

images_A += images_B.mean( axis=(0,1,2) ) - images_A.mean( axis=(0,1,2) )

print( "press 'q' to stop training and save model" )

for epoch in range(1000000):
    batch_size = 64
    warped_A, target_A = get_training_data( images_A, batch_size )
    warped_B, target_B = get_training_data( images_B, batch_size )

    loss_A = autoencoder_A.train_on_batch( warped_A, target_A )
    loss_B = autoencoder_B.train_on_batch( warped_B, target_B )
    print( loss_A, loss_B )
Example No. 19
import numpy as np

from keras.preprocessing.image import ImageDataGenerator
from model import ai_model

# Import each method individually for clarity purposes.
from utils import load_images, save_model, save_the_images
from utils import prepare_accuracy_visualisation_images, train

# Prepare an array for images to be loaded into
X = []

# Load a number of images from a specified folder into the X array.
# 1: Batch size to load
# 2: The array to save the images in.
# 3: (Optional) Folder with the images (default: "Train/")
batch_size = 700
load_images(batch_size, X)

# Convert standard array to numpy array
X = np.array(X)  #, dtype=float) #float gives error, use 1.0/255 instead.

# Split the loaded dataset into training and testing part
# as per the given percentage (90% here)
training_percentage = 0.9
split = int(training_percentage * len(X))

# use the given percentage for training
Xtrain = X[:split]

#normalise the data (divide it by 255), while keeping it as float
Xtrain = 1.0 / 255 * Xtrain
Example No. 20
    X, y, X_val, y_val, model_type, m, params = args
    model = ModelFactory.create_model(model_type, m, **params)
    model.fit(X, y)
    return (model.score(X_val, y_val), model, params)


def choose_best_model(X, X_val, y, y_val, model_type, params):
    p = Pool(processes=NUM_PROCESSES)
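    # Fit one model per hyper-parameter combination in parallel and score each on the validation set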
    scores = p.map(_choose_slave,
                   [(X, y, X_val, y_val, model_type, X_train.shape[0],
                     dict(zip(params.keys(), ps)))
                    for ps in itertools.product(*params.values())])
    best_acc, best_model, best_params = max(scores)
    return best_params, best_acc, best_model


if __name__ == '__main__':
    t = time.time()
    X, y = utils.load_hog(PPC, TRAIN_TOTAL) if HOG else utils.load_images(TRAIN_TOTAL)
    print 'Done loading', str(time.time() - t)
    t = time.time()

    # split data
    X_train, X_val, y_train, y_val = cross_validation.train_test_split(X, y, test_size=VALSIZE)
    print 'Done splitting train', str(time.time() - t)
    t = time.time()

    # sweep hyperparameters field and pick best model
    params, acc, model = choose_best_model(X_train, X_val, y_train, y_val, MODEL, PARAMS[MODEL])
    # if not SVM:
    #     reg, acc, model = find_regularization(X_train, X_val, y_train, y_val, REGS_HOG if HOG else REGS)
    #     print "Best regularization strength is: " + str(reg)
    # else:
    #     params, acc, model = choose_best_model(X_train, X_val, y_train, y_val, MODEL, PARAMS[MODEL])
Example No. 21
    def get_prediction_from_path(self,
                                 sess,
                                 x_data,
                                 compatibility_multiplier=32,
                                 tgt_size=None,
                                 method=None,
                                 full_prediction=False,
                                 keep_prob=1.0):
        """ Prediction of the neural network graph. """
        # Load images
        x_data = utils.load_images(x_data, tgt_size=tgt_size, method=method)

        # Do it one by one if different sizes
        if method is None:
            pred = []
            for x in x_data:
                # Pad if required
                x, pads = utils.match_size_with_pad(x,
                                                    compatibility_multiplier)
                x = np.expand_dims(x, axis=0)
                # Run for full mask
                p = sess.run(tf.nn.softmax(self.full_mask_logits_tf),
                             feed_dict={
                                 self.x_tf: x,
                                 self.keep_prob_tf: keep_prob
                             })
                if self.multi_head and full_prediction:
                    # Run for borders and untouching mask
                    b = sess.run(tf.nn.softmax(self.borders_logits_tf),
                                 feed_dict={
                                     self.x_tf: x,
                                     self.keep_prob_tf: keep_prob
                                 })

                    p = np.concatenate([p[0, :, :, 1:], b[0, :, :, 1:]], -1)
                else:
                    p = p[0, :, :, 1]
                # Unpad
                pred.append(utils.unpad_image_to_original_size(p, pads))

        # Do it for all the batch
        else:
            pred = sess.run(tf.nn.softmax(self.full_mask_logits_tf),
                            feed_dict={
                                self.x_tf: x_data,
                                self.keep_prob_tf: keep_prob
                            })

            if self.multi_head and full_prediction:
                borders = sess.run(tf.nn.softmax(self.borders_logits_tf),
                                   feed_dict={
                                       self.x_tf: x_data,
                                       self.keep_prob_tf: keep_prob
                                   })
                pred = np.concatenate(
                    [pred[:, :, :, 1:], borders[:, :, :, 1:]], -1)

            else:
                pred = pred[:, :, :, 1:]

        return pred
Example No. 22
    def load_images(self, path):
        image, rect = utils.load_images(path, True)
        w = rect.w / 4
        h = rect.h
        # Split the sprite sheet into four equal-width frames
        self.frames = [image.subsurface(x * w, 0, w, h) for x in range(0, 4)]
Example No. 23
# -*- coding: utf-8 -*-

import os
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from model import NeuralStyleTransferModel
import settings
import utils

# Create the model
model = NeuralStyleTransferModel()

# Load the content image
content_image = utils.load_images(settings.CONTENT_IMAGE_PATH)
# Load the style image
style_image = utils.load_images(settings.STYLE_IMAGE_PATH)

# Pre-compute the content features of the target content image for later use
target_content_features = model([
    content_image,
])['content']
# Compute the style features of the target style image
target_style_features = model([
    style_image,
])['style']

# M: number of pixels per image, N: number of colour channels
M = settings.WIDTH * settings.HEIGHT
N = 3

Example No. 24
def main(type, iou_threshold, confidence_threshold, input_names):
    class_names = load_class_names(_CLASS_NAMES_FILE)
    n_classes = len(class_names)

    model = Yolo_v3(n_classes=n_classes, model_size=_MODEL_SIZE,
                    max_output_size=_MAX_OUTPUT_SIZE,
                    iou_threshold=iou_threshold,
                    confidence_threshold=confidence_threshold)

    if type == 'images':
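        # Run a single batched forward pass over every input image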
        batch_size = len(input_names)
        batch = load_images(input_names, model_size=_MODEL_SIZE)
        inputs = tf.compat.v1.placeholder(tf.float32, [batch_size, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(scope='yolo_v3_model'))

        with tf.compat.v1.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')
            detection_result = sess.run(detections, feed_dict={inputs: batch})

        draw_boxes(input_names, detection_result, class_names, _MODEL_SIZE)

        print('Detections have been saved successfully.')

    elif type == 'video':
        inputs = tf.compat.v1.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(scope='yolo_v3_model'))

        with tf.compat.v1.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')

            win_name = 'Video detection'
            cv2.namedWindow(win_name)
            cap = cv2.VideoCapture(input_names[0])
            frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'X264')
            fps = cap.get(cv2.CAP_PROP_FPS)
            out = cv2.VideoWriter('./detections/detections.mp4', fourcc, fps,
                                  (int(frame_size[0]), int(frame_size[1])))

            try:
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break
                    resized_frame = cv2.resize(frame, dsize=_MODEL_SIZE[::-1],
                                               interpolation=cv2.INTER_NEAREST)
                    detection_result = sess.run(detections,
                                                feed_dict={inputs: [resized_frame]})

                    draw_frame(frame, frame_size, detection_result,
                               class_names, _MODEL_SIZE)

                    cv2.imshow(win_name, frame)

                    key = cv2.waitKey(1) & 0xFF

                    if key == ord('q'):
                        break

                    out.write(frame)
            finally:
                cv2.destroyAllWindows()
                cap.release()
                print('Detections have been saved successfully.')

    elif type == 'webcam':
        inputs = tf.compat.v1.placeholder(tf.float32, [1, *_MODEL_SIZE, 3])
        detections = model(inputs, training=False)
        saver = tf.compat.v1.train.Saver(tf.compat.v1.global_variables(scope='yolo_v3_model'))

        with tf.compat.v1.Session() as sess:
            saver.restore(sess, './weights/model.ckpt')

            win_name = 'Webcam detection'
            cv2.namedWindow(win_name)
            cap = cv2.VideoCapture(0)
            frame_size = (cap.get(cv2.CAP_PROP_FRAME_WIDTH),
                          cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            fourcc = cv2.VideoWriter_fourcc(*'X264')
            fps = cap.get(cv2.CAP_PROP_FPS)
            out = cv2.VideoWriter('./detections/detections.mp4', fourcc, fps,
                                  (int(frame_size[0]), int(frame_size[1])))

            try:
                while True:
                    ret, frame = cap.read()
                    if not ret:
                        break
                    resized_frame = cv2.resize(frame, dsize=_MODEL_SIZE[::-1],
                                               interpolation=cv2.INTER_NEAREST)
                    detection_result = sess.run(detections,
                                                feed_dict={inputs: [resized_frame]})

                    draw_frame(frame, frame_size, detection_result,
                               class_names, _MODEL_SIZE)

                    cv2.imshow(win_name, frame)

                    key = cv2.waitKey(1) & 0xFF

                    if key == ord('q'):
                        break

                    out.write(frame)
            finally:
                cv2.destroyAllWindows()
                cap.release()
                print('Detections have been saved successfully.')

    else:
        raise ValueError("Inappropriate data type. Please choose either 'video' or 'images'.")
Example No. 25
def runUFRESH():
    directory_x = 'Testing_Images/FRESH_upscaled/Set5'
    pattern = '.bmp'
    directory_y = 'Testing_Images/GT/Set5'

    XpathCell = myglob(directory_x, pattern)
    Xcell = load_images(XpathCell)
    YpathCell = myglob(directory_y, pattern)
    Ycell = load_images(YpathCell)

    blocksize = [5, 5]

    Psnr = np.zeros([len(Xcell)])
    prepsnr = np.zeros([len(Xcell)])
    for imgIdx in range(0, len(Xcell)):
        t1 = time.time()
        print('--------------------------------------------------------')
        print('Processing image ', str(imgIdx + 1), ' of total ',
              str(len(Xcell)))
        Xtest = Xcell[imgIdx]
        Ytest = Ycell[imgIdx]
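        # PSNR of the upscaled input against the ground truth (peak intensity taken as 1.0)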
        mse = np.square((Xtest - Ytest).ravel()).mean(axis=0)
        prepsnr[imgIdx] = 10 * np.log10(1 / mse)
        print('PSNR before processing = ', prepsnr[imgIdx])
        for stage in [1, 2]:
            heir = sp.io.loadmat('pyHeirarchy4096.mat')
            heirarchy = heir['heirarchy'].astype(float32)
            index = heir['index'].astype(float32)
            mymap = sp.io.loadmat('pyMap4096cell96.mat')

            C = np.squeeze(mymap['Map'])
            # Map = np.empty((C.shape[0], C[0].shape[0], C[0].shape[1]))
            # for i in range(Map.shape[0]):
            Map = [np.ndarray([C[0].shape[0], C[0].shape[1]])] * C.shape[0]
            for i in range(len(Map)):
                Map[i] = C[i].astype(float)

            Xrec = np.zeros([len(Xtest), len(Xtest[0]), 4])
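            # Self-ensemble: run UFRESH on four 90-degree rotations, rotate back, and average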
            for rot in range(0, 4):
                print(rot)
                Xtestrot = np.rot90(Xtest, rot)
                # Xtestrot = sp.ndimage.rotate(Xtest, 90*rot)
                X = ufresh(Xtestrot, blocksize, heirarchy, index, Map)
                X = backprojection_2X(X, np.rot90(Ytest, rot))
                X = np.rot90(X, 4 - rot)
                # X = sp.ndimage.rotate(X, 360-90*rot)
                Xrec[:, :, rot] = X

            Xtest = np.mean(Xrec, axis=2)
            Xtest = backprojection_2X(Xtest, Ytest)

        mse = np.square((Xtest - Ytest).ravel()).mean(axis=0)
        Psnr[imgIdx] = 10 * np.log10(1 / mse)
        print('PSNR after processing  = ', Psnr[imgIdx])
        t2 = time.time()
        print('Elapsed time is ', str(t2 - t1))

        # plt.figure()
        # plt.imshow(Xtest, cmap='gray')
        # plt.title('Xtest')
        # plt.figure()
        # plt.imshow(Ytest, cmap='gray')
        # plt.title('Ytest')
        # plt.show()
    print('========================================================')
    print('Average PSNR across all runs = ', str(Psnr.mean(axis=0)))
    print('Average improvement in PSNR  = ', str(
        (Psnr - prepsnr).mean(axis=0)))
Example No. 26
import numpy as np
import tensorflow as tf

import test_lib
import utils
from vgg19 import Vgg19

model = np.load(test_lib.model_vgg19).item()
print("The VGG model is loaded.")

imgs_path = ["./img-airplane-224x224.jpg",
             "./img-guitar-224x224.jpg",
             "./img-puzzle-224x224.jpg",
             "./img-tatoo-plane-224x224.jpg",
             "./img-dog-224x224.jpg",
             "./img-paper-plane-224x224.jpg",
             "./img-pyramid-224x224.jpg",
             "./img-tiger-224x224.jpg"]
imgs = utils.load_images(*imgs_path)
print("The input image(s) is loaded.")
for i, img in enumerate(imgs_path):
    print("%d %s" % (i, img))
print("")

# Design the graph.
graph = tf.Graph()
with graph.as_default():
    nn = Vgg19(model=model)

# Run the graph in the session.
with tf.Session(graph=graph) as sess:
    tf.initialize_all_variables().run()
    print("Tensorflow initialized all variables.")
Example No. 27
#!/usr/bin/env python3

from align import FaceAlign
import matplotlib.pyplot as plt
import numpy as np
import os
from utils import load_images, save_images

fa = FaceAlign('models/landmarks.dat')
images, filenames = load_images('HBTN', as_array=False)
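# Anchor coordinates (normalised to [0, 1]) passed to FaceAlign.align for every image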
x = [[0.194157, 0.16926692], [0.7888591, 0.15817115], [0.4949509, 0.5144414]]
anchors = np.array(x, dtype=np.float32)
aligned = []

alin = 0
for image in images:
    aligned.append(fa.align(image, np.array([36, 45, 33]), anchors, 96))
    print("alin = {}".format(alin))
    alin = alin + 1
aligned = np.array(aligned)
print(aligned.shape)
if not os.path.isdir('HBTNaligned'):
    print(save_images('HBTNaligned', aligned, filenames))
    os.mkdir('HBTNaligned')
print(save_images('HBTNaligned', aligned, filenames))
print(os.listdir('HBTNaligned'))
image = plt.imread('HBTNaligned/KirenSrinivasan.jpg')
plt.imshow(image)
plt.show()
Example No. 28
        # Get the command line parameter value.
        arg_index = sys.argv.index('--bsp_out_dir')
        bsp_out_dir = sys.argv[arg_index + 1]
    # Check whether the --asp_out_dir command line parameter was
    # provided.
    asp_out_dir = None
    if '--asp_out_dir' in sys.argv:
        # Get the command line parameter value.
        arg_index = sys.argv.index('--asp_out_dir')
        asp_out_dir = sys.argv[arg_index + 1]

    if bsp_columns_file is None and asp_columns_file is None:
        sys.exit('One or both of the --bsp_columns_file'
                 ' --asp_columns_file must be provided.')

    images, _ = load_images(imgs_dir, extensions=('.jpg',),
                            img_shape=(256, 256))
    patches = extract_patches(images, (16, 16), images.shape[0]*256,
                              randomize=False)

    if asp_columns_file is not None:
        with open(asp_columns_file, 'rb') as fp:
            asp_columns = pickle.load(fp)
        asp_activations =\
            reconstruct_images(alg='asp', images=patches, columns=asp_columns,
                               connect_threshold=0.2,
                               desired_activity_mult=0.05, min_overlap=3,
                               img_shape=(256, 256), out_dir=asp_out_dir)
        calculate_print_stats(asp_activations, alg='ASP')
        save_activations(asp_activations, asp_columns_file)

    if bsp_columns_file is not None:
Example No. 29
def remove_distortion(images):
    out = calibrate(images)
    matrix = out['camera_matrix']
    dist = out['distortion_coefficient']

    undistorted_images = []
    for (image, color_image) in images:
        size = image.shape[::-1]
        new_matrix, roi = cv.getOptimalNewCameraMatrix(matrix, dist, size,
                                                       1, size)

        img = cv.undistort(color_image, matrix, dist, None, new_matrix)
        undistorted_images.append(img)

    return undistorted_images


if __name__ == '__main__':
    args_parser = argparse.ArgumentParser()
    args_parser.add_argument('folder', help='a folder with images\
                                             to be processed')
    args_parser.add_argument('-s', '--scale', type=float, default=0.45,
                             help='display scale to control window size')
    args = args_parser.parse_args()

    DISPLAY_SCALE = args.scale

    images = load_images(args.folder)
    undistorted_images = remove_distortion(images)
    display_images(undistorted_images, DISPLAY_SCALE)
Example No. 30
def launch():
    seed = 1234
    np.random.seed(seed)
    light = Light()
    light.initials()
    light.file_snapshot()
    light.set_seed(seed)

    w, h = 28, 28
    fast_test = False 
    test_ratio = 0.25
    valid_ratio = 0.25

    light.set("w", w)
    light.set("h", h)
    light.set("test_ratio", test_ratio)
    light.set("valid_ratio", valid_ratio)

    images = load_images(w=w, h=h)
    X = images.reshape((-1, w*h))

    # prepare
    X = shuffle(X)
    if fast_test is True:
        max_evaluations_hp = 1
        default_params = dict(
               max_epochs=2
        )
        X = X[0:100]
    else:
        default_params = dict()
        max_evaluations_hp = 20
    default_params["batch_size"] = 128
    #default_params["nb_layers"] = 1
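    # Models are scored by their reconstruction error on the validation split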
    eval_function = lambda model, X_v, _: float(model.get_reconstruction_error(X_v))
    X_train_full, X_test = train_test_split(X, test_size=test_ratio)
    X_train, X_valid = train_test_split(X_train_full, test_size=valid_ratio)

    # show original data
    #X_ =  X.reshape((X.shape[0], im[0], im[1]))
    #X_ = X_[0:10]
    #grid_plot(X_, imshow_options={"cmap": "gray"})
    #plt.savefig(dirname+"/orig.png")
    #plt.show()

    all_hp, all_scores = find_all_hp(
        AA,
        minimize_fn_with_hyperopt,
        X_train,
        X_valid,
        None,
        None,
        max_evaluations=max_evaluations_hp,
        default_params=default_params,
        not_allowed_params=["batch_size"],
        eval_function=eval_function
    )
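    # Keep the hyper-parameter setting with the lowest validation score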
    argmin = min(range(len(all_hp)), key=lambda i:all_scores[i])
    best_hp, best_score = all_hp[argmin], all_scores[argmin]

    best_hp.update(default_params)
    aa = AA(**best_hp)
    aa.fit(X_train_full, X_test)
    best_model = aa

    light.set("best_hp", best_hp)
    light.set("best_score", best_score)
    light.set("all_hp", all_hp)
    light.set("all_scores", all_scores)
    #light.set("best_model", light.insert_blob(best_model))
    names = best_model.capsule.batch_optimizer.stats[0].keys()
    stats = dict()
    for name in names:
        stats[name] =  get_stat(name, best_model.capsule.batch_optimizer.stats)
    light.set("layer_weights", light.insert_blob([layer.W.get_value() for layer in (best_model.all_layers[1:-1])]))
    light.set("best_model_stats", stats)
    light.set("nb_layers", aa.nb_layers * 2 - 1)

    # reconstructions
    R = np.arange(20)
    X_test_hat = best_model.capsule.predict(X_test[R]).tolist()
    light.set("reconstructions", light.insert_blob(X_test_hat))
    light.endings()
Example No. 31
import pickle
import utils
import sac
import sys

if len(sys.argv) != 2:
    print "Usage: ./display_saved_network.py somefile.pickle"
    sys.exit(1)

fname = sys.argv[1]
f = open(fname, "r")
solution = pickle.load(f)

utils.save_as_figure((solution.W1 + solution.b1).T, "loadedW1.png")
utils.save_as_figure(solution.W2, "loadedW2.png")

images = utils.load_images("data/train-images-idx3-ubyte")
labels = utils.load_labels("data/train-labels-idx1-ubyte")
utils.save_as_figure(images[:, 0:100], "output/input.png")

patches = images[:, 0:10000]
visible_size = 28 * 28
hidden_size = 196

options = sac.SparseAutoEncoderOptions(visible_size,
                                       hidden_size,
                                       output_dir="output",
                                       max_iterations=400)

network = sac.SparseAutoEncoder(options, patches)

theta = network.flatten(solution.W1, solution.W2, solution.b1, solution.b2)