Example #1
import numpy as np
from tensorflow import set_random_seed  # TF 1.x seeding API
# Imports below assume standalone Keras 2.x on a TF 1.x backend;
# Configuration is the project's own config wrapper (import omitted here).
from keras.layers import (BatchNormalization, Bidirectional, CuDNNLSTM, Dense,
                          LSTM, ReLU, TimeDistributed)
from keras.models import Model


def create_extended_model(model: Model, configuration: Configuration, is_gpu: bool, random_state=1) -> Model:
    # Seed NumPy and TensorFlow so the new layers initialize reproducibly.
    np.random.seed(random_state)
    set_random_seed(random_state)

    constructors = {
        'BatchNormalization': lambda params: BatchNormalization(**params),
        'Dense': lambda params: TimeDistributed(Dense(**params)),
        # tanh/sigmoid keep the CPU LSTM weight-compatible with CuDNNLSTM.
        'LSTM': lambda params: Bidirectional(CuDNNLSTM(**params) if is_gpu else
                                             LSTM(activation='tanh', recurrent_activation='sigmoid', **params),
                                             merge_mode='sum'),
        'ReLU': lambda params: ReLU(**params)
    }

    input_tensor = model.inputs[0]
    x = model.layers[-2].output     # tap activations just below the old softmax layer

    layers = configuration.data['extension']['layers']
    for params in layers:
        name = params.pop('name')
        constructor = constructors[name]
        x = constructor(params)(x)

    *_, output_dim = model.layers[-1].output_shape      # keep the original number of classes
    output_tensor = TimeDistributed(Dense(units=output_dim, activation='softmax'))(x)
    model = Model(input_tensor, output_tensor, name='DeepSpeech')
    return model
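A minimal usage sketch, assuming a trained base model and the configuration layout the loop above expects; StubConfiguration and the layer parameters are illustrative, not from the source project:

class StubConfiguration:  # hypothetical stand-in for the project's Configuration
    def __init__(self, data):
        self.data = data

config = StubConfiguration({'extension': {'layers': [
    {'name': 'Dense', 'units': 512},
    {'name': 'ReLU'},
    {'name': 'LSTM', 'units': 256, 'return_sequences': True},
]}})
extended = create_extended_model(base_model, config, is_gpu=False)  # base_model: an existing trained Model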
Example #2
    def text_model(self, text, anchor_center=False):
        """Build a text mesh: one textured quad per glyph, BMFont-style metrics."""

        positions = []
        texture_coordinates = []
        indices = []

        cursor_x, cursor_y = 0, 0
        index = 0

        height = self.texture.height

        for character in text:
            info = self.characters[ord(character)]
            tx, ty = info['x'], info['y']
            tw, th = info['width'], info['height']
            x, y = cursor_x + info['xoffset'], cursor_y - info['yoffset']

            v = [
                x,
                y,  # topleft
                x,
                y - th,  # bottomleft
                x + tw,
                y - th,  # bottomright
                x + tw,
                y,  # topright
            ]
            # Flip the v axis: the atlas origin is top-left, GL's is bottom-left.
            t = [
                tx,
                height - ty,  # topleft
                tx,
                height - (ty + th),  # bottomleft
                tx + tw,
                height - (ty + th),  # bottomright
                tx + tw,
                height - ty  # topright
            ]
            i = [index, index + 1, index + 3, index + 3, index + 1, index + 2]

            positions.extend(v)
            texture_coordinates.extend(t)
            indices.extend(i)

            index += 4

            cursor_x += info['xadvance']

        # Scale by the larger atlas dimension to put positions and UVs in one normalized space.
        max_value = max((self.texture.height, self.texture.width))

        if anchor_center:
            width = cursor_x
            offset = (width / 2) / max_value
            positions = [i / max_value - offset for i in positions]
        else:
            positions = [i / max_value for i in positions]

        texture_coordinates = [i / max_value for i in texture_coordinates]

        positions = VBO.create(positions, dimension=2)
        texture_coordinates = VBO.create(texture_coordinates, dimension=2)
        indices = IBO.create(indices)

        return Model.create(vbos=(positions, texture_coordinates), ibo=indices)
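For reference, each record in self.characters must carry the BMFont-style fields read in the loop above; a hypothetical entry (all values invented):

characters = {
    ord('A'): {'x': 0, 'y': 0,             # top-left corner of the glyph in the atlas
               'width': 20, 'height': 24,  # glyph size in pixels
               'xoffset': 1, 'yoffset': 2, # offset from the pen position
               'xadvance': 22},            # horizontal advance to the next glyph
}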
Example #3
    def test_shortest_paths(self):
        model = Model()
        model.set_adjacency(ALPHA)
        model.set_threshold(0)

        # Source and receiver are each given as a pair of neighbouring nodes.
        model.set_source(0, 1)
        model.set_receiver(4, 5)

        (power, error, paths) = model.solve()
        self.assertEqual(len(paths), 1)

        model.set_source(0, 3)
        model.set_receiver(2, 5)

        (power, error, paths) = model.solve()
        self.assertEqual(len(paths), 2)
Example #4
    def test_zero_power(self):
        # Power and error should vanish for thresholds 0 and 2 on every adjacency matrix.
        model = Model()
        model.set_adjacency(ALPHA)
        model.set_source(0, 1)
        model.set_receiver(4, 5)
        model.set_threshold(0)

        (power, error, paths) = model.solve()
        self.assertAlmostEqual(power, 0)
        self.assertAlmostEqual(error, 0)

        model.set_threshold(2)

        (power, error, paths) = model.solve()
        self.assertAlmostEqual(power, 0)
        self.assertAlmostEqual(error, 0)

        model.set_adjacency(BETA)
        model.set_source(0, 1)
        model.set_receiver(4, 5)
        model.set_threshold(0)

        (power, error, paths) = model.solve()
        self.assertAlmostEqual(power, 0)
        self.assertAlmostEqual(error, 0)

        model.set_threshold(2)

        (power, error, paths) = model.solve()
        self.assertAlmostEqual(power, 0)
        self.assertAlmostEqual(error, 0)

        model.set_adjacency(WIDTH)
        model.set_source(0, 1)
        model.set_receiver(4, 5)
        model.set_threshold(0)

        (power, error, paths) = model.solve()
        self.assertAlmostEqual(power, 0)
        self.assertAlmostEqual(error, 0)

        model.set_threshold(2)

        (power, error, paths) = model.solve()
        self.assertAlmostEqual(power, 0)
        self.assertAlmostEqual(error, 0)
Example #5
    def test_setters(self):
        model = Model()
        model.set_adjacency(ALPHA)
        with self.assertRaises(ValueError):
            model.set_source(-1, 0)  # nodes out of range
        with self.assertRaises(ValueError):
            model.set_source(5, 6)  # nodes out of range
        with self.assertRaises(ValueError):
            model.set_source(0, 2)  # nodes not neighbours
        with self.assertRaises(ValueError):
            model.set_receiver(-1, 0)  # nodes out of range
        with self.assertRaises(ValueError):
            model.set_receiver(5, 6)  # nodes out of range
        with self.assertRaises(ValueError):
            model.set_receiver(0, 2)  # nodes not neighbours
        with self.assertRaises(ValueError):
            model.set_threshold(-1)
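The assertions above pin down ALPHA's shape without showing it: six nodes (so 6 is out of range), with (0, 1) and (4, 5) adjacent but (0, 2) not. A hypothetical matrix consistent with these checks and with the node pairs used in the other tests (the real ALPHA is defined elsewhere in the test module, and whether it reproduces the exact path counts asserted in test_shortest_paths depends on the solver's semantics):

ALPHA = [  # illustrative 6-node adjacency, not the project's actual data
    [0, 1, 0, 1, 0, 0],
    [1, 0, 1, 0, 0, 0],
    [0, 1, 0, 0, 0, 1],
    [1, 0, 0, 0, 1, 0],
    [0, 0, 0, 1, 0, 1],
    [0, 0, 1, 0, 1, 0],
]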
Example #6
from source.model import Model
from source.utils import random_image
import gradio as gr
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--usage',
                    type=str,
                    default='random',
                    choices=['random', 'gradio'],
                    help='Usage mode')
args = parser.parse_args()
model = Model()
model.load_weights('model_Caltech101')  # pretrained Caltech101 weights

if args.usage == 'gradio':

    def gr_predict(inp):
        # model.predict returns the top-5 class labels and their probabilities.
        classes, top_prob = model.predict(inp)
        return {classes[i]: float(top_prob[i]) for i in range(5)}

    inputs = gr.inputs.Image(type='pil')
    outputs = gr.outputs.Label(num_top_classes=5)  # match the five entries returned above
    gr.Interface(fn=gr_predict, inputs=inputs, outputs=outputs).launch()
else:
    model.predict(random_image(), plot_pred=True)
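When running in the gradio branch, gr_predict can also be exercised directly with any PIL image; a hypothetical call (the file name is illustrative):

from PIL import Image

probs = gr_predict(Image.open('test.jpg'))  # file name illustrative
print(max(probs, key=probs.get))            # most probable class label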
Example #7
from source.constructor import Constructor
from source.model import Model
from source.view import View
from source.controller import Controller

if __name__ == '__main__':
    # Wire up the MVC triad, then hand control to the GUI event loop.
    constructor = Constructor()
    model = Model()
    view = View()
    controller = Controller(constructor, model, view)

    view.mainloop()
Example #8
    def filt_image(self, 
                   image,
                   plane,
                   cam_to_img, 
                   image_size,
                   mask,
                   cube_size=[1.5, 2.5, 2.5],
                   crop_size=[64, 64],
                   cube_size_second=[1.45, 1.55, 4.00]):
        # Anchor centers on the ground plane, one candidate per (x, z) grid cell.
        x_grid = self.x_grid
        num_x, num_z = x_grid.get_shape().as_list()
        num_xz = num_x * num_z
        xyz_flatten = self.get_anchor_centers(
                          plane, y_offset=0.0)[:3]
        xyz = tf.expand_dims(
                  tf.transpose(xyz_flatten, [1, 0]), 2)
        # First pass: fixed-size cubes with zero yaw (alpha=0) around each kept anchor.
        corners = self.compute_corners(
                      dimensions=tf.constant(
                          np.reshape(
                              cube_size, [1, 3]),
                          dtype=tf.float32),
                      alpha=tf.constant(0.0,
                          dtype=tf.float32))
        xyz = tf.boolean_mask(xyz, mask)
        kept_corners = xyz + corners
        bbox = self.corners_to_bbox(
                   kept_corners, 
                   cam_to_img,
                   image_size, 
                   tf_order=True)
        num_bbox = bbox.get_shape().as_list()[0]
        box_indices = tf.zeros_like(bbox[:, 0],
                          dtype=tf.int32)
        # Collapse the image to a single grayscale channel, crop around each
        # projected box, and normalize every crop by its own maximum.
        image = tf.expand_dims(tf.squeeze(image), 0)
        image = tf.reduce_mean(image, axis=3,
                               keepdims=True)
        image_crops = tf.image.crop_and_resize(
                          image=image,
                          boxes=bbox,
                          box_ind=box_indices,
                          crop_size=crop_size)
        image_crops = image_crops / (tf.reduce_max(
                          image_crops,
                          axis=[1, 2],
                          keepdims=True) + 1e-6)
        image_model = Model(input_image=image_crops,
                          batch_size=None)
        class_prob_image = image_model.class_prob[:, 1]
        rotation_local = image_model.rotation
        bottom_centers = tf.squeeze(xyz, 2)
        # Convert the predicted observation-relative angle to a global yaw.
        rotation = rotation_local + tf.constant(np.pi / 2,
                                        dtype=tf.float32) - \
                   tf.atan2(bottom_centers[:, 2],
                            bottom_centers[:, 0])

        # Second pass: redo the crops with refined cube dimensions and the
        # yaw estimated by the first model.
        corners_second = self.compute_corners(
                      dimensions=tf.constant(
                          np.reshape(
                              cube_size_second,
                              [1, 3]),
                          dtype=tf.float32),
                      alpha=rotation)

        kept_corners_second = xyz + corners_second
        bbox_second = self.corners_to_bbox(
                   kept_corners_second,
                   cam_to_img,
                   image_size,
                   tf_order=True)
        image_crops_second = tf.image.crop_and_resize(
                          image=image,
                          boxes=bbox_second,
                          box_ind=box_indices,
                          crop_size=crop_size)
        image_crops_second = image_crops_second / (tf.reduce_max(
                          image_crops_second,
                          axis=[1, 2],
                          keepdims=True) + 1e-6)
        image_model_second = Model(input_image=image_crops_second,
                          batch_size=None)
    
        # The final yaw and scores come from the second (refined) model,
        # overriding the first-pass estimates.
        rotation_local_second = image_model_second.rotation
        rotation = rotation_local_second + tf.constant(np.pi / 2,
                                        dtype=tf.float32) - \
                   tf.atan2(bottom_centers[:, 2],
                            bottom_centers[:, 0])

        class_prob = image_model_second.class_prob[:, 1]
        full_prob = image_model_second.full_prob[:, 1]

        return bottom_centers, rotation, class_prob, full_prob
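For context, tf.image.crop_and_resize (TF 1.x) expects boxes as normalized [y1, x1, y2, x2] rows, which is presumably what corners_to_bbox produces when tf_order=True. A self-contained sketch of that call, with illustrative shapes:

import numpy as np
import tensorflow as tf  # TF 1.x

image = tf.placeholder(tf.float32, [1, 128, 128, 1])
boxes = tf.constant([[0.25, 0.25, 0.75, 0.75]], tf.float32)  # normalized [y1, x1, y2, x2]
box_ind = tf.zeros([1], dtype=tf.int32)                      # every crop comes from image 0
crops = tf.image.crop_and_resize(image=image, boxes=boxes,
                                 box_ind=box_ind, crop_size=[64, 64])

with tf.Session() as sess:
    out = sess.run(crops, {image: np.random.rand(1, 128, 128, 1).astype(np.float32)})
print(out.shape)  # (1, 64, 64, 1)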
Example #9
if os.path.isdir(args.path):  # condition reconstructed; the head of this example is truncated in the listing
    image_paths = glob.glob(
        os.path.join(args.path, "*", "blenderproc", "*.hdf5"))
else:
    image_paths = glob.glob(args.path.replace("@", "*"))

if len(image_paths) == 0:
    raise Exception("No .hdf5 files were found here: {}".format(args.path))

settings_file_path = os.path.join(os.path.dirname(__file__), "settings",
                                  "settings_file.yml")
settings = SettingsReader(settings_file_path)

input_ph = tf.placeholder(tf.float32,
                          (None, settings.img_size, settings.img_size, 3))

# create the model
model = Model()
model_result = model.create(input_ph)
last_layer, _, _, _ = model.get_results()

# Saver
saver = tf.train.Saver()

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:

    sess.run(tf.global_variables_initializer())

    saver.restore(sess, args.model_path)

    for image_path in image_paths:
        ...  # per-image inference body truncated in the source listing
Example #10
train_steps = int(
    (settings.max_dataset_size - validation_size) // settings.batch_size)

# Dataset iterators
trn_op, val_op = data_loader.load_default_iterator()
x_iter, y_iter = trn_op.get_next()
x_iter_val, y_iter_val = val_op.get_next()

# Boolean placeholder switches the graph between the train and validation iterators.
val_bool = tf.placeholder(dtype=bool, shape=())
data = tf.cond(val_bool, lambda: x_iter, lambda: x_iter_val)
ground_truth = tf.cond(val_bool, lambda: y_iter, lambda: y_iter_val)
# Map ground truth from [-1, 1] and inputs from [0, 1] into displayable pixel ranges.
tf.summary.image('ground truth', (ground_truth + 1.) / 2. * 255.)
tf.summary.image('color', data * 255.)

# create the model
model = Model()
model_result = model.create(data)

# LossManager
last_layer, _, _, _ = model.get_results()
loss_manager = LossManager(ground_truth, last_layer)
loss = loss_manager.cosine_similarity()
op, cost = model.compile(settings.learning_rate, loss)

# Timers
model_timer = StopWatch()
train_sum_timer = StopWatch()
val_sum_timer = StopWatch()

config = tf.ConfigProto()
config.gpu_options.allow_growth = True
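The val_bool switch above is the standard TF 1.x pattern for sharing one graph between two dataset iterators; a self-contained sketch with toy datasets (names and values illustrative):

import tensorflow as tf  # TF 1.x

train_ds = tf.data.Dataset.from_tensor_slices(tf.range(0, 10)).batch(2).repeat()
val_ds = tf.data.Dataset.from_tensor_slices(tf.range(100, 110)).batch(2).repeat()
train_iter = train_ds.make_one_shot_iterator()
val_iter = val_ds.make_one_shot_iterator()

val_bool = tf.placeholder(tf.bool, shape=())
# get_next() is created inside each branch so only the chosen iterator advances.
data = tf.cond(val_bool,
               lambda: train_iter.get_next(),
               lambda: val_iter.get_next())

with tf.Session() as sess:
    print(sess.run(data, {val_bool: True}))   # batch from the training set
    print(sess.run(data, {val_bool: False}))  # batch from the validation set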
Example #11
    def __init__(self,
                 autoencoder_path,
                 probes_list,
                 decode=False,
                 beta=None,
                 seed=None,
                 fairness=None,
                 checkpoints=None,
                 task_list=None):

        with open(autoencoder_path, 'r') as stream:
            self.config_autoencoder = yaml.load(stream, Loader=yaml.SafeLoader)

        if beta is not None:
            self.config_autoencoder['beta'] = beta

        if fairness is not None:
            self.config_autoencoder['gamma'] = fairness

        if seed is not None:
            self.config_autoencoder['data']['seed'] = seed

        save = checkpoints is not None

        self.results = {}
        self.results['autoencoder'] = copy.deepcopy(
            self.config_autoencoder['net'])
        self.results['loss'] = copy.deepcopy(self.config_autoencoder['loss'])
        self.results['method'] = self.config_autoencoder['experiment']
        self.results['beta'] = self.config_autoencoder['beta']

        self.results['training'] = {}
        self.results['validation'] = {}
        self.results['training']['rec_loss'] = {}
        self.results['validation']['rec_loss'] = {}
        self.results['validation']['bit_rate'] = {}

        self.autoencoder = Model.from_dict(self.config_autoencoder)
        n_epochs = self.config_autoencoder['n_epochs']

        if 'run' in self.config_autoencoder:
            run = self.config_autoencoder['run']
            logger.info(f'Loading checkpoint {run}')
            checkpoint = torch.load(run, map_location='cpu')
            self.autoencoder.net.load_state_dict(
                checkpoint['model_state_dict'])

        self.transfer = False
        if 'transfer' in self.config_autoencoder:
            self.transfer = self.config_autoencoder['transfer']
            self.config_autoencoder['data']['transfer'] = True

        self.transfer_small = False
        if 'transfer_small' in self.config_autoencoder:
            self.transfer_small = self.config_autoencoder['transfer_small']
            self.config_autoencoder['data']['transfer_small'] = True

        if 'prun' in self.config_autoencoder:
            prun = self.config_autoencoder['prun']
            logger.info(f'Loading checkpoint {prun}')
            checkpoint = torch.load(prun, map_location='cpu')
            self.autoencoder.pmodel.load_state_dict(
                checkpoint['model_state_dict'])

        if 'code' in self.config_autoencoder:
            code = self.config_autoencoder['code']
            logger.info(f'Loading symbols {code}')
            self.autoencoder.net.code = torch.from_numpy(np.load(code)).float()

        self.probes_list = probes_list

        if self.transfer or self.transfer_small or (task_list is not None):
            self.task_list = task_list
        else:
            self.task_list = probes_list

        self.decode = decode

        dataname = self.config_autoencoder['data'].pop('name')
        self.results['dataname'] = dataname

        train_dset = globals()[dataname].from_dict(
            self.config_autoencoder['data'], type='train')
        test_dset = globals()[dataname].from_dict(
            self.config_autoencoder['data'], type='test')
        validate_dset = globals()[dataname].from_dict(
            self.config_autoencoder['data'], type='validate')

        train_loader = DataLoader(
            train_dset,
            batch_size=self.config_autoencoder['batch_size'],
            shuffle=True,
            pin_memory=True,
            num_workers=16)
        validate_loader = DataLoader(
            validate_dset,
            batch_size=self.config_autoencoder['batch_size'],
            pin_memory=True,
            num_workers=16)

        logger.info('Training autoencoder to generate representations')
        self.autoencoder.train(train_loader,
                               validate_loader,
                               n_epochs,
                               self.results,
                               save=save,
                               chkpt_dir=checkpoints)
        self.autoencoder.net.eval()

        self.results['validation']['bit_rate'] = {}
        self.results['validation']['rec_loss_final'] = {}

        # Evaluate reconstruction loss and bit rate at beta = 0 and at the trained beta.
        for beta in [0.0, self.autoencoder.beta]:
            val_loss, accuracy, s_loss, entr_loss, active_bits = self.autoencoder.eval(
                validate_loader, beta)
            self.results['validation']['bit_rate'][beta] = entr_loss.item()
            self.results['validation']['rec_loss_final'][beta] = val_loss.item()

        self.device = self.autoencoder.device
        self.nclass = self.config_autoencoder['nclass_outcome']
        self.nclass_sensitive = self.config_autoencoder['nclass_sensitive']

        self.train_dset = train_dset
        self.test_dset = test_dset
        self.validate_dset = validate_dset

        self.threshold = 0
        self.validate_rep_loader = None
        self.test_rep_loader = None
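For orientation, the configuration keys this constructor reads imply a YAML file shaped roughly like the dict below; all values are placeholders, and optional keys such as run, transfer, transfer_small, prun, and code are omitted:

config_autoencoder = {  # illustrative shape only, not the project's actual config
    'net': {},                        # architecture spec consumed by Model.from_dict
    'loss': {},                       # loss spec, copied verbatim into the results
    'experiment': 'example-method',   # placeholder method name
    'beta': 1.0,
    'n_epochs': 50,
    'batch_size': 128,
    'nclass_outcome': 2,
    'nclass_sensitive': 2,
    'data': {'name': 'ExampleDataset', 'seed': 0},  # 'name' selects a dataset class via globals()
}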