Example #1
import tensorflow as tf
import tensorflow.contrib.slim as slim
# resnet_v2_50 / resnet_arg_scope come from the TF-slim model zoo (path may
# vary by repo layout); conv, merge, relu, class_subnet and box_subnet are
# project-local helpers not shown in this snippet.
from nets.resnet_v2 import resnet_v2_50, resnet_arg_scope

def backbone(inputs, is_training):
    # Run the ResNet-50 v2 feature extractor under the standard slim arg scope.
    arg_scope = resnet_arg_scope()
    with slim.arg_scope(arg_scope):
        _, end_points = resnet_v2_50(inputs, is_training=is_training)
    # Stride-8/16/32 backbone feature maps that feed the feature pyramid.
    C3 = end_points["resnet_v2_50/block2/unit_3/bottleneck_v2"]
    C4 = end_points["resnet_v2_50/block3/unit_5/bottleneck_v2"]
    C5 = end_points["resnet_v2_50/block4/unit_3/bottleneck_v2"]
    # Top-down pathway: project C5 to 256 channels, then merge lateral connections.
    P5 = conv("conv5", C5, 256, 1, 1, "SAME")
    P4 = merge("merge1", C4, P5)
    P3 = merge("merge2", C3, P4)
    # Extra coarse levels P6/P7 via stride-2 convolutions on top of C5.
    P6 = conv("conv6", C5, 256, 3, 2, "SAME")
    P7 = conv("conv7", relu(P6), 256, 3, 2, "SAME")

    # Apply the shared classification and box-regression subnets to every level.
    P3_class_logits = class_subnet(P3)
    P3_box_logits = box_subnet(P3)

    P4_class_logits = class_subnet(P4)
    P4_box_logits = box_subnet(P4)

    P5_class_logits = class_subnet(P5)
    P5_box_logits = box_subnet(P5)

    P6_class_logits = class_subnet(P6)
    P6_box_logits = box_subnet(P6)

    P7_class_logits = class_subnet(P7)
    P7_box_logits = box_subnet(P7)
    # Concatenate the per-level predictions along the anchor axis.
    class_logits = tf.concat([P3_class_logits, P4_class_logits, P5_class_logits,
                              P6_class_logits, P7_class_logits], axis=1)
    box_logits = tf.concat([P3_box_logits, P4_box_logits, P5_box_logits,
                            P6_box_logits, P7_box_logits], axis=1)
    class_logits_dict = {"P3": P3_class_logits, "P4": P4_class_logits, "P5": P5_class_logits,
                         "P6": P6_class_logits, "P7": P7_class_logits}
    box_logits_dict = {"P3": P3_box_logits, "P4": P4_box_logits, "P5": P5_box_logits,
                       "P6": P6_box_logits, "P7": P7_box_logits}
    return class_logits, box_logits, class_logits_dict, box_logits_dict

# inputs = tf.placeholder(tf.float32, [None, IMG_H, IMG_W, 3])
# is_training = tf.placeholder(tf.bool)
# backbone(inputs, is_training)
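
The merge helper used for the lateral connections is not part of this snippet. A minimal sketch of a typical FPN merge, assuming the same conv signature as above (name, inputs, filters, kernel size, stride, padding), might look like this:

def merge(name, C, P):
    # Hypothetical sketch: project the backbone feature C to 256 channels,
    # upsample the coarser pyramid level P to the same spatial size, and add.
    with tf.variable_scope(name):
        lateral = conv("lateral", C, 256, 1, 1, "SAME")
        upsampled = tf.image.resize_nearest_neighbor(P, tf.shape(lateral)[1:3])
        return lateral + upsampled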
Example #2
def __perform_keyplot(emma, message="Grouping keys..."):
    if emma.conf.remote:
        # Distribute the trace sets over remote workers and wait for the result.
        async_result = parallel_work(emma.dataset.trace_set_paths, emma.conf)
        em_result = wait_until_completion(async_result, message=message)
    else:
        # Process locally, then merge the per-trace-set results into one.
        em_result = ops.work(emma.dataset.trace_set_paths, emma.conf)
        em_result = ops.merge(em_result, emma.conf)

    # Plot in the time domain unless a spectral op ('spec' or 'fft') was
    # applied, or when the time domain is forced in the configuration.
    visualizations.plot_keyplot(
        em_result.means,
        time_domain=(not (conf_has_op(emma.conf, 'spec')
                          or conf_has_op(emma.conf, 'fft')))
        or emma.conf.plot_force_timedomain,
        sample_rate=1.0,
        show=True)
Example #3
def __perform_keyplot(emma, message="Grouping keys..."):
    # Attack each subkey in the configured range separately.
    for subkey in range(emma.conf.key_low, emma.conf.key_high):
        emma.conf.subkey = subkey  # Set in conf, so the workers know which subkey to attack

        if emma.conf.remote:
            async_result = parallel_work(emma.dataset.trace_set_paths,
                                         emma.conf)
            em_result = wait_until_completion(async_result, message=message)
        else:
            em_result = ops.work(emma.dataset.trace_set_paths, emma.conf)
            em_result = ops.merge(em_result, emma.conf)

        visualizations.plot_keyplot(
            em_result.means,
            time_domain=(not (conf_has_op(emma.conf, 'spec')
                              or conf_has_op(emma.conf, 'fft')))
            or emma.conf.plot_force_timedomain,
            sample_rate=1.0,
            show=True)
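
Neither ops.work nor ops.merge is shown in these two snippets. Assuming ops.work returns one partial result per trace set, the merge step presumably reduces them to a single result whose means can be plotted; a rough sketch under that assumption:

import numpy as np

def merge(partial_results, conf):
    # Hypothetical sketch only; the real ops.merge in EMMA may aggregate
    # differently, and conf is unused here. Average the per-trace-set
    # means into a single result object.
    merged = partial_results[0]
    merged.means = np.mean([r.means for r in partial_results], axis=0)
    return merged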
Example #4
    def build(self, states):

        with tf.variable_scope('net'), op.context(default_activation_fn='relu'):
            # Convolutional trunk over the input states.
            conv1,     w1, b1 = op.conv2d(states, size=8, filters=32, stride=4, name='conv1')
            conv2,     w2, b2 = op.conv2d(conv1, size=4, filters=64, stride=2, name='conv2')
            conv3,     w3, b3 = op.conv2d(conv2, size=3, filters=64, stride=1, name='conv3')
            fc4,       w4, b4 = op.linear(op.flatten(conv3), 256, name='fc4')

            # h encodes the current state; hhat predicts the lookahead encoding.
            h,         w5, b5 = op.linear(fc4, 256, name='h')
            h1,        w6, b6 = op.linear(h, 256, name='h1')
            hhat,      w7, b7 = op.linear(h1, 256, name='hhat')

            # Merge the current and predicted encodings before the output head.
            fc8,       w8, b8 = op.linear(op.merge(h, hhat, name="fc8"), 256, name='fc8')
            output,    w9, b9 = op.linear(fc8, self.environment.get_num_actions(), activation_fn='none', name='output')

        with tf.name_scope('prediction'), tf.variable_scope('net', reuse=True), op.context(default_activation_fn='relu'):
            # Re-run the shared trunk (reused weights) on the lookahead frames
            # to obtain the target encoding for the prediction loss.
            hhat_conv1, _, _ = op.conv2d(self.inputs.lookaheads, size=8, filters=32, stride=4, name='conv1')
            hhat_conv2, _, _ = op.conv2d(hhat_conv1, size=4, filters=64, stride=2, name='conv2')
            hhat_conv3, _, _ = op.conv2d(hhat_conv2, size=3, filters=64, stride=1, name='conv3')
            hhat_truth, _, _ = op.linear(op.flatten(hhat_conv3), 256, name='fc4')

            # Mean squared error between the predicted and actual encodings.
            self.constraint_error = tf.reduce_mean((hhat - hhat_truth)**2, reduction_indices=1, name='prediction_error')

        return output
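
op.merge is defined elsewhere in this project; given that its result feeds a 256-unit linear layer, a plausible reading is a feature-wise concatenation of the two 256-unit vectors. A hedged sketch:

import tensorflow as tf

def merge(a, b, name="merge"):
    # Hypothetical sketch: concatenate two [batch, features] tensors along
    # the feature axis. The project's op.merge may combine them differently.
    return tf.concat([a, b], axis=1, name=name)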
Example #5
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from vae import vae
import numpy as np
from scipy.misc import imsave
from ops import merge

mnist = input_data.read_data_sets('MNIST_data')
# display(mnist.train.images[0,:])
vae = vae()

opt = tf.train.AdamOptimizer(learning_rate=0.01).minimize(vae.loss)

with tf.Session() as sess:
    # Save an 8x8 grid of the first 64 training digits as a reference image.
    batch = mnist.train.next_batch(100)
    imsave("results/base.jpg",
           merge(np.reshape(batch[0], [100, 28, 28])[:64], [8, 8]))

    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        batch = mnist.train.next_batch(100)[0]

        _, loss, gen_images_train = sess.run([opt, vae.loss, vae.gen_image],
                                             feed_dict={vae.ip_image_x: batch})
        print('epoch ' + str(i) + ': ' + str(loss))
        if i % 100 == 0:
            # Periodically save a grid of images generated from test inputs.
            eval_batch = mnist.test.images[:100]
            gen_images_test = sess.run([vae.gen_image],
                                       feed_dict={vae.ip_image_x: eval_batch})
            imsave("results/" + str(i) + ".jpg",
                   merge(gen_images_test[0][:64], [8, 8]))
# f = open('gen_images','wb')
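
The merge imported from ops is not shown here. From the call sites above (a stack of 28x28 images plus a [rows, cols] grid shape) it behaves like the common image-tiling helper; a sketch under that assumption:

import numpy as np

def merge(images, size):
    # Hypothetical sketch of an image-grid helper (the project's ops.merge
    # may differ): tile size[0] * size[1] equally sized single-channel
    # images into one large 2-D array, filling row by row.
    h, w = images.shape[1], images.shape[2]
    grid = np.zeros((h * size[0], w * size[1]))
    for idx, image in enumerate(images):
        row, col = idx // size[1], idx % size[1]
        grid[row * h:(row + 1) * h, col * w:(col + 1) * w] = image
    return grid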