Example #1
    def __call__(self, input_data, states):
        """

        :param input_data: shape (batch, time, H, W, channel)
        :param states: shape (batch, H, W, channel)

        :return: outputs: shape(batch, time, H, W, channel)
        """
        assert input_data is not None
        assert states is not None
        with tf.device(self.device):
            outputs = []
            for i in range(self.time_seq):
                in_data = input_data[:, i, :, :, :]
                i2h = conv2d(in_data, name="i2h", kshape=self.i2h_shape, dtype=self.dtype)
                i2h = tf.split(i2h, 3, axis=3)

                h2h = conv2d(states, name="h2h", kshape=self.h2h_shape, dtype=self.dtype)
                h2h = tf.split(h2h, 3, axis=3)

                reset_gate = tf.nn.sigmoid(i2h[0] + h2h[0], name="reset")
                update_gate = tf.nn.sigmoid(i2h[1] + h2h[1], name="update")

                new_mem = tf.nn.leaky_relu(i2h[2] + reset_gate * h2h[2], alpha=0.2, name="leaky")

                next_h = update_gate * states + (1 - update_gate) * new_mem

                states = next_h
                outputs.append(next_h)
            outputs = tf.stack(outputs, axis=1)

        return outputs
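A minimal call sketch for the cell above, with hypothetical shapes that follow the docstring (the cell instance and its configured time_seq and kernel shapes are assumptions, not from the source):

# Hypothetical usage; `cell` is an instance of the surrounding ConvGRU-style
# class, assumed built with time_seq=10 and kernels matching 8 channels.
inputs = tf.placeholder(tf.float32, [4, 10, 64, 64, 8])  # (batch, time, H, W, C)
state = tf.zeros([4, 64, 64, 8])                         # (batch, H, W, C)
outputs = cell(inputs, state)                            # -> (4, 10, 64, 64, 8)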
Example #2
def get_model(inputs, is_training, bn_decay=None):

    stem = util.conv2d(inputs,
                       32, [3, 3],
                       scope='conv1',
                       stride=[2, 2],
                       padding='VALID',
                       use_xavier=True,
                       is_training=is_training,
                       bn_decay=bn_decay)
    stem = util.conv2d(stem,
                       32, [3, 3],
                       scope='conv2',
                       stride=[1, 1],
                       padding='VALID',
                       use_xavier=True,
                       is_training=is_training,
                       bn_decay=bn_decay)
    stem = util.conv2d(stem,
                       64, [3, 3],
                       scope='conv3',
                       stride=[1, 1],
                       padding='VALID',
                       use_xavier=True,
                       is_training=is_training,
                       bn_decay=bn_decay)
Example #3
 def _build_network(self, name, conv):
     if conv:
         input_s = tf.placeholder(tf.float32,
                                  [None, self.width, self.height, 1])
         with tf.variable_scope(name):
             conv1 = tf_utils.conv2d(input_s, 64, (3, 3), 1)
             conv2 = tf_utils.conv2d(conv1, 32, (1, 1), 1)
             conv3 = tf_utils.conv2d(conv2, 32, (1, 1), 1)
             reward = tf_utils.conv2d(conv3, 1, (1, 1), 1)
         theta = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                   scope=name)
         return input_s, tf.squeeze(tf.reshape(reward,
                                               (-1, self.n_input))), theta
     else:
         input_s = tf.placeholder(tf.float32, [None, self.n_input])
         with tf.variable_scope(name):
             fc1 = tf_utils.fc(
                 input_s,
                 self.n_h1,
                 scope="fc1",
                 activation_fn=tf.nn.elu,
                 initializer=tf.contrib.layers.variance_scaling_initializer(
                     mode="FAN_IN"))
             fc2 = tf_utils.fc(
                 fc1,
                 self.n_h2,
                 scope="fc2",
                 activation_fn=tf.nn.elu,
                 initializer=tf.contrib.layers.variance_scaling_initializer(
                     mode="FAN_IN"))
             reward = tf_utils.fc(fc2, self.n_input, scope="reward")
         theta = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                   scope=name)
         return input_s, tf.squeeze(reward), theta
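A hedged sketch of consuming the returned tensors, assuming an instance `irl` of the surrounding class with `n_input` set (the instance name and batch size are illustrative):

import numpy as np

input_s, reward, theta = irl._build_network("reward_net", conv=False)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Fetch per-state rewards for a hypothetical batch of 8 states.
    r = sess.run(reward, feed_dict={input_s: np.random.rand(8, irl.n_input)})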
Example #4
def pc_encoder(point_cloud, nsamples, is_training, bn_decay=None):
    nn_dis, idx_batch = tf_utils.get_knn(point_cloud, nsamples)
    batch_size = point_cloud.get_shape()[0].value
    num_point = point_cloud.get_shape()[1].value
    point_dim = point_cloud.get_shape()[2].value
    idx_batch = tf.cast(idx_batch, dtype=tf.int32)
    latent_feature = {}
    # con_point = tf.concat([point_cloud, cov_batch], axis=2)
    # encoder_input = tf.expand_dims(con_point, -1)  # (32 2048 3 1)
    encoder_input = tf.expand_dims(point_cloud, -1)  # (32 2048 3 1)
    # (32, 2048, 1, 64)
    net = tf_utils.conv2d(encoder_input,
                          64, [1, 3],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='mlp_1',
                          bn_decay=bn_decay)
    # (32, 2048, 1, 64)
    net = tf_utils.conv2d(net,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='mlp_2',
                          bn_decay=bn_decay)
    # (32, 2048, 1, 64)
    net = tf_utils.conv2d(net,
                          64, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='mlp_3',
                          bn_decay=bn_decay)
    net = tf_utils.conv2d(net,
                          128, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='mlp_4',
                          bn_decay=bn_decay)

    net = tf_utils.conv2d(net,
                          1024, [1, 1],
                          padding='VALID',
                          stride=[1, 1],
                          bn=True,
                          is_training=is_training,
                          scope='mlp_5',
                          bn_decay=bn_decay)
    global_feat = tf_utils.max_pool2d(net, [num_point, 1],
                                      padding='VALID',
                                      scope='maxpool')
    net = tf.reshape(global_feat, [batch_size, -1])
    return net
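A usage sketch with the shapes suggested by the inline comments (batch 32, 2048 points, 3 coordinates); nsamples=16 is an assumption:

point_cloud = tf.placeholder(tf.float32, [32, 2048, 3])
is_training = tf.placeholder(tf.bool, [])
global_feature = pc_encoder(point_cloud, 16, is_training)  # -> (32, 1024)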
Example #5
def CNNEncoder(image, trainable=True):
    x = image
    with tf.variable_scope(
            'CNNEncoder',
            initializer=tf.contrib.layers.xavier_initializer(),
    ):
        with tf.variable_scope('layer1'):
            x = utils.conv2d(x,
                             name='conv1',
                             shape=[3, 3, 1, 64],
                             padding='SAME',
                             activation_func=tf.nn.relu,
                             trainable=trainable,
                             use_bn=True)
            x = tf.nn.max_pool(x,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')
            # 14x14

        with tf.variable_scope('layer2'):
            x = utils.conv2d(x,
                             name='conv1',
                             shape=[3, 3, 64, 64],
                             padding='SAME',
                             activation_func=tf.nn.relu,
                             trainable=trainable,
                             use_bn=True)
            x = tf.nn.max_pool(x,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')
            # 7x7

        with tf.variable_scope('layer3'):
            x = utils.conv2d(x,
                             name='conv1',
                             shape=[3, 3, 64, 64],
                             padding='SAME',
                             activation_func=tf.nn.relu,
                             trainable=trainable,
                             use_bn=True)
            # x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1],
            #                    padding='SAME')

        with tf.variable_scope('layer4'):
            x = utils.conv2d(x,
                             name='conv1',
                             shape=[3, 3, 64, 64],
                             padding='SAME',
                             activation_func=tf.nn.relu,
                             trainable=trainable,
                             use_bn=True)

    return x
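The "# 14x14" and "# 7x7" comments after the two pooling layers imply 28x28 single-channel inputs; a minimal sketch under that assumption:

images = tf.placeholder(tf.float32, [None, 28, 28, 1])
features = CNNEncoder(images)  # -> (None, 7, 7, 64) given 28x28 inputs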
Example #6
 def _build_network(self, name):
     input_s = tf.placeholder(tf.float32, [None] + self.input_shape)
     # input_s = tf.placeholder(tf.float32, [None]+self.input_shape+[1])
     # with tf.variable_scope(name):
     #   fc1 = tf_utils.fc(input_s, self.n_h1, scope="fc1", activation_fn=tf.nn.elu,
     #                     initializer=tf.contrib.layers.variance_scaling_initializer(mode="FAN_IN"))
     #   fc2 = tf_utils.fc(fc1, self.n_h2, scope="fc2", activation_fn=tf.nn.elu,
     #                     initializer=tf.contrib.layers.variance_scaling_initializer(mode="FAN_IN"))
     #   reward = tf_utils.fc(fc2, 1, scope="reward")
     with tf.variable_scope(name):
         conv1 = tf_utils.conv2d(input_s, 8, [2, 2])
         conv2 = tf_utils.conv2d(conv1, 16, [4, 4])
         reward = tf_utils.conv2d(conv2, 1, [4, 4])
     theta = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
     return input_s, reward, theta
Example #7
def reduce35(net, activation_fn=tf.nn.relu, scope=None, reuse=None, is_training=False):
    output = None
    with tf.variable_scope(scope, 'Reduction-A', [net], reuse=reuse):
        with tf.variable_scope('branch_1'):
            layer1_1 = util.max_pool2d(net, [3, 3], scope='6/1_MaxPool',
                                       stride=[2, 2], padding='VALID')
        with tf.variable_scope('branch_2'):
            layer2_1 = util.conv2d(net, 384, [3, 3], scope='6/2_conv_3x3',
                                   stride=[2, 2], padding='VALID', use_xavier=True,
                                   bn=True, is_training=is_training)
        with tf.variable_scope('branch_3'):
            layer3_1 = util.conv2d(net, 256, [1, 1], scope='6/3_conv_1x1',
                                   stride=[1, 1], padding='SAME', use_xavier=True,
                                   activation_fn=None, bn=True, is_training=is_training)
            layer3_2 = util.conv2d(layer3_1, 256, [3, 3], scope='6/3_conv_3x3_1',
                                   stride=[1, 1], padding='SAME', use_xavier=True,
                                   bn=True, is_training=is_training)
            layer3_3 = util.conv2d(layer3_2, 384, [3, 3], scope='6/3_conv_3x3_2',
                                   stride=[2, 2], padding='VALID', use_xavier=True,
                                   bn=True, is_training=is_training)
        concat = util.filter_concat(3, [layer1_1, layer2_1, layer3_3])
        output = concat
    return output
Example #8
def _setup_net(placeholder, layers, weights, mean_pixel):
    """
    Returns the cnn built with given weights and normalized with mean_pixel
    """
    net = {}
    placeholder -= mean_pixel
    for i, name in enumerate(layers):
        kind = name[:4]
        with tf.variable_scope(name):
            if kind == 'conv':
                kernels, bias = weights[i][0][0][0][0]
                # matconvnet: [width, height, in_channels, out_channels]
                # tensorflow: [height, width, in_channels, out_channels]
                kernels = tf_utils.get_variable(
                    np.transpose(kernels, (1, 0, 2, 3)),
                    name=name + "_w")
                bias = tf_utils.get_variable(
                    bias.reshape(-1),
                    name=name + "_b")
                placeholder = tf_utils.conv2d(placeholder, kernels, bias)
            elif kind == 'relu':
                placeholder = tf.nn.relu(placeholder, name=name)
                tf_utils.add_activation_summary(placeholder, collections=['train'])
            elif kind == 'pool':
                placeholder = tf_utils.max_pool_2x2(placeholder)
            net[name] = placeholder

    return net
Example #9
def cls_model(seg_out, seg_out_former, is_training, bn_decay):
    out_1 = tf_utils.conv2d(seg_out_former, kernel_shape=[1,1], strides=1, channel=32,
                            activation_fn=tf.nn.relu, scope='conv11')

    out_2 = tf_utils.maxpool2d(seg_out, kernel_shape=[128,128], strides=1, padding='VALID')
    out_3 = tf_utils.avgpool2d(seg_out, kernel_shape=[128,128], strides=1, padding='VALID')
    out_4 = tf_utils.maxpool2d(out_1, kernel_shape=[128, 128], strides=1, padding='VALID')
    out_5 = tf_utils.avgpool2d(out_1, kernel_shape=[128, 128], strides=1, padding='VALID')

    out_6 = tf.concat([out_2, out_3, out_4, out_5], -1)

    out_7 = tf_utils.conv2d(out_6, kernel_shape=[1,1], strides=1, channel=1,
                            bn=True, bn_decay=bn_decay, is_training=is_training,
                            activation_fn=tf.nn.sigmoid, scope='conv12')

    return out_7
Example #10
def FC(x,
       units,
       activations,
       bn,
       bn_decay,
       is_training,
       use_bias=True,
       drop=None):
    if isinstance(units, int):
        units = [units]
        activations = [activations]
    elif isinstance(units, tuple):
        units = list(units)
        activations = list(activations)
    assert isinstance(units, list)
    for num_unit, activation in zip(units, activations):
        if drop is not None:
            x = tf_utils.dropout(x, drop=drop, is_training=is_training)
        x = tf_utils.conv2d(x,
                            output_dims=num_unit,
                            kernel_size=[1, 1],
                            stride=[1, 1],
                            padding='VALID',
                            use_bias=use_bias,
                            activation=activation,
                            bn=bn,
                            bn_decay=bn_decay,
                            is_training=is_training)
    return x
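A hedged call sketch: FC behaves like stacked dense layers implemented as 1x1 convolutions, so x is assumed to carry a (batch, N, 1, C) layout; the concrete sizes are illustrative:

x = tf.placeholder(tf.float32, [None, 100, 1, 64])
is_training = tf.placeholder(tf.bool, [])
# Two stacked layers: 128 units with ReLU, then 64 units with no activation.
out = FC(x, units=[128, 64], activations=[tf.nn.relu, None],
         bn=True, bn_decay=0.99, is_training=is_training, drop=0.1)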
Example #11
def block8(net,is_training=False,activation_fn=tf.nn.relu,scope = None,reuse = None,scale = 0.2):
    output = None
    with tf.variable_scope(scope,'Inception-C',[net],reuse = reuse):
        with tf.variable_scope('branch_1'):
            layer1_1 = util.conv2d(net,192,[1,1],scope = '9/1_conv_1x1',stride = [1,1],padding='SAME',use_xavier = True,activation_fn = None,bn=True)
        with tf.variable_scope('branch_2'):
            layer2_1 = util.conv2d(net,192,[1,1],scope = '9/2_conv_1x1',stride = [1,1],padding='SAME',use_xavier = True,activation_fn = None,bn=True)
            layer2_2 = util.conv2d(layer2_1,160,[1,7],scope = '9/2_conv_1x7',stride = [1,1],padding='SAME',use_xavier = True,activation_fn=activation_fn,bn = True)
            layer2_3 = util.conv2d(layer2_2,192,[7,1],scope = '9/2_conv_7x1',stride = [1,1],padding='SAME',use_xavier = True,activation_fn=activation_fn,bn = True)
        concat = util.filter_concat(3,[layer1_1,layer2_3])
        concat = util.conv2d(concat,net.get_shape()[3].value,[1,1],scope = '9_concat',stride = [1,1],padding = 'SAME',use_xavier = True , activation_fn = None,bn=None,stddev = None)
        concat = concat * scale
        shortcut = net + concat
        shortcut = activation_fn(shortcut)

        output = shortcut
    return output
Example #12
    def _build_network(self, name):
        input_s = tf.placeholder(tf.float32, [None, self.n_input])
        input_inv = tf.placeholder(tf.float32, [None, self.n_input])
        img_in = tf.reshape(input_s, shape=[-1, 1, 4, 1])
        img_inv = tf.reshape(input_inv, shape=[-1, 1, 4, 1])
        with tf.variable_scope(name):
            cnv1 = tf_utils.conv2d(img_in, 2, (2, 2))
            # max_conv_p = tf_utils.max_pool(cnv1_p)
            fltn_conv = tf_utils.flatten(cnv1)
            fc1 = tf_utils.fc(
                fltn_conv,
                self.n_h2,
                scope="fc1",
                activation_fn=tf.nn.elu,
                initializer=tf.contrib.layers.variance_scaling_initializer(
                    mode="FAN_IN"))

            cnv1_inv = tf_utils.conv2d(img_inv, 2, (2, 2))
            # max_conv_p = tf_utils.max_pool(cnv1_p)
            fltn_conv_inv = tf_utils.flatten(cnv1_inv)
            fc1_inv = tf_utils.fc(
                fltn_conv_inv,
                self.n_h2,
                scope="fc1_inv",
                activation_fn=tf.nn.elu,
                initializer=tf.contrib.layers.variance_scaling_initializer(
                    mode="FAN_IN"))

            subt = tf.subtract(fc1, fc1_inv)
            # blah = tf.multiply(tf.divide(fc2, fc1_p), 0.35)
            # comb = tf.concat([fc1, fc1_inv], 1)
            fc_p1 = tf_utils.fc(
                subt,
                2 * self.n_h1,
                scope="fc_p1",
                activation_fn=tf.nn.elu,
                initializer=tf.contrib.layers.variance_scaling_initializer(
                    mode="FAN_IN"))
            # fc_p2 = tf_utils.fc(fc_p1, self.n_h2, scope="fc_p2", activation_fn=tf.nn.elu,
            #   initializer=tf.contrib.layers.variance_scaling_initializer(mode="FAN_IN"))
            reward = tf_utils.fc(fc_p1, 1, scope="reward")
        theta = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
        return input_s, input_inv, reward, theta
Example #13
def RelationNetwork(encoder, hidden_size, trainable=True):
    x = encoder
    with tf.variable_scope('RelationNetwork') as scope:
        with tf.variable_scope('layer1'):
            x = utils.conv2d(x,
                             name='conv1',
                             shape=[3, 3, 128, 64],
                             padding='SAME',
                             activation_func=tf.nn.relu,
                             trainable=trainable,
                             use_bn=True)
            x = tf.nn.max_pool(x,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')

        with tf.variable_scope('layer2'):
            x = utils.conv2d(x,
                             name='conv1',
                             shape=[3, 3, 64, 64],
                             padding='SAME',
                             activation_func=tf.nn.relu,
                             trainable=trainable,
                             use_bn=True)
            x = tf.nn.max_pool(x,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='SAME')

        with tf.variable_scope('fc1'):
            x = utils.fc(x,
                         num_out=hidden_size,
                         name='fc1',
                         activation_func=tf.nn.relu)

        with tf.variable_scope('fc2'):
            x = utils.fc(x, num_out=1, name='fc2', activation_func=None)

    return x
Example #14
    def define_convgru_graph_2(self):
        with tf.name_scope("Graph"):
            with tf.variable_scope("encoder") and tf.device('/device:GPU:0'):
                self.in_data = tf.placeholder(self.dtype,
                                              name="input",
                                              shape=[None, self.time_seq, c.H, c.W, 1])
                self.gt_data = tf.placeholder(self.dtype,
                                              name="gt",
                                              shape=[None, self.time_seq, c.H, c.W, 1])
                conv_in = tf.reshape(self.in_data, shape=[-1, c.H, c.W, 1])

                first_conv = conv2d(conv_in, name="conv1",
                                    kshape=(7, 7, 1, c.NUM_FILTER), dtype=self.dtype)

            with tf.variable_scope("Conv_Gru", reuse=tf.AUTO_REUSE):
                gru = ConvGru(dtype=self.dtype)
                gru_input = tf.reshape(first_conv, shape=[self.batch, self.time_seq, c.H, c.W, c.NUM_FILTER])
                states = gru.init_state(shape=[self.batch, c.H, c.W, c.NUM_FILTER])
                outputs = gru(gru_input, states)

            with tf.variable_scope("decoder") and tf.device('/device:GPU:0'):
                dec_in = tf.reshape(outputs, shape=[-1, c.H, c.W, c.NUM_FILTER])
                # dec = deconv2d(dec_in, name="dec2_", kshape=(7, 7), n_outputs=1)
                dec = conv2d(dec_in, name="conv2",
                             kshape=(7, 7, c.NUM_FILTER, 1), dtype=self.dtype)
                # dec = tf.cast(dec, dtype=tf.float16)
                out = tf.reshape(dec, shape=[-1, self.time_seq, c.H, c.W, 1])
                print(out)
            with tf.variable_scope("loss"):
                self.result = out
                if c.USE_BALANCED_LOSS:
                    self.loss = weighted_l2(out, self.gt_data)
                else:
                    self.loss = tf.reduce_mean(tf.square(tf.subtract(out, self.gt_data)))
                self.mse = tf.reduce_mean(tf.square(tf.subtract(out, self.gt_data)))
                self.rmse = tf.sqrt(self.mse)
                self.mae = tf.reduce_mean(tf.abs(tf.subtract(out, self.gt_data)))

                self.optimizer = tf.train.AdamOptimizer(self._lr).minimize(self.loss)
Example #15
 def _build_network(self, name):
   input_s = tf.placeholder(tf.float32, [None, self.n_input])
   img_in = tf.reshape(input_s, shape=[-1, 1, 4, 1])
   with tf.variable_scope(name):
     cnv1 = tf_utils.conv2d(img_in, 2, (2,2))
     fltn_conv = tf_utils.flatten(cnv1)
     # fc1 = tf_utils.fc(input_s, self.n_h1, scope="fc1", activation_fn=tf.nn.elu,
     #   initializer=tf.contrib.layers.variance_scaling_initializer(mode="FAN_IN"))
     fc2 = tf_utils.fc(fltn_conv, self.n_h2, scope="fc2", activation_fn=tf.nn.elu,
       initializer=tf.contrib.layers.variance_scaling_initializer(mode="FAN_IN"))
     reward = tf_utils.fc(fc2, 1, scope="reward")
   theta = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)
   return input_s, reward, theta
Example #16
def seg_model(inputs, is_training, bn_decay):
    extend_image = tf.expand_dims(inputs, axis=-1)

    out_1 = tf_utils.conv2d(extend_image, kernel_shape=[11,11], strides=2, channel=32,
                            bn=True, bn_decay=bn_decay, is_training=is_training,
                            activation_fn=tf.nn.relu, scope='conv1')
    out_2 = tf_utils.conv2d(out_1, kernel_shape=[11,11], strides=1, channel=32,
                            bn=True, bn_decay=bn_decay, is_training=is_training,
                            activation_fn=tf.nn.relu, scope='conv2')
    out_3 = tf_utils.conv2d(out_2, kernel_shape=[11,11], strides=1, channel=32,
                            bn=True, bn_decay=bn_decay, is_training=is_training,
                            activation_fn=tf.nn.relu, scope='conv3')

    out_4 = tf_utils.conv2d(out_3, kernel_shape=[7, 7], strides=2, channel=64,
                            bn=True, bn_decay=bn_decay, is_training=is_training,
                            activation_fn=tf.nn.relu, scope='conv4')
    out_5 = tf_utils.conv2d(out_4, kernel_shape=[7, 7], strides=1, channel=64,
                            bn=True, bn_decay=bn_decay, is_training=is_training,
                            activation_fn=tf.nn.relu, scope='conv5')
    out_6 = tf_utils.conv2d(out_5, kernel_shape=[7, 7], strides=1, channel=64,
                            bn=True, bn_decay=bn_decay, is_training=is_training,
                            activation_fn=tf.nn.relu, scope='conv6')

    out_7 = tf_utils.conv2d(out_6, kernel_shape=[3, 3], strides=1, channel=128,
                            bn=True, bn_decay=bn_decay, is_training=is_training,
                            activation_fn=tf.nn.relu, scope='conv7')
    out_8 = tf_utils.conv2d(out_7, kernel_shape=[3, 3], strides=1, channel=128,
                            bn=True, bn_decay=bn_decay, is_training=is_training,
                            activation_fn=tf.nn.relu, scope='conv8')
    out_9 = tf_utils.conv2d(out_8, kernel_shape=[3, 3], strides=1, channel=128,
                            bn=True, bn_decay=bn_decay, is_training=is_training,
                            activation_fn=tf.nn.relu, scope='conv9')

    seg_layer = tf_utils.conv2d(out_9, kernel_shape=[1, 1], strides=1, channel=1,
                                bn=True, bn_decay=bn_decay, is_training=is_training,
                                activation_fn=tf.nn.relu, scope='conv10')

    return seg_layer, out_9
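A hedged wiring sketch for seg_model together with the cls_model from Example #9; the 512x512 input size is inferred from the two stride-2 convolutions here and the 128x128 pooling kernels in cls_model, and bn_decay=0.99 is an assumption:

image = tf.placeholder(tf.float32, [None, 512, 512])
is_training = tf.placeholder(tf.bool, [])
seg_out, seg_out_former = seg_model(image, is_training, bn_decay=0.99)
cls_out = cls_model(seg_out, seg_out_former, is_training, bn_decay=0.99)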
Example #17
    def stack_rnn_forecaster(self, block_state_list):
        with tf.variable_scope("Forecaster"):
            rnn_block_num = len(c.NUM_FILTER)
            rnn_block_output = []
            curr_inputs = None
            for i in range(rnn_block_num - 1, -1, -1):
                with tf.name_scope("Forecaster_rnn_block_" + str(i)):
                    with tf.name_scope("rnn_blocks_" + str(i)):
                        rnn_out, rnn_state = self.rnn_blocks[i].unroll(
                            length=self._seq,
                            inputs=curr_inputs,
                            begin_state=block_state_list[i],
                        )
                        rnn_block_output.append(rnn_out)

                        if i > 0:
                            print(rnn_out)
                            with tf.name_scope("up_sample_" + str(i)):
                                upsample = up_sampling(
                                    rnn_out,
                                    kshape=c.UPSAMPLE[i - 1][0],
                                    stride=c.UPSAMPLE[i - 1][1],
                                    num_filter=c.NUM_FILTER[i],
                                    name="Up_sample_" + str(i))
                            curr_inputs = upsample

            deconv1 = deconv2d_act(rnn_block_output[-1],
                                   kernel=c.LAST_DECONV[1],
                                   stride=c.LAST_DECONV[2],
                                   num_filters=c.LAST_DECONV[0],
                                   use_bias=False,
                                   dtype=self._dtype,
                                   name="last_conv")
            with tf.name_scope('conv_final'):
                conv_final = conv2d_act(deconv1,
                                        kernel=3,
                                        strides=1,
                                        num_filters=8,
                                        name="conv_final")
                pred = conv2d(conv_final, kshape=(1, 1, 8, 1), name="out")
                if self.mode == 'online':
                    pred = tf.reshape(pred,
                                      shape=(self._batch, self._seq, c.PRED_H,
                                             c.PRED_W, self._in_c))
                else:
                    pred = tf.reshape(pred,
                                      shape=(self._batch, self._seq, c.H_TRAIN,
                                             c.W_TRAIN, self._in_c))
                self.pred = pred
Example #18
def build_model(net):
  # Construct the model.
  X = tf.placeholder("float", [BATCH, T_in, IMG_H, IMG_W, IMG_CH])
  Y = tf.placeholder("float", [BATCH, T_pred, IMG_H, IMG_W, IMG_CH])

  # Flatten the incoming images to shape (BATCH * T_in, height, width, ch).
  X_flat = tf.reshape(X, [BATCH * T_in, IMG_H, IMG_W, IMG_CH])
  conv1 = conv2d(X_flat, net.weights['wc1'], net.biases['bc1'], 2)
  conv2 = conv2d(conv1, net.weights['wc2'], net.biases['bc2'], 2)
  conv3 = conv2d(conv2, net.weights['wc3'], net.biases['bc3'], 2)

  # Now flatten everything for the LSTM: (BATCH, T_in, everything else).
  res = tf.reshape(conv3, [BATCH, T_in, -1])
  prediction = net.EncoderDecoder(res)

  # Infer on BATCH * T_pred, everything.
  fc_out = fc(prediction, net.weights['wfc1'], net.biases['bfc1'])
  # Reshape to on BATCH, T_pred, IMG_H * IMG_W * IMG_CH.
  fc_out = tf.reshape(fc_out, [BATCH, T_pred, IMG_H * IMG_W * IMG_CH])
  sig_out = tf.sigmoid(fc_out)

  # Calculate the difference between prediction and target.
  Y_flat = tf.reshape(Y, [BATCH, T_pred, IMG_H * IMG_W * IMG_CH])
  diff = fc_out - Y_flat

  # Compute the loss, with L2 weight regularization on non-bias variables.
  vs = tf.trainable_variables()
  lossL2 = tf.add_n([tf.nn.l2_loss(v) for v in vs
                     if 'bias' not in v.name ]) * 0.001

  loss_op = tf.reduce_sum(tf.reduce_sum(diff * diff, axis=2), axis=1) + lossL2
  loss_op = tf.reduce_mean(loss_op)

  train_op = tf.train.AdamOptimizer(learning_rate=LR).minimize(loss_op)

  return fc_out, sig_out, X, Y, loss_op, train_op
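A hedged training-loop sketch for the graph above; `net`, the shape constants, and the `sampler()` data source are assumptions standing in for the surrounding module:

fc_out, sig_out, X, Y, loss_op, train_op = build_model(net)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        x_batch, y_batch = sampler()  # hypothetical data source
        _, loss_val = sess.run([train_op, loss_op],
                               feed_dict={X: x_batch, Y: y_batch})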
Example #19
    def _conv_layer(input_,
                    num_filters=32,
                    filter_size=3,
                    strides=1,
                    relu=True,
                    name=None):
        with tf.compat.v1.variable_scope(name):
            input_ = tf_utils.padding2d(input_,
                                        p_h=int(filter_size / 2),
                                        p_w=int(filter_size / 2),
                                        pad_type='REFLECT')
            input_ = tf_utils.conv2d(input_,
                                     output_dim=num_filters,
                                     k_h=filter_size,
                                     k_w=filter_size,
                                     d_h=strides,
                                     d_w=strides,
                                     padding='VALID')
            input_ = tf_utils.instance_norm(input_)

            if relu:
                input_ = tf.nn.relu(input_)

        return input_
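A minimal stacking sketch; _conv_layer reads like a static helper (it touches no instance state), and the filter sizes and names below are illustrative:

x = tf.placeholder(tf.float32, [None, 256, 256, 3])
h = _conv_layer(x, num_filters=32, filter_size=9, strides=1, name='c1')
h = _conv_layer(h, num_filters=64, filter_size=3, strides=2, name='c2')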
Example #20
def block35(net,is_training=False,activation_fn=tf.nn.relu,scope = None,reuse = None,scale=.2):
    output = None
    with tf.variable_scope(scope,'Inception-A',[net],reuse = reuse):
        with tf.variable_scope('branch_1'):
            layer1_1 = util.conv2d(net,32,[1,1],scope = '5/1_conv_1x1',stride = [1,1],padding='SAME',use_xavier = True,activation_fn = None,bn=True)
        with tf.variable_scope('branch_2'):
            layer2_1 = util.conv2d(net,32,[1,1],scope = '5/2_conv_1x1',stride = [1,1],padding='SAME',use_xavier = True,activation_fn = None,bn=True)
            layer2_2 = util.conv2d(layer2_1,32,[3,3],scope = '5/2_conv_3x3',stride = [1,1],padding='SAME',use_xavier = True,activation_fn=activation_fn,bn=True)
        with tf.variable_scope('branch_3'):
            layer3_1 = util.conv2d(net,32,[1,1],scope = '5/3_conv_1x1',stride = [1,1],padding='SAME',use_xavier = True,activation_fn = None,bn=True)
            layer3_2 = util.conv2d(layer3_1,48,[3,3],scope = '5/3_conv_3x3_1',stride = [1,1],padding='SAME',use_xavier = True,bn=True,activation_fn=activation_fn)
            layer3_3 = util.conv2d(layer3_2,64,[3,3],scope = '5/3_conv_3x3_2',stride = [1,1],padding='SAME',use_xavier = True,bn=True,activation_fn=activation_fn)
            
        concat1 = util.filter_concat(3,[layer1_1,layer2_2,layer3_3])
        
        concat2 = util.conv2d(concat1,net.get_shape()[3].value,[1,1],scope = '5_concat',stride = [1,1],padding = 'SAME',use_xavier = True , activation_fn = None,bn=None,stddev = None)
        concat2 = concat2 * scale
        shortcut = net + concat2

        shortcut = activation_fn(shortcut)

        output = shortcut
    return output
Example #21
    def builder_network(self, image):
        x = image
        with tf.variable_scope('conv1'):
            x = tf_utils.conv2d(x, [3, 3, 3, 64],
                                name='conv1_1',
                                padding='SAME',
                                use_bn=self.use_bn,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = tf_utils.conv2d(x, [3, 3, 64, 64],
                                name='conv1_2',
                                padding='SAME',
                                use_bn=self.use_bn,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = tf.nn.max_pool(x,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID')

        with tf.variable_scope('conv2'):
            x = tf_utils.conv2d(x, [3, 3, 64, 128],
                                name='conv2_1',
                                padding='SAME',
                                use_bn=self.use_bn,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = tf_utils.conv2d(x, [3, 3, 128, 128],
                                name='conv2_2',
                                padding='SAME',
                                use_bn=self.use_bn,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = tf.nn.max_pool(x,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID')

        with tf.variable_scope('conv3'):
            x = tf_utils.conv2d(x, [3, 3, 128, 256],
                                name='conv3_1',
                                padding='SAME',
                                use_bn=self.use_bn,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = tf_utils.conv2d(x, [3, 3, 256, 256],
                                name='conv3_2',
                                padding='SAME',
                                use_bn=self.use_bn,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = tf_utils.conv2d(x, [3, 3, 256, 256],
                                name='conv3_3',
                                padding='SAME',
                                use_bn=self.use_bn,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = tf.nn.max_pool(x,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID')

        with tf.variable_scope('conv4'):
            x = tf_utils.conv2d(x, [3, 3, 256, 512],
                                name='conv4_1',
                                padding='SAME',
                                use_bn=self.use_bn,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = tf_utils.conv2d(x, [3, 3, 512, 512],
                                name='conv4_2',
                                padding='SAME',
                                use_bn=self.use_bn,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = tf_utils.conv2d(x, [3, 3, 512, 512],
                                name='conv4_3',
                                padding='SAME',
                                use_bn=self.use_bn,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = tf.nn.max_pool(x,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID')

        with tf.variable_scope('conv5'):
            x = tf_utils.conv2d(x, [3, 3, 512, 512],
                                name='conv5_1',
                                padding='SAME',
                                use_bn=self.use_bn,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = tf_utils.conv2d(x, [3, 3, 512, 512],
                                name='conv5_2',
                                padding='SAME',
                                use_bn=self.use_bn,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = tf_utils.conv2d(x, [3, 3, 512, 512],
                                name='conv5_3',
                                padding='SAME',
                                use_bn=self.use_bn,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = tf.nn.max_pool(x,
                               ksize=[1, 2, 2, 1],
                               strides=[1, 2, 2, 1],
                               padding='VALID')

        with tf.name_scope('fc6'):
            x = tf_utils.conv2d(x, [7, 7, 512, 4096],
                                name='fc6',
                                padding='VALID',
                                use_bn=False,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = self.activation(x)

        with tf.name_scope('fc7'):
            x = tf_utils.conv2d(x, [1, 1, 4096, 4096],
                                name='fc7',
                                padding='SAME',
                                use_bn=False,
                                trainable=self.trainable,
                                activation_func=self.activation)
            x = self.activation(x)

        with tf.name_scope('fc8'):
            x = tf_utils.conv2d(x, [1, 1, 4096, self.num_classes],
                                name='fc8',
                                padding='SAME',
                                use_bn=False,
                                trainable=self.trainable,
                                activation_func=self.activation)

        x = tf.squeeze(x, axis=[1, 2], name='squeezed')
        return x
Example #22
def get_model(imgs, is_training, weight_decay=0.0, bn_decay=None):
  batch_size = imgs.get_shape()[0].value
  im_dim = imgs.get_shape()[1].value

  ########
  with tf.variable_scope('Encoding'):
    # (batch_size, 64, 64, 64)
    net = tf_utils.conv2d(imgs, 64, [7,7],
                         padding='SAME', stride=[2,2],
                         bn=True, is_training=is_training,
                         scope='conv1', bn_decay=bn_decay, 
                         weight_decay=weight_decay, activation_fn=tf.nn.elu)
    # (batch_size, 32, 32, 64)
    net = tf_utils.conv2d(net, 64, [5,5],
                         padding='SAME', stride=[2,2],
                         bn=True, is_training=is_training,
                         scope='conv2', bn_decay=bn_decay, 
                         weight_decay=weight_decay, activation_fn=tf.nn.elu)
    # (batch_size, 16, 16, 128)
    net = tf_utils.conv2d(net, 128, [5,5],
                         padding='SAME', stride=[2,2],
                         bn=True, is_training=is_training,
                         scope='conv3', bn_decay=bn_decay, 
                         weight_decay=weight_decay, activation_fn=tf.nn.elu)
    # (batch_size, 8, 8, 128)
    net = tf_utils.conv2d(net, 128, [3,3],
                         padding='SAME', stride=[2,2],
                         bn=True, is_training=is_training,
                         scope='conv4', bn_decay=bn_decay, 
                         weight_decay=weight_decay, activation_fn=tf.nn.elu)
    # (batch_size, 4, 4, 256)
    net = tf_utils.conv2d(net, 256, [3,3],
                         padding='SAME', stride=[2,2],
                         bn=True, is_training=is_training,
                         scope='conv5', bn_decay=bn_decay, 
                         weight_decay=weight_decay, activation_fn=tf.nn.elu)
    # (batch_size, 1, 1, 512)
    net = tf_utils.conv2d(net, 512, [4,4],
                         padding='VALID', stride=[1,1],
                         bn=True, is_training=is_training,
                         scope='conv6', bn_decay=bn_decay, 
                         weight_decay=weight_decay, activation_fn=tf.nn.elu)

  ########
  with tf.variable_scope('Latent_variable'):
    net = tf.reshape(net, [batch_size, 512])
    net = tf_utils.fully_connected(net, 512, scope="fc1", 
                    weight_decay=weight_decay, activation_fn=tf.nn.elu, 
                    bn=True, bn_decay=bn_decay, is_training=is_training)
    net = tf_utils.fully_connected(net, 128*4*4*4, scope="fc2", 
                    weight_decay=weight_decay, activation_fn=tf.nn.elu, 
                    bn=True, bn_decay=bn_decay, is_training=is_training)
    net = tf.reshape(net, [batch_size, 4, 4, 4, 128])

  ########
  with tf.variable_scope('Decoding'):
    # (batch_size, 8, 8, 8, 64)
    net = tf_utils.conv3d_transpose(net, 64, [3, 3, 3], scope="deconv1",
                     stride=[2, 2, 2], padding='SAME',
                     weight_decay=weight_decay, activation_fn=tf.nn.elu,
                     bn=True, bn_decay=bn_decay, is_training=is_training)
    
    # (batch_size, 16, 16, 16, 32)
    net = tf_utils.conv3d_transpose(net, 32, [3, 3, 3], scope="deconv2",
                     stride=[2, 2, 2], padding='SAME',
                     weight_decay=weight_decay, activation_fn=tf.nn.elu,
                     bn=True, bn_decay=bn_decay, is_training=is_training)
    
    # (batch_size, 32, 32, 32, 32)
    net = tf_utils.conv3d_transpose(net, 32, [3, 3, 3], scope="deconv3",
                     stride=[2, 2, 2], padding='SAME',
                     weight_decay=weight_decay, activation_fn=tf.nn.elu,
                     bn=True, bn_decay=bn_decay, is_training=is_training)

    ##################
    ## regressed color
    ##################
    # (batch_size, 64, 64, 64, 16)
    net_reg_clr = tf_utils.conv3d_transpose(net, 16, [3, 3, 3], scope="deconv_reg_clr1",
                     stride=[2, 2, 2], padding='SAME',
                     weight_decay=weight_decay, activation_fn=tf.nn.elu,
                     bn=True, bn_decay=bn_decay, is_training=is_training)
    # (batch_size, 64, 64, 64, 3)
    net_reg_clr = tf_utils.conv3d(net_reg_clr, 3, [3, 3, 3], scope="deconv_reg_clr2",
                     stride=[1, 1, 1], padding='SAME',
                     weight_decay=weight_decay, activation_fn=tf.sigmoid,
                     bn=True, bn_decay=bn_decay, is_training=is_training)

    ##############
    ### confidence
    ############## 
    net_conf = tf_utils.conv3d_transpose(net, 16, [3, 3, 3], scope="deconv_conf1",
                     stride=[2, 2, 2], padding='SAME',
                     weight_decay=weight_decay, activation_fn=tf.nn.elu,
                     bn=True, bn_decay=bn_decay, is_training=is_training)
    # (batch_size, 64, 64, 64, 1)
    net_conf = tf_utils.conv3d(net_conf, 1, [3, 3, 3], scope="conv_conf2",
                     stride=[1, 1, 1], padding='SAME',
                     weight_decay=weight_decay, activation_fn=tf.sigmoid,
                     bn=True, bn_decay=bn_decay, is_training=is_training)

    ##############
    ### flow color
    ##############
    net_flow = tf_utils.conv3d_transpose(net, 2, [3, 3, 3], scope="deconv_flow",
                     stride=[2, 2, 2], padding='SAME',
                     weight_decay=weight_decay, activation_fn=tf.sigmoid,
                     bn=True, bn_decay=bn_decay, is_training=is_training)
    # (batch_size, 64, 64, 64, 3)
    net_flow_clr = tf_utils.Sampler(net_flow, imgs)

    #################
    ### blended color
    #################
    net_blended_clr = net_reg_clr * net_conf + net_flow_clr * (1.0 - net_conf)


  return net_reg_clr, net_conf, net_flow, net_blended_clr
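The Encoding shape comments imply 128x128 RGB inputs (five stride-2 convs down to 4x4, then a 4x4 VALID conv); a hedged sketch with an assumed batch size of 8:

imgs = tf.placeholder(tf.float32, [8, 128, 128, 3])
is_training = tf.placeholder(tf.bool, [])
reg_clr, conf_vol, flow, blended = get_model(imgs, is_training)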
Example #23
    "NHWC": [None, 28, 28, 1],
    "NCHW": [None, 1, 28, 28],
}[DATA_FORMAT]

sess = tf.InteractiveSession()

x = tf.placeholder(tf.float32, shape=init_shape)
y_ = tf.placeholder(tf.int64, shape=[None])

h = x

with tf.variable_scope("mlp",
                       initializer=tf.random_uniform_initializer(-0.05, 0.05)):
    h = tfu.conv2d("conv1",
                   h,
                   num_filters=16,
                   filter_size=(5, 5),
                   # strides=(2, 2),
                   data_format=DATA_FORMAT)
    h = tf.nn.relu(h)
    h = tfu.max_pool(h, (2, 2), data_format=DATA_FORMAT)
    h = tfu.conv2d("conv2",
                   h,
                   num_filters=32,
                   filter_size=(5, 5),
                   # strides=(2, 2),
                   data_format=DATA_FORMAT)
    h = tf.nn.relu(h)
    h = tfu.max_pool(h, (2, 2), data_format=DATA_FORMAT)
    h = tfu.flatten(h, 2)
    h = tfu.affine("fc1", h, 256)
    h = tf.nn.relu(h)
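The snippet stops after fc1; a hedged continuation adding a readout layer, loss, and train step (the layer name "fc2", the 10-class output, and the learning rate are assumptions):

# Hypothetical continuation: readout, loss, and optimizer.
logits = tfu.affine("fc2", h, 10)
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y_, logits=logits))
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
sess.run(tf.global_variables_initializer())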
Example #24
    def builder_network(self, image):
        x = image
        # x = tf_utils.conv2d(x, name='Conv2d_1a_7x7', shape=[7, 7, 3, 8], strides=[1, 2, 2, 1],
        #                     padding='SAME',
        #                     use_bn=self.use_bn,
        #                     trainable=self.trainable,
        #                     activation_func=self.activation_func)
        x = tf_utils.separable_conv2d(x,
                                      depthwise_filter=[7, 7, 3, 8],
                                      pointwise_filter=[1, 1, 24, 64],
                                      name='Conv2d_1a_7x7',
                                      padding='SAME',
                                      use_bn=False,
                                      trainable=self.trainable,
                                      activation_func=self.activation_func)
        x = tf.nn.max_pool(x,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME',
                           name='MaxPool_2a_3x3')

        x = tf_utils.conv2d(x,
                            name='Conv2d_2b_1x1',
                            shape=[1, 1, 64, 64],
                            strides=[1, 1, 1, 1],
                            padding='SAME',
                            use_bn=self.use_bn,
                            trainable=self.trainable,
                            activation_func=self.activation_func)

        x = tf_utils.conv2d(x,
                            name='Conv2d_2c_3x3',
                            shape=[3, 3, 64, 192],
                            strides=[1, 1, 1, 1],
                            padding='SAME',
                            use_bn=self.use_bn,
                            trainable=self.trainable,
                            activation_func=self.activation_func)

        x = tf.nn.max_pool(x,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME',
                           name='MaxPool_3a_3x3')

        x = self.inception(x,
                           shapes=[64, 64, 64, 64, 96, 96, 32],
                           name='Mixed_3b')
        x = self.inception(x,
                           shapes=[64, 64, 96, 64, 96, 96, 64],
                           name='Mixed_3c')
        x = tf.nn.max_pool(x,
                           ksize=[1, 3, 3, 1],
                           strides=[1, 2, 2, 1],
                           padding='SAME',
                           name='MaxPool_4a_3x3')
        dims = x.get_shape().as_list()[-1]
        with tf.variable_scope('Mixed_4a'):
            with tf.variable_scope('Branch_0'):
                branch_0 = tf_utils.conv2d(
                    x,
                    name='Conv2d_0a_1x1',
                    shape=[1, 1, dims, 128],
                    strides=[1, 1, 1, 1],
                    use_bn=self.use_bn,
                    trainable=self.trainable,
                    activation_func=self.activation_func)
                branch_0 = tf_utils.conv2d(
                    branch_0,
                    name='Conv2d_1a_3x3',
                    shape=[3, 3, 128, 160],
                    strides=[1, 2, 2, 1],
                    use_bn=self.use_bn,
                    trainable=self.trainable,
                    activation_func=self.activation_func)
                print('branch_0:', branch_0)
            with tf.variable_scope('Branch_1'):
                branch_1 = tf_utils.conv2d(
                    x,
                    name='Conv2d_0a_1x1',
                    shape=[1, 1, dims, 64],
                    strides=[1, 1, 1, 1],
                    use_bn=self.use_bn,
                    trainable=self.trainable,
                    activation_func=self.activation_func)
                print('branch_1:', branch_1)
                branch_1 = tf_utils.conv2d(
                    branch_1,
                    name='Conv2d_0b_3x3',
                    shape=[3, 3, 64, 96],
                    strides=[1, 1, 1, 1],
                    use_bn=self.use_bn,
                    trainable=self.trainable,
                    activation_func=self.activation_func)
                branch_1 = tf_utils.conv2d(
                    branch_1,
                    name='Conv2d_1a_3x3',
                    shape=[3, 3, 96, 96],
                    strides=[1, 2, 2, 1],
                    use_bn=self.use_bn,
                    trainable=self.trainable,
                    activation_func=self.activation_func)
            with tf.variable_scope('Branch_2'):
                branch_2 = tf.nn.max_pool(x,
                                          ksize=[1, 3, 3, 1],
                                          strides=[1, 2, 2, 1],
                                          padding='SAME',
                                          name='MaxPool_1a_3x3')
                # branch_2 = slim.max_pool2d(
                #     net, [3, 3], stride=2, scope='MaxPool_1a_3x3')
                print('branch_2:', branch_2)

            x = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])

        x = self.inception(x,
                           shapes=[224, 64, 96, 96, 128, 128, 128],
                           name='Mixed_4b')
        x = self.inception(x,
                           shapes=[192, 96, 128, 96, 128, 128, 128],
                           name='Mixed_4c')
        x = self.inception(x,
                           shapes=[160, 128, 160, 128, 160, 160, 96],
                           name='Mixed_4d')
        x = self.inception(x,
                           shapes=[96, 128, 192, 160, 192, 192, 96],
                           name='Mixed_4e')
        # x = self.inception(x, shapes=[256, 160, 320, 32, 128, 128], name='Mixed_4f')
        # x = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME', name='MaxPool_5a_2x2')
        dims = x.get_shape().as_list()[-1]
        with tf.variable_scope('Mixed_5a'):
            with tf.variable_scope('Branch_0'):
                branch_0 = tf_utils.conv2d(
                    x,
                    name='Conv2d_0a_1x1',
                    shape=[1, 1, dims, 128],
                    strides=[1, 1, 1, 1],
                    use_bn=self.use_bn,
                    trainable=self.trainable,
                    activation_func=self.activation_func)
                branch_0 = tf_utils.conv2d(
                    branch_0,
                    name='Conv2d_1a_3x3',
                    shape=[3, 3, 128, 192],
                    strides=[1, 2, 2, 1],
                    use_bn=self.use_bn,
                    trainable=self.trainable,
                    activation_func=self.activation_func)
                print('branch_0:', branch_0)
            with tf.variable_scope('Branch_1'):
                branch_1 = tf_utils.conv2d(
                    x,
                    name='Conv2d_0a_1x1',
                    shape=[1, 1, dims, 192],
                    strides=[1, 1, 1, 1],
                    use_bn=self.use_bn,
                    trainable=self.trainable,
                    activation_func=self.activation_func)
                print('branch_1:', branch_1)
                branch_1 = tf_utils.conv2d(
                    branch_1,
                    name='Conv2d_0b_3x3',
                    shape=[3, 3, 192, 256],
                    strides=[1, 1, 1, 1],
                    use_bn=self.use_bn,
                    trainable=self.trainable,
                    activation_func=self.activation_func)
                branch_1 = tf_utils.conv2d(
                    branch_1,
                    name='Conv2d_1a_3x3',
                    shape=[3, 3, 256, 256],
                    strides=[1, 2, 2, 1],
                    use_bn=self.use_bn,
                    trainable=self.trainable,
                    activation_func=self.activation_func)
            with tf.variable_scope('Branch_2'):
                branch_2 = tf.nn.max_pool(x,
                                          ksize=[1, 3, 3, 1],
                                          strides=[1, 2, 2, 1],
                                          padding='SAME',
                                          name='MaxPool_1a_3x3')
                # branch_2 = slim.max_pool2d(
                #     net, [3, 3], stride=2, scope='MaxPool_1a_3x3')

            x = tf.concat(axis=3, values=[branch_0, branch_1, branch_2])

        x = self.inception(x,
                           shapes=[352, 192, 320, 160, 224, 224, 128],
                           name='Mixed_5b')
        x = self.inception(x,
                           shapes=[352, 192, 320, 192, 224, 224, 128],
                           name='Mixed_5c')
        print('x:', x)
        with tf.variable_scope('Logits'):
            x = tf.nn.avg_pool(x,
                               ksize=[1, 7, 7, 1],
                               strides=[1, 1, 1, 1],
                               padding='VALID')
            x = tf_utils.conv2d(x,
                                shape=[1, 1, 1024, self.num_classes],
                                name='Conv2d_1c_1x1',
                                use_bn=False,
                                padding='SAME')

        # pass
        print('x:', x)
        x = tf.squeeze(x, [1, 2], name='SpatialSqueeze')
        print('x:', x)
        return x
Example #25
def stem(net,is_training=False,activation_fn=tf.nn.relu,scope = None,reuse = None,bn_decay=0.9):
    output = None
    with tf.variable_scope(scope,'Stem',[net],reuse = reuse):
        stem = util.conv2d(net,32,[3,3],scope='1_conv2d_1',stride = [2,2],padding = 'VALID',use_xavier = True,is_training = is_training,bn=True)
        stem = util.conv2d(stem,32,[3,3],scope='1_conv2d_2',stride = [1,1],padding = 'VALID',use_xavier = True,is_training = is_training,bn=True)
        stem = util.conv2d(stem,64,[3,3],scope='1_conv2d_3',stride = [1,1],padding = 'SAME',use_xavier = True,is_training = is_training,bn=True)

        with tf.variable_scope(scope,'Level-2',[stem],reuse=reuse):
            layer2_1 = util.conv2d(stem,96,[3,3],scope = '2_Conv2d',stride = [2,2] , padding = 'VALID',use_xavier = True,is_training = is_training,bn=True)
            layer2_2 = util.max_pool2d(stem,[3,3],scope = '2_MaxPool',stride = [2,2] , padding='VALID')
            stem = util.filter_concat(3,[layer2_1,layer2_2])

        with tf.variable_scope(scope,'Level-3',[stem],reuse=reuse):
            with tf.variable_scope('branch_1'):   
                layer3_1_1 = util.conv2d(stem,64,[1,1],scope = '3/1_conv_1x1',stride = [1,1],padding='SAME',use_xavier = True,activation_fn = None,bn=True,is_training=is_training)
                layer3_1_2 = util.conv2d(layer3_1_1,96,[3,3],scope = '3_conv_3x3',stride = [1,1],padding='VALID',use_xavier = True,bn=True,is_training=is_training)
            with tf.variable_scope('branch_2'):
                layer3_2_1 = util.conv2d(stem,64,[1,1],scope = '3/2-conv_1x1',stride = [1,1],padding = 'SAME',use_xavier = True,activation_fn = None,bn=True,is_training=is_training)
                layer3_2_2 = util.conv2d(layer3_2_1,64,[7,1],scope = '3/2_conv_7x1',stride = [1,1],padding='SAME',use_xavier = True,bn=True,is_training=is_training)
                layer3_2_3 = util.conv2d(layer3_2_2,64,[1,7],scope = '3/2_conv_1x7',stride = [1,1],padding='SAME',use_xavier = True,bn=True,is_training=is_training)
                layer3_2_4 = util.conv2d(layer3_2_3,96,[3,3],scope = '3/2_conv_3x3',stride = [1,1],padding='VALID',use_xavier = True,bn=True,is_training=is_training)
            stem=util.filter_concat(3,[layer3_1_2,layer3_2_4])
        with tf.variable_scope('Level-4'):
            layer4_1 = util.conv2d(stem,192,[3,3],scope = '4_Conv2d',stride = [2,2] , padding = 'VALID',use_xavier = True,bn=True,is_training = is_training)
            layer4_2 = util.max_pool2d(stem,[3,3],scope = '4_MaxPool',stride = [2,2] , padding='VALID')
            stem = util.filter_concat(3,[layer4_1,layer4_2])
        output = stem
    return output
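A usage sketch for the stem; 299x299x3 follows the customary Inception-v4 input size and is an assumption here:

images = tf.placeholder(tf.float32, [None, 299, 299, 3])
features = stem(images, is_training=True)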
Example #26
def main():
    """
    Runs a simple linear regression model on the mnist dataset.
    """

    # Load the mnist dataset. Class stores the train, validation and testing sets as numpy arrays.
    mnist = input_data.read_data_sets('MNIST_data', one_hot=True)

    # Create a TensorFlow session.
    sess = tf.InteractiveSession()

    # Create the computational graph. Start with creating placeholders for the input and output data.
    # Input placeholder.
    input_placeholder = tf.placeholder(tf.float32, shape=[None, 784])
    # Output placeholder.
    labeled_data = tf.placeholder(tf.float32, shape=[None, 10])

    # Reshape input to a 4D tensor of [-1, width, height, channels]. -1 ensures the size remains
    # consistent with the original size.
    image_shape = [-1, 28, 28, 1]
    input_image = tf.reshape(input_placeholder, image_shape)

    # Build the convolutional network: two conv layers followed by a fully connected layer.
    # Layer 1 computes 32 features for each 5x5 patch.
    conv1_weights = tf_utils.weight_variable([5, 5, 1, 32])
    conv1_bias = tf_utils.bias_variable([32])
    # Apply ReLU activation and max pool.
    conv1_act = tf.nn.relu(
        tf_utils.conv2d(input_image, conv1_weights) + conv1_bias)
    conv1_pool = tf_utils.max_pool_2x2(conv1_act)

    # Layer 2 computes 64 features for each 5x5 patch.
    conv2_weights = tf_utils.weight_variable([5, 5, 32, 64])
    conv2_bias = tf_utils.bias_variable([64])
    # Apply ReLU activation and max pool.
    conv2_act = tf.nn.relu(
        tf_utils.conv2d(conv1_pool, conv2_weights) + conv2_bias)
    conv2_pool = tf_utils.max_pool_2x2(conv2_act)

    # Add fully connected layers.
    fc1_weights = tf_utils.weight_variable([7 * 7 * 64, 1024])
    fc1_bias = tf_utils.bias_variable([1024])
    # Apply ReLU activation to the flattened output of the second pooling layer.
    conv2_flat = tf.reshape(conv2_pool, [-1, 7 * 7 * 64])
    fc1_act = tf.nn.relu(tf.matmul(conv2_flat, fc1_weights) + fc1_bias)

    # Add dropout before the readout layer.
    keep_prob = tf.placeholder(tf.float32)
    dropout = tf.nn.dropout(fc1_act, keep_prob)

    # Add the readout layer for the 10 classes.
    readout_weights = tf_utils.weight_variable([1024, 10])
    readout_bias = tf_utils.bias_variable([10])
    readout_act = tf.matmul(dropout, readout_weights) + readout_bias

    # Cross-entropy loss between the true labels and the model's logits.
    cross_entropy = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=labeled_data,
                                                logits=readout_act))

    # Define the training step using the Adam optimizer and our cross-entropy loss.
    learning_rate = 1e-4
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)

    # Initialize all variables.
    sess.run(tf.global_variables_initializer())

    # Model evaluation ops.
    # Compare the predicted class against the true label for each example.
    predictions = tf.equal(tf.argmax(labeled_data, 1),
                           tf.argmax(readout_act, 1))
    accuracy = tf.reduce_mean(tf.cast(predictions, tf.float32))

    # Run the training loop for `steps` iterations.
    steps = 10000
    batch_size = 50
    for step in range(steps):
        # Sample a batch from the mnist dataset.
        batch = mnist.train.next_batch(batch_size)
        # Create a dict of the data from the sampled batch and run one training step.
        train_step.run(feed_dict={
            input_placeholder: batch[0],
            labeled_data: batch[1],
            keep_prob: 0.5
        })

        # Print the training accuracy every 100 steps.
        if step % 100 == 0:
            train_accuracy = accuracy.eval(
                feed_dict={
                    input_placeholder: batch[0],
                    labeled_data: batch[1],
                    keep_prob: 1.0
                })
            print "Step: ", step, " | Train Accuracy: ", train_accuracy

    print "Accuracy: ", accuracy.eval(
        feed_dict={
            input_placeholder: mnist.test.images,
            labeled_data: mnist.test.labels,
            keep_prob: 1.0
        })
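An entry-point guard (not part of the original snippet) lets the example run directly as a script:

if __name__ == '__main__':
    main()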
Example #27
0
def GEDDnet(face,
            left_eye,
            right_eye,
            keep_prob,
            is_train,
            subj_id,
            vgg_path,
            num_subj=15,
            rf=[[2, 2], [3, 3], [5, 5], [11, 11]],
            num_face=[64, 128, 64, 64, 128, 256, 64],
            r=[[2, 2], [3, 3], [4, 5], [5, 11]],
            num_eye=[64, 128, 64, 64, 128, 256],
            num_comb=[0, 256]):

    # Rebuild num_comb instead of mutating the (shared) default list argument.
    num_comb = [num_face[-1] + 2 * num_eye[-1], num_comb[1]]

    vgg = np.load(vgg_path)
    with tf.variable_scope("transfer"):
        W_conv1_1 = tf.Variable(vgg['conv1_1_W'])
        b_conv1_1 = tf.Variable(vgg['conv1_1_b'])
        W_conv1_2 = tf.Variable(vgg['conv1_2_W'])
        b_conv1_2 = tf.Variable(vgg['conv1_2_b'])

        W_conv2_1 = tf.Variable(vgg['conv2_1_W'])
        b_conv2_1 = tf.Variable(vgg['conv2_1_b'])
        W_conv2_2 = tf.Variable(vgg['conv2_2_W'])
        b_conv2_2 = tf.Variable(vgg['conv2_2_b'])
    del vgg
    """ define network """
    # face
    face_h_conv1_1 = tf.nn.relu(conv2d(face, W_conv1_1) + b_conv1_1)
    face_h_conv1_2 = tf.nn.relu(conv2d(face_h_conv1_1, W_conv1_2) + b_conv1_2)
    face_h_pool1 = max_pool_2x2(face_h_conv1_2)

    face_h_conv2_1 = tf.nn.relu(conv2d(face_h_pool1, W_conv2_1) + b_conv2_1)
    face_h_conv2_2 = tf.nn.relu(conv2d(face_h_conv2_1, W_conv2_2) +
                                b_conv2_2) / 100.

    with tf.variable_scope("face"):

        face_W_conv2_3 = weight_variable([1, 1, num_face[1], num_face[2]],
                                         std=0.125)
        face_b_conv2_3 = bias_variable([num_face[2]], std=0.001)

        face_W_conv3_1 = weight_variable([3, 3, num_face[2], num_face[3]],
                                         std=0.06)
        face_b_conv3_1 = bias_variable([num_face[3]], std=0.001)
        face_W_conv3_2 = weight_variable([3, 3, num_face[3], num_face[3]],
                                         std=0.06)
        face_b_conv3_2 = bias_variable([num_face[3]], std=0.001)

        face_W_conv4_1 = weight_variable([3, 3, num_face[3], num_face[4]],
                                         std=0.08)
        face_b_conv4_1 = bias_variable([num_face[4]], std=0.001)
        face_W_conv4_2 = weight_variable([3, 3, num_face[4], num_face[4]],
                                         std=0.07)
        face_b_conv4_2 = bias_variable([num_face[4]], std=0.001)

        face_W_fc1 = weight_variable([6 * 6 * num_face[4], num_face[5]],
                                     std=0.035)
        face_b_fc1 = bias_variable([num_face[5]], std=0.001)

        face_W_fc2 = weight_variable([num_face[5], num_face[6]], std=0.1)
        face_b_fc2 = bias_variable([num_face[6]], std=0.001)

        face_h_conv2_3 = tf.nn.relu(
            conv2d(face_h_conv2_2, face_W_conv2_3) + face_b_conv2_3)
        face_h_conv2_3_norm = tf.layers.batch_normalization(face_h_conv2_3,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="f_conv2_3")

        face_h_conv3_1 = tf.nn.relu(
            dilated2d(face_h_conv2_3_norm, face_W_conv3_1, rf[0]) +
            face_b_conv3_1)
        face_h_conv3_1_norm = tf.layers.batch_normalization(face_h_conv3_1,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="f_conv3_1")

        face_h_conv3_2 = tf.nn.relu(
            dilated2d(face_h_conv3_1_norm, face_W_conv3_2, rf[1]) +
            face_b_conv3_2)
        face_h_conv3_2_norm = tf.layers.batch_normalization(face_h_conv3_2,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="f_conv3_2")

        face_h_conv4_1 = tf.nn.relu(
            dilated2d(face_h_conv3_2_norm, face_W_conv4_1, rf[2]) +
            face_b_conv4_1)
        face_h_conv4_1_norm = tf.layers.batch_normalization(face_h_conv4_1,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="f_conv4_1")

        face_h_conv4_2 = tf.nn.relu(
            dilated2d(face_h_conv4_1_norm, face_W_conv4_2, rf[3]) +
            face_b_conv4_2)
        face_h_conv4_2_norm = tf.layers.batch_normalization(face_h_conv4_2,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="f_conv4_2")

        face_h_pool4_flat = tf.reshape(face_h_conv4_2_norm,
                                       [-1, 6 * 6 * num_face[4]])

        face_h_fc1 = tf.nn.relu(
            tf.matmul(face_h_pool4_flat, face_W_fc1) + face_b_fc1)
        face_h_fc1_norm = tf.layers.batch_normalization(face_h_fc1,
                                                        training=is_train,
                                                        scale=False,
                                                        renorm=True,
                                                        name="f_fc1")
        face_h_fc1_drop = tf.nn.dropout(face_h_fc1_norm, keep_prob)

        face_h_fc2 = tf.nn.relu(
            tf.matmul(face_h_fc1_drop, face_W_fc2) + face_b_fc2)
        face_h_fc2_norm = tf.layers.batch_normalization(face_h_fc2,
                                                        training=is_train,
                                                        scale=False,
                                                        renorm=True,
                                                        name="f_fc2")

    eye1_h_conv1_1 = tf.nn.relu(conv2d(left_eye, W_conv1_1) + b_conv1_1)
    eye1_h_conv1_2 = tf.nn.relu(conv2d(eye1_h_conv1_1, W_conv1_2) + b_conv1_2)
    eye1_h_pool1 = max_pool_2x2(eye1_h_conv1_2)

    eye1_h_conv2_1 = tf.nn.relu(conv2d(eye1_h_pool1, W_conv2_1) + b_conv2_1)
    eye1_h_conv2_2 = tf.nn.relu(conv2d(eye1_h_conv2_1, W_conv2_2) +
                                b_conv2_2) / 100.

    eye2_h_conv1_1 = tf.nn.relu(conv2d(right_eye, W_conv1_1) + b_conv1_1)
    eye2_h_conv1_2 = tf.nn.relu(conv2d(eye2_h_conv1_1, W_conv1_2) + b_conv1_2)
    eye2_h_pool1 = max_pool_2x2(eye2_h_conv1_2)

    eye2_h_conv2_1 = tf.nn.relu(conv2d(eye2_h_pool1, W_conv2_1) + b_conv2_1)
    eye2_h_conv2_2 = tf.nn.relu(conv2d(eye2_h_conv2_1, W_conv2_2) +
                                b_conv2_2) / 100.

    with tf.variable_scope("eye"):
        # left eye
        eye_W_conv2_3 = weight_variable([1, 1, num_eye[1], num_eye[2]],
                                        std=0.125)
        eye_b_conv2_3 = bias_variable([num_eye[2]], std=0.001)

        eye_W_conv3_1 = weight_variable([3, 3, num_eye[2], num_eye[3]],
                                        std=0.06)
        eye_b_conv3_1 = bias_variable([num_eye[3]], std=0.001)
        eye_W_conv3_2 = weight_variable([3, 3, num_eye[3], num_eye[3]],
                                        std=0.06)
        eye_b_conv3_2 = bias_variable([num_eye[3]], std=0.001)

        eye_W_conv4_1 = weight_variable([3, 3, num_eye[3], num_eye[4]],
                                        std=0.06)
        eye_b_conv4_1 = bias_variable([num_eye[4]], std=0.001)
        eye_W_conv4_2 = weight_variable([3, 3, num_eye[4], num_eye[4]],
                                        std=0.04)
        eye_b_conv4_2 = bias_variable([num_eye[4]], std=0.001)

        eye1_W_fc1 = weight_variable([4 * 6 * num_eye[4], num_eye[5]],
                                     std=0.026)
        eye1_b_fc1 = bias_variable([num_eye[5]], std=0.001)

        eye2_W_fc1 = weight_variable([4 * 6 * num_eye[4], num_eye[5]],
                                     std=0.026)
        eye2_b_fc1 = bias_variable([num_eye[5]], std=0.001)

        eye1_h_conv2_3 = tf.nn.relu(
            conv2d(eye1_h_conv2_2, eye_W_conv2_3) + eye_b_conv2_3)
        eye1_h_conv2_3_norm = tf.layers.batch_normalization(eye1_h_conv2_3,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="e_conv2_3")

        eye1_h_conv3_1 = tf.nn.relu(
            dilated2d(eye1_h_conv2_3_norm, eye_W_conv3_1, r[0]) +
            eye_b_conv3_1)
        eye1_h_conv3_1_norm = tf.layers.batch_normalization(eye1_h_conv3_1,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="e_conv3_1")

        eye1_h_conv3_2 = tf.nn.relu(
            dilated2d(eye1_h_conv3_1_norm, eye_W_conv3_2, r[1]) +
            eye_b_conv3_2)
        eye1_h_conv3_2_norm = tf.layers.batch_normalization(eye1_h_conv3_2,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="e_conv3_2")

        eye1_h_conv4_1 = tf.nn.relu(
            dilated2d(eye1_h_conv3_2_norm, eye_W_conv4_1, r[2]) +
            eye_b_conv4_1)
        eye1_h_conv4_1_norm = tf.layers.batch_normalization(eye1_h_conv4_1,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="e_conv4_1")

        eye1_h_conv4_2 = tf.nn.relu(
            dilated2d(eye1_h_conv4_1_norm, eye_W_conv4_2, r[3]) +
            eye_b_conv4_2)
        eye1_h_conv4_2_norm = tf.layers.batch_normalization(eye1_h_conv4_2,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="e_conv4_2")

        eye1_h_pool4_flat = tf.reshape(eye1_h_conv4_2_norm,
                                       [-1, 4 * 6 * num_eye[4]])

        eye1_h_fc1 = tf.nn.relu(
            tf.matmul(eye1_h_pool4_flat, eye1_W_fc1) + eye1_b_fc1)
        eye1_h_fc1_norm = tf.layers.batch_normalization(eye1_h_fc1,
                                                        training=is_train,
                                                        scale=False,
                                                        renorm=True,
                                                        name="e1_fc1")

        # right eye
        eye2_h_conv2_3 = tf.nn.relu(
            conv2d(eye2_h_conv2_2, eye_W_conv2_3) + eye_b_conv2_3)
        eye2_h_conv2_3_norm = tf.layers.batch_normalization(eye2_h_conv2_3,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="e_conv2_3",
                                                            reuse=True)

        eye2_h_conv3_1 = tf.nn.relu(
            dilated2d(eye2_h_conv2_3_norm, eye_W_conv3_1, r[0]) +
            eye_b_conv3_1)
        eye2_h_conv3_1_norm = tf.layers.batch_normalization(eye2_h_conv3_1,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="e_conv3_1",
                                                            reuse=True)

        eye2_h_conv3_2 = tf.nn.relu(
            dilated2d(eye2_h_conv3_1_norm, eye_W_conv3_2, r[1]) +
            eye_b_conv3_2)
        eye2_h_conv3_2_norm = tf.layers.batch_normalization(eye2_h_conv3_2,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="e_conv3_2",
                                                            reuse=True)

        eye2_h_conv4_1 = tf.nn.relu(
            dilated2d(eye2_h_conv3_2_norm, eye_W_conv4_1, r[2]) +
            eye_b_conv4_1)
        eye2_h_conv4_1_norm = tf.layers.batch_normalization(eye2_h_conv4_1,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="e_conv4_1",
                                                            reuse=True)

        eye2_h_conv4_2 = tf.nn.relu(
            dilated2d(eye2_h_conv4_1_norm, eye_W_conv4_2, r[3]) +
            eye_b_conv4_2)
        eye2_h_conv4_2_norm = tf.layers.batch_normalization(eye2_h_conv4_2,
                                                            training=is_train,
                                                            scale=False,
                                                            renorm=True,
                                                            name="e_conv4_2",
                                                            reuse=True)

        eye2_h_pool4_flat = tf.reshape(eye2_h_conv4_2_norm,
                                       [-1, 4 * 6 * num_eye[4]])

        eye2_h_fc1 = tf.nn.relu(
            tf.matmul(eye2_h_pool4_flat, eye2_W_fc1) + eye2_b_fc1)
        eye2_h_fc1_norm = tf.layers.batch_normalization(eye2_h_fc1,
                                                        training=is_train,
                                                        scale=False,
                                                        renorm=True,
                                                        name="e2_fc1")

    # combine both eyes and face
    with tf.variable_scope("combine"):

        cls1_W_fc2 = weight_variable([num_comb[0], num_comb[1]], std=0.07)
        cls1_b_fc2 = bias_variable([num_comb[1]], std=0.001)

        cls1_W_fc3 = weight_variable([num_comb[1], 2], std=0.125)
        cls1_b_fc3 = bias_variable([2], std=0.001)

        cls1_h_fc1_norm = tf.concat(
            [face_h_fc2_norm, eye1_h_fc1_norm, eye2_h_fc1_norm], axis=1)
        cls1_h_fc1_drop = tf.nn.dropout(cls1_h_fc1_norm, keep_prob)
        cls1_h_fc2 = tf.nn.relu(
            tf.matmul(cls1_h_fc1_drop, cls1_W_fc2) + cls1_b_fc2)
        cls1_h_fc2_norm = tf.layers.batch_normalization(cls1_h_fc2,
                                                        training=is_train,
                                                        scale=False,
                                                        renorm=True,
                                                        name="c_fc2")
        cls1_h_fc2_drop = tf.nn.dropout(cls1_h_fc2_norm, keep_prob)

        t_hat = tf.matmul(cls1_h_fc2_drop, cls1_W_fc3) + cls1_b_fc3
    """ bias learning from subject id """
    num_bias = (2 * num_subj, )
    with tf.variable_scope("bias"):

        bias_W_fc = weight_variable([num_bias[0], 2], std=0.125)
        b_hat = tf.matmul(subj_id, bias_W_fc)
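        # If subj_id is a one-hot (batch, 2*num_subj) matrix, this matmul
        # selects one learned 2-D offset row of bias_W_fc per sample.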

    g_hat = t_hat + b_hat

    l2_loss = (1e-2 * tf.nn.l2_loss(W_conv1_1) +
               1e-2 * tf.nn.l2_loss(W_conv1_2) +
               1e-2 * tf.nn.l2_loss(W_conv2_1) +
               1e-2 * tf.nn.l2_loss(W_conv2_2) +
               tf.nn.l2_loss(face_W_conv2_3) + tf.nn.l2_loss(face_W_conv3_1) +
               tf.nn.l2_loss(face_W_conv3_2) + tf.nn.l2_loss(face_W_conv4_1) +
               tf.nn.l2_loss(face_W_conv4_2) + tf.nn.l2_loss(face_W_fc1) +
               tf.nn.l2_loss(face_W_fc2) + tf.nn.l2_loss(eye_W_conv2_3) +
               tf.nn.l2_loss(eye_W_conv3_1) + tf.nn.l2_loss(eye_W_conv3_2) +
               tf.nn.l2_loss(eye_W_conv4_1) + tf.nn.l2_loss(eye_W_conv4_2) +
               tf.nn.l2_loss(eye1_W_fc1) + tf.nn.l2_loss(eye2_W_fc1) +
               tf.nn.l2_loss(cls1_W_fc2) + tf.nn.l2_loss(cls1_W_fc3))

    return g_hat, t_hat, bias_W_fc, l2_loss
Example #28
0
 def inception(self, x, shapes, name):
     assert len(shapes) == 7
     dims = x.get_shape().as_list()[-1]
     with tf.variable_scope(name):
         with tf.variable_scope('Branch_0'):
             branch_0 = tf_utils.conv2d(
                 x,
                 name='Conv2d_0a_1x1',
                 shape=[1, 1, dims, shapes[0]],
                 strides=[1, 1, 1, 1],
                 use_bn=self.use_bn,
                 trainable=self.trainable,
                 activation_func=self.activation_func)
             # print('branch_0:', branch_0)
         with tf.variable_scope('Branch_1'):
             branch_1 = tf_utils.conv2d(
                 x,
                 name='Conv2d_0a_1x1',
                 shape=[1, 1, dims, shapes[1]],
                 strides=[1, 1, 1, 1],
                 use_bn=self.use_bn,
                 trainable=self.trainable,
                 activation_func=self.activation_func)
             # print('branch_1:', branch_1)
             branch_1 = tf_utils.conv2d(
                 branch_1,
                 name='Conv2d_0b_3x3',
                 shape=[3, 3, shapes[1], shapes[2]],
                 strides=[1, 1, 1, 1],
                 use_bn=self.use_bn,
                 trainable=self.trainable,
                 activation_func=self.activation_func)
             # print('branch_1:', branch_1)
         with tf.variable_scope('Branch_2'):
             branch_2 = tf_utils.conv2d(
                 x,
                 name='Conv2d_0a_1x1',
                 shape=[1, 1, dims, shapes[3]],
                 strides=[1, 1, 1, 1],
                 use_bn=self.use_bn,
                 trainable=self.trainable,
                 activation_func=self.activation_func)
             # print('branch_2:', branch_2)
             branch_2 = tf_utils.conv2d(
                 branch_2,
                 name='Conv2d_0b_3x3',
                 shape=[3, 3, shapes[3], shapes[4]],
                 strides=[1, 1, 1, 1],
                 use_bn=self.use_bn,
                 trainable=self.trainable,
                 activation_func=self.activation_func)
             branch_2 = tf_utils.conv2d(
                 branch_2,
                 name='Conv2d_0c_3x3',
                 shape=[3, 3, shapes[4], shapes[5]],
                 strides=[1, 1, 1, 1],
                 use_bn=self.use_bn,
                 trainable=self.trainable,
                 activation_func=self.activation_func)
             # print('branch_2:', branch_2)
         with tf.variable_scope('Branch_3'):
             branch_3 = tf.nn.avg_pool(x,
                                       ksize=[1, 3, 3, 1],
                                       strides=[1, 1, 1, 1],
                                       padding='SAME',
                                       name='AvgPool_0a_3x3')
             # print('branch_3:', branch_3)
             branch_3 = tf_utils.conv2d(
                 branch_3,
                 name='Conv2d_0b_1x1',
                 shape=[1, 1, dims, shapes[6]],
                 strides=[1, 1, 1, 1],
                 use_bn=self.use_bn,
                 trainable=self.trainable,
                 activation_func=self.activation_func)
             # print('branch_3:', branch_3)
     x = tf.concat(axis=3, values=[branch_0, branch_1, branch_2, branch_3])
     return x
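The block's output depth is the sum of the four branch widths, shapes[0] + shapes[2] + shapes[5] + shapes[6]. A hedged usage sketch, with widths mirroring GoogLeNet's Mixed_3a (branch 2's 5x5 is factored into two 3x3 convolutions, as in this code); `net` and the owning class are assumed:

# Hypothetical call from inside the owning class;
# output depth = 64 + 128 + 32 + 32 = 256 channels.
net = self.inception(net, [64, 96, 128, 16, 32, 32, 32], name='Mixed_3a')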
Example #29
0
def get_model(imgs, is_training, weight_decay=0.0, bn_decay=None):
    """
      Args: 
        imgs: (batch_size, im_dim, im_dim, 3)
        is_training: a boolean placeholder.

      Return:
        shape: (batch_size, vol_dim, vol_dim, vol_dim, 1)
  """
    batch_size = imgs.get_shape()[0].value
    im_dim = imgs.get_shape()[1].value

    ########
    with tf.variable_scope('Encoding'):
        # (batch_size, 64, 64, 64)
        net = tf_utils.conv2d(imgs,
                              64, [7, 7],
                              padding='SAME',
                              stride=[2, 2],
                              bn=True,
                              is_training=is_training,
                              scope='conv1',
                              bn_decay=bn_decay,
                              weight_decay=weight_decay,
                              activation_fn=tf.nn.elu)
        # (batch_size, 32, 32, 64)
        net = tf_utils.conv2d(net,
                              64, [5, 5],
                              padding='SAME',
                              stride=[2, 2],
                              bn=True,
                              is_training=is_training,
                              scope='conv2',
                              bn_decay=bn_decay,
                              weight_decay=weight_decay,
                              activation_fn=tf.nn.elu)
        # (batch_size, 16, 16, 128)
        net = tf_utils.conv2d(net,
                              128, [5, 5],
                              padding='SAME',
                              stride=[2, 2],
                              bn=True,
                              is_training=is_training,
                              scope='conv3',
                              bn_decay=bn_decay,
                              weight_decay=weight_decay,
                              activation_fn=tf.nn.elu)
        # (batch_size, 8, 8, 128)
        net = tf_utils.conv2d(net,
                              128, [3, 3],
                              padding='SAME',
                              stride=[2, 2],
                              bn=True,
                              is_training=is_training,
                              scope='conv4',
                              bn_decay=bn_decay,
                              weight_decay=weight_decay,
                              activation_fn=tf.nn.elu)
        # (batch_size, 4, 4, 256)
        net = tf_utils.conv2d(net,
                              256, [3, 3],
                              padding='SAME',
                              stride=[2, 2],
                              bn=True,
                              is_training=is_training,
                              scope='conv5',
                              bn_decay=bn_decay,
                              weight_decay=weight_decay,
                              activation_fn=tf.nn.elu)
        # (batch_size, 1, 1, 512)
        net = tf_utils.conv2d(net,
                              512, [4, 4],
                              padding='VALID',
                              stride=[1, 1],
                              bn=True,
                              is_training=is_training,
                              scope='conv6',
                              bn_decay=bn_decay,
                              weight_decay=weight_decay,
                              activation_fn=tf.nn.elu)

    ########
    with tf.variable_scope('Latent_variable'):
        net = tf.reshape(net, [batch_size, 512])
        net = tf_utils.fully_connected(net,
                                       512,
                                       scope="fc1",
                                       weight_decay=weight_decay,
                                       activation_fn=tf.nn.elu,
                                       bn=True,
                                       bn_decay=bn_decay,
                                       is_training=is_training)
        net = tf_utils.fully_connected(net,
                                       128 * 4 * 4 * 4,
                                       scope="fc2",
                                       weight_decay=weight_decay,
                                       activation_fn=tf.nn.elu,
                                       bn=True,
                                       bn_decay=bn_decay,
                                       is_training=is_training)
        net = tf.reshape(net, [batch_size, 4, 4, 4, 128])

    ########
    with tf.variable_scope('Decoding'):
        # (batch_size, 8, 8, 8, 64)
        net = tf_utils.conv3d_transpose(net,
                                        64, [3, 3, 3],
                                        scope="deconv1",
                                        stride=[2, 2, 2],
                                        padding='SAME',
                                        weight_decay=weight_decay,
                                        activation_fn=tf.nn.elu,
                                        bn=True,
                                        bn_decay=bn_decay,
                                        is_training=is_training)
        # (batch_size, 16, 16, 16, 32)
        net = tf_utils.conv3d_transpose(net,
                                        32, [3, 3, 3],
                                        scope="deconv2",
                                        stride=[2, 2, 2],
                                        padding='SAME',
                                        weight_decay=weight_decay,
                                        activation_fn=tf.nn.elu,
                                        bn=True,
                                        bn_decay=bn_decay,
                                        is_training=is_training)
        # (batch_size, 32, 32, 32, 32)
        net = tf_utils.conv3d_transpose(net,
                                        32, [3, 3, 3],
                                        scope="deconv3",
                                        stride=[2, 2, 2],
                                        padding='SAME',
                                        weight_decay=weight_decay,
                                        activation_fn=tf.nn.elu,
                                        bn=True,
                                        bn_decay=bn_decay,
                                        is_training=is_training)
        # (batch_size, 64, 64, 64, 24)
        net = tf_utils.conv3d_transpose(net,
                                        24, [3, 3, 3],
                                        scope="deconv4",
                                        stride=[2, 2, 2],
                                        padding='SAME',
                                        weight_decay=weight_decay,
                                        activation_fn=tf.nn.elu,
                                        bn=True,
                                        bn_decay=bn_decay,
                                        is_training=is_training)
        # (batch_size, 64, 64, 64, 1)
        net = tf_utils.conv3d(net,
                              1, [3, 3, 3],
                              scope="deconv5",
                              stride=[1, 1, 1],
                              padding='SAME',
                              weight_decay=weight_decay,
                              activation_fn=None,
                              bn=True,
                              bn_decay=bn_decay,
                              is_training=is_training)

    return net
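Going by the shape comments, the encoder expects 128x128 images and a static batch size (it reads get_shape()[0].value). A minimal wiring sketch; the placeholder names are illustrative:

imgs = tf.placeholder(tf.float32, [8, 128, 128, 3])
is_training = tf.placeholder(tf.bool)
vol_logits = get_model(imgs, is_training)  # -> (8, 64, 64, 64, 1)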
Example #30
0
    convs_comb = []
    dw_h_convs_comb = OrderedDict()

    for layer in range(0, layers):
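        # He-style initialization: std = sqrt(2 / fan_in), with
        # filter_size**2 * features standing in for the kernel's fan-in.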
        stddev = np.sqrt(2 / (filter_size**2 * features))
        if layer == 0:
            w1 = weight_variable(
                [filter_size, filter_size, n_channels // 3, features], stddev)
        else:
            w1 = weight_variable(
                [filter_size, filter_size, features, features], stddev)

        b1 = bias_variable([features])

        conv1 = conv2d(in_node1, w1, keep_prob)
        dw_h_convs1[layer] = tf.nn.relu(conv1 + b1)

        conv2 = conv2d(in_node2, w1, keep_prob)
        dw_h_convs2[layer] = tf.nn.relu(conv2 + b1)

        conv3 = conv2d(in_node3, w1, keep_prob)
        dw_h_convs3[layer] = tf.nn.relu(conv3 + b1)

        weights.append(w1)
        biases.append(b1)
        convs1.append(conv1)
        convs2.append(conv2)
        convs3.append(conv3)

        if layer < layers - 1:
Example #31
0
    for layer in range(0, layers):
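        # Standard U-Net widening: the feature count doubles at each level
        # (features_root, 2*features_root, 4*features_root, ...).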
        features = 2**layer * features_root
        stddev = np.sqrt(2 / (filter_size**2 * features))
        if layer == 0:
            w1 = weight_variable(
                [filter_size, filter_size, n_channels, features], stddev)
        else:
            w1 = weight_variable(
                [filter_size, filter_size, features // 2, features], stddev)

        w2 = weight_variable([filter_size, filter_size, features, features],
                             stddev)
        b1 = bias_variable([features])
        b2 = bias_variable([features])

        conv1 = conv2d(in_node, w1, keep_prob)
        tmp_h_conv = tf.nn.relu(conv1 + b1)
        conv2 = conv2d(tmp_h_conv, w2, keep_prob)
        dw_h_convs[layer] = tf.nn.relu(conv2 + b2)

        weights.append((w1, w2))
        biases.append((b1, b2))
        convs.append((conv1, conv2))

        if layer < layers - 1:
            pools[layer] = max_pool(dw_h_convs[layer], pool_size)
            in_node = pools[layer]

    in_node = dw_h_convs[layers - 1]

    # up layers