Example #1
        def build_discriminator(image, reuse=False):
            conv_dim = 64
            with tf.variable_scope("discriminator") as scope:
                if (reuse):
                    scope.reuse_variables()

                f1 = ut.conv2d(image, conv_dim, f_h=7, f_w=7, name="conv1")
                print(f1.shape)
                f2 = ut.conv2d(f1, f1.shape[-1] * 2, name="conv2")
                print(f2.shape)
                f3 = ut.conv2d(f2, f2.shape[-1] * 2, name="conv3")
                print(f3.shape)
                f4 = ut.conv2d(f3, f3.shape[-1] * 2, name="conv4")
                print(f4.shape)

                ### DNN layer
                flatten = tf.reshape(f4, [self.batch_size, -1])
                print(flatten.shape)
                W2 = tf.get_variable(
                    "W2", [flatten.shape[-1], 1], tf.float32,
                    tf.truncated_normal_initializer(stddev=0.02))
                B2 = tf.get_variable(
                    "B2", [1], tf.float32,
                    tf.truncated_normal_initializer(stddev=0.02))
                score = tf.matmul(flatten, W2) + B2
                print(score.shape)

            return score
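These discriminator examples call a ut.conv2d helper that is not shown on this page. Below is a minimal sketch of what such a wrapper typically looks like, assuming DCGAN-style defaults (5x5 kernels, stride 2, truncated-normal initialization); the actual ut module may differ.

def conv2d(x, out_dim, f_h=5, f_w=5, s_h=2, s_w=2, stddev=0.02, name="conv2d"):
    # Hypothetical strided-convolution wrapper: projects x to `out_dim` channels
    # and, with the default stride of 2, halves the spatial resolution.
    with tf.variable_scope(name):
        w = tf.get_variable(
            "w", [f_h, f_w, int(x.get_shape()[-1]), int(out_dim)],
            initializer=tf.truncated_normal_initializer(stddev=stddev))
        b = tf.get_variable(
            "b", [int(out_dim)], initializer=tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(x, w, strides=[1, s_h, s_w, 1], padding="SAME")
        return tf.nn.bias_add(conv, b)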
Example #2
        def build_discriminator(image, cond, reuse=False):
            conv_dim = 64
            with tf.variable_scope("discriminator") as scope:
                if (reuse):
                    scope.reuse_variables()

                f1 = ut.conv2d(image, conv_dim, f_h=7, f_w=7, name="conv1")
                print(f1.shape)
                f2 = ut.conv2d(f1, f1.shape[-1] * 2, name="conv2")
                print(f2.shape)
                f3 = ut.conv2d(f2, f2.shape[-1] * 2, name="conv3")
                print(f3.shape)
                f4 = ut.conv2d(f3, f3.shape[-1] * 2, name="conv4")
                print(f4.shape)

                features1D = tf.reshape(f4, [self.batch_size, -1])
                print(features1D.shape)

                ### condition matching
                concat_features = tf.concat([features1D, cond], axis=1)
                C1 = ut.DNN(concat_features, [concat_features.shape[-1], 1000],
                            4)
                score = ut.DNN(C1, [1000, 1], 6, False)

                return score
Example #3
def conv_module_seg(points, feature_plane, mlp, is_training, scope, bn_decay, bn=True):
    with tf.variable_scope(scope) as sc:

        new_points=feature_plane

        new_points64 = util.conv2d(new_points, mlp[0], [1, 1],
                                  padding='VALID', stride=[1, 1],
                                  bn=bn, is_training=is_training,
                                  scope='conv2d%d' % (64), bn_decay=bn_decay)

        new_points1281 = util.conv2d(new_points64, mlp[1], [1, 1],
                                  padding='VALID', stride=[1, 1],
                                  bn=bn, is_training=is_training,
                                  scope='conv2d%d' % (1281), bn_decay=bn_decay)

        new_points1282 = util.conv2d(new_points1281, mlp[2], [1, 1],
                                  padding='VALID', stride=[1, 1],
                                  bn=bn, is_training=is_training,
                                  scope='conv2d%d' % (1282), bn_decay=bn_decay)


        new_points1024 = util.conv2d(new_points1282, 1024, [1, 1],
                                  padding='VALID', stride=[1, 1],
                                  bn=bn, is_training=is_training,
                                  scope='conv2d%d' % (1024), bn_decay=bn_decay)

        #new_points1024 = tf.squeeze(new_points1024, [2])
        new_points_all = tf.reduce_max(new_points1024, axis=1)
            #new_points_all = tf.squeeze(new_points_all, [1])

    return new_points_all, new_points64, new_points1281, new_points1282
Example #4
    def _build_anet(self, name, trainable):
        with tf.variable_scope(name):
            conv1 = tf.nn.relu(
                U.conv2d(self.tfs,
                         16,
                         "l1", [8, 8], [4, 4],
                         pad="VALID",
                         trainable=trainable))
            conv2 = tf.nn.relu(
                U.conv2d(conv1,
                         32,
                         "l2", [4, 4], [2, 2],
                         pad="VALID",
                         trainable=trainable))
            flat = U.flattenallbut0(conv2)
            den1 = tf.nn.relu(
                U.dense(flat,
                        256,
                        'lin',
                        U.normc_initializer(1.0),
                        trainable=trainable))
            self.probs = tf.nn.softmax(
                U.dense(den1,
                        n_actions,
                        "logits",
                        U.normc_initializer(0.01),
                        trainable=trainable))

        params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope=name)
        u = tf.distributions.Categorical(probs=self.probs)
        return u, params
Example #5
    def __init__(self, Load=False):
        self.sess = tf.Session()
        self.tfs = tf.placeholder(tf.float32, S_shape, 'state')

        # critic
        with tf.variable_scope('critic'):
            '''
            conv1 = tf.layers.conv2d(self.tfs, 32, 8, 4, 'valid', activation=tf.nn.relu)
            conv2 = tf.layers.conv2d(conv1, 64, 4, 2, 'valid', activation=tf.nn.relu)
            conv3 = tf.layers.conv2d(conv2, 64, 3, 1, 'valid', activation=tf.nn.relu)
            flat = U.flattenallbut0(conv3)
            l1 = tf.layers.dense(flat, 512, activation=tf.nn.relu)
            '''
            conv1 = tf.nn.relu(
                U.conv2d(self.tfs, 32, "l1", [8, 8], [4, 4], pad="VALID"))
            conv2 = tf.nn.relu(
                U.conv2d(conv1, 64, "l2", [4, 4], [2, 2], pad="VALID"))
            conv3 = tf.nn.relu(
                U.conv2d(conv2, 64, "l3", [3, 3], [1, 1], pad="VALID"))
            flat = U.flattenallbut0(conv3)
            den1 = tf.nn.relu(
                U.dense(flat, 512, 'lin', U.normc_initializer(1.0)))
            self.v = U.dense(den1, 1, "value", U.normc_initializer(1.0))
            self.tfdc_r = tf.placeholder(tf.float32, [None, 1], 'discounted_r')
            self.advantage = self.tfdc_r - self.v
            self.closs = tf.reduce_mean(tf.square(self.advantage))
            self.ctrain_op = tf.train.AdamOptimizer(C_LR).minimize(self.closs)

        # actor
        pi, pi_params = self._build_anet('pi', trainable=True)
        oldpi, oldpi_params = self._build_anet('oldpi', trainable=False)
        self.sample_op = tf.squeeze(pi.sample(1),
                                    axis=0)  # operation of choosing action
        self.update_oldpi_op = [
            oldp.assign(p) for p, oldp in zip(pi_params, oldpi_params)
        ]

        self.tfa = tf.placeholder(tf.int32, [None, A_DIM], 'action')
        self.tfadv = tf.placeholder(tf.float32, [None, 1], 'advantage')
        # ratio = tf.exp(pi.log_prob(self.tfa) - oldpi.log_prob(self.tfa))
        ratio = pi.prob(self.tfa) / (oldpi.prob(self.tfa) + 1e-5)
        surr = ratio * self.tfadv  # surrogate loss

        self.aloss = -tf.reduce_mean(
            tf.minimum(  # clipped surrogate objective
                surr,
                tf.clip_by_value(ratio, 1. - EPSILON, 1. + EPSILON) *
                self.tfadv))

        self.atrain_op = tf.train.AdamOptimizer(A_LR).minimize(self.aloss)

        self.saver = tf.train.Saver()
        self.last_ep = 0

        if (Load):
            print('Loading!')
            self.saver.restore(self.sess, '/home/icenter/tmp/Crazy/params')
        else:
            self.sess.run(tf.global_variables_initializer())
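For reference, the actor loss assembled above is the PPO clipped surrogate objective: with probability ratio r = pi(a|s) / pi_old(a|s) and advantage estimate A (fed in through self.tfadv), the loss is -E[min(r * A, clip(r, 1 - EPSILON, 1 + EPSILON) * A)]. The _build_anet method that supplies the Categorical policies pi and oldpi is the one shown in Example #4.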
Example #6
def cnn(x):
    """ CNN model to detect lung cancer
    
            Args:
                x: tensor of shape [batch_size, width, height, channels]
        
            Returns:
                pool2: tensor with all convolutions, pooling applied
    """
    
    with tf.name_scope('cnn') as scope:
        with tf.name_scope('conv1') as inner_scope:
            wcnn1 = tu.weight([3, 3, 1, 64], name='wcnn1')
            bcnn1 = tu.bias(1.0, [64], name='bcnn1')
            conv1 = tf.add(tu.conv2d(x, wcnn1, stride=(1, 1), padding='SAME'), bcnn1)
            conv1 = tu.relu(conv1)
            # (?, 192, 192, 64)
            
        with tf.name_scope('conv2') as inner_scope:
            wcnn2 = tu.weight([3, 3, 64, 64], name='wcnn2')
            bcnn2 = tu.bias(1.0, [64], name='bcnn2')
            conv2 = tf.add(tu.conv2d(conv1, wcnn2, stride=(1, 1), padding='SAME'), bcnn2)
            conv2 = tu.relu(conv2)
            #(?, 192, 192, 64)
            
        with tf.name_scope('max_pool') as inner_scope:
            pool1 = tu.max_pool2d(conv2, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], padding='SAME') 
            # (?, 96, 96, 64)
            
        with tf.name_scope('conv3') as inner_scope:
            wcnn3 = tu.weight([3, 3, 64, 64], name='wcnn3')
            bcnn3 = tu.bias(1.0, [64], name='bcnn3')
            conv3 = tf.add(tu.conv2d(pool1, wcnn3, stride=(1, 1), padding='SAME'), bcnn3)
            conv3 = tu.relu(conv3)
            # (?, 96, 96, 64)
            
        with tf.name_scope('conv4') as inner_scope:
            wcnn4 = tu.weight([3, 3, 64, 64], name='wcnn4')
            bcnn4 = tu.bias(1.0, [64], name='bcnn4')
            conv4 = tf.add(tu.conv2d(conv3, wcnn4, stride=(1, 1), padding='SAME'), bcnn4)
            conv4 = tu.relu(conv4)
            # (?, 96, 96, 64)
            
        with tf.name_scope('conv5') as inner_scope:
            wcnn5 = tu.weight([3, 3, 64, 64], name='wcnn5')
            bcnn5 = tu.bias(1.0, [64], name='bcnn5')
            conv5 = tf.add(tu.conv2d(conv4, wcnn5, stride=(1, 1), padding='SAME'), bcnn5)
            conv5 = tu.relu(conv5)
            # (?, 96, 96, 64)
            
        with tf.name_scope('max_pool') as inner_scope:
            pool2 = tu.max_pool2d(conv5, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], padding='SAME') 
            # (?, 48, 48, 64)
            
        return pool2
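Judging from the shape comments, this model expects 192x192 single-channel inputs. A hypothetical way to call it (assuming the same tu helper module used above):

x = tf.placeholder(tf.float32, [None, 192, 192, 1], name='scan')
features = cnn(x)  # -> (?, 48, 48, 64) after the two 2x2 max-pools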
Example #7
 def farword(self, inputs_image):
     inputs = tf.reshape(inputs_image, [-1, 58, 58, 1])
     with tf.variable_scope('conv1'):
         W1 = tf.Variable(tf.random_normal([3, 3, 1, 16]), name='w1')
         net = conv2d(inputs, W1)
         net = pooling(net)
     with tf.variable_scope('conv2'):
         W2 = tf.Variable(tf.random_normal([3, 3, 16, 16]), name='w2')
         net = conv2d(net, W2)
         net = pooling(net)
     with tf.variable_scope('fc'):
         logits = fully_connected(net)
     return logits
Example #8
def ConvLayer(Input, FilterIn, FilterOut, Training, Scope):

    with tf.variable_scope(Scope):

        Weight = weight_variable([3, 3, FilterIn, FilterOut])

        if cfg.LeakyReLU:

            return tf.nn.leaky_relu(
                batch_norm_conv(conv2d(Input, Weight), FilterOut, Training))
        else:
            return tf.nn.relu(
                batch_norm_conv(conv2d(Input, Weight), FilterOut, Training))
Example #9
def transform_moudule(xyz, points, is_training, idx, num_out_conv, size_conv, num_out_fc, num_channle, nsample, scope, bn_decay, bn=True, K=3):
    with tf.variable_scope(scope) as sc:
        new_points, net ,idx = plane_module(xyz, points, idx,centralize=True, num_channle=num_channle, npoint=1024,
                                                          nsample=nsample,
                                                          is_training=is_training, scope='Tnet-plane', pool='avg',
                                                          use_xyz=True, bn=bn, bn_decay=bn_decay,weight_decay=0.0)


        batch_size = tf.shape(xyz)[0]
        for i in range(len(num_out_conv)):
            net = util.conv2d(net, num_out_conv[i], size_conv[i], scope='tconv' + str(i + 1), stride=[1, 1], bn=True,
                         bn_decay=bn_decay, is_training=is_training, padding='VALID')
        net = tf.squeeze(net, 2)
        net = tf.reduce_max(net, axis=1)

        for i in range(len(num_out_fc)):
            net = util.fully_connected(net, num_out_fc[i], scope='tfc' + str(i + 1), bn_decay=bn_decay, bn=True,
                                  is_training=is_training)
        with tf.variable_scope(scope) as sc:
            weights = tf.get_variable('weights', [256, K * K],
                                      initializer=tf.constant_initializer(0.0),
                                      dtype=tf.float32)
            biases = tf.get_variable('biases', [K * K],
                                     initializer=tf.constant_initializer(0.0),
                                     dtype=tf.float32)
            biases += tf.constant(np.eye(K).flatten(), dtype=tf.float32)
            transform = tf.matmul(net, weights)
            transform = tf.nn.bias_add(transform, biases)

        transform = tf.reshape(transform, [batch_size, K, K])

        xyz = tf.matmul(xyz, transform)
        if new_points is not None:
            new_points = tf.matmul(new_points, transform)
    return xyz, transform, new_points
Example #10
def add_residual_pre(prev_layer,
                     z_concat=None,
                     text_filters=None,
                     k_h=5,
                     k_w=5,
                     hidden_text_filters=None,
                     hidden_filters=None,
                     name_func=None):

    filters = prev_layer.get_shape()[3].value
    if hidden_filters is None:
        hidden_filters = filters * 4
    if text_filters is None:
        text_filters = int(filters / 2)
    if hidden_text_filters is None:
        hidden_text_filters = int(filters / 8)
    s = prev_layer.get_shape()[1].value

    bn0 = util.batch_norm(name=g_name())
    bn1 = util.batch_norm(name=g_name())

    low_dim = util.conv2d(util.lrelu(bn0(prev_layer)),
                          hidden_filters,
                          k_h=k_h,
                          k_w=k_w,
                          name=name_func())

    residual = util.deconv2d(util.lrelu(bn1(low_dim), name=name_func()),
                             [batch_size, s, s, filters],
                             k_h=k_h,
                             k_w=k_w,
                             name=name_func())

    next_layer = prev_layer + residual
    return next_layer
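This is a pre-activation residual block: prev_layer is batch-normalized and passed through a leaky ReLU before the conv2d / deconv2d pair, and the learned residual is added back onto the untouched prev_layer. Note that batch_size and g_name are assumed to come from the enclosing module, and that the z_concat argument and the text_filters / hidden_text_filters values are not actually used in this snippet.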
Example #11
 def projection_shortcut(self, input, num_conv, strides, name):
     h0 = util.conv2d(input, [1, 1, num_conv],
                      strides,
                      name=name + "/shortcut",
                      use_bias=False,
                      padding=('SAME' if strides[1] == 1 else 'VALID'))
     return h0
Example #12
    def generate_attention_maps(self, state, feature):

        h, c = state
        DIM = self.DIM_ATT

        # There are 5 body parts. `tmp` is shared for each joint within a body part.
        # In other words, we need 5 `tmp` terms, or equivalently, 1 `tmp` term with 5*DIM channels.

        # Compute map (Eq. 2)
        Ac = util.conv2d(feature, [1, 1, 5 * DIM],
                         "att_pose_c",
                         use_bias=False)
        Ah = util.fc(h, 5 * DIM, "att_pose_h", use_bias=False)
        bias = tf.get_variable("bias",
                               shape=[5 * DIM],
                               initializer=tf.zeros_initializer())

        # A_c: Bx7x7x32; A_h: Bx32.
        # Add A_h to A_c by broadcasting
        tmp = tf.nn.tanh(tf.reshape(Ah, [self.BATCH, 1, 1, DIM]) + Ac + bias)
        tmp = tf.split(tmp, 5, axis=3)  # Split into 5 groups

        joint_maps = []
        joint_tens = []
        for i in range(5):
            # v is just a 1x1 convolution.
            # NOTE: From paper, it is not entirely clear if v is shared between body parts.
            # We assume this is NOT the case.
            res = util.conv2d(tmp[i], [1, 1, self.J], "att_map_bp" + str(i))
            res = tf.reshape(res, [self.BATCH, 7, 7, self.J])

            # Normalization (Eq. 3)
            t_res = tf.nn.softmax(res, 3)

            l_res = tf.split(t_res, self.J, axis=3)

            joint_maps.append(l_res)  # For use in assemble_parts
            joint_tens.append(tf.expand_dims(
                t_res, axis=1))  # For convenient loss computation

        joint_tens = tf.concat(
            joint_tens, axis=1)  # Resulting shape: BATCH x 5 x 7 x 7 x J
        return joint_maps, joint_tens
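In terms of the code above, Eq. 2 amounts to A = v(tanh(W_c * C + W_h * h + b)): W_c is the 1x1 convolution att_pose_c applied to the feature map, W_h is the fully connected layer att_pose_h applied to the recurrent hidden state h, and v is the per-body-part 1x1 convolution att_map_bp{i}; Eq. 3 is the softmax over the channel axis that normalizes each joint map.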
Example #13
 def make_convolution_graph(self):
     with tf.name_scope("convolution"):
         self.W_conv = weight_variable([
             self.KERNEL_SIZE, self.KERNEL_SIZE, self.box_shape[0],
             self.NUM_FEATURES
         ],
                                       name="W")
         self.b_conv = bias_variable([self.NUM_FEATURES], name="b")
         self.box_activations = []
         for box in self.box_inputs:
             z = conv2d(box, self.W_conv) + self.b_conv
             self.box_activations.append(tf.sigmoid(z))
Example #14
    def resnet_v2_bottleneck_block(self,
                                   input,
                                   num_conv,
                                   strides,
                                   name,
                                   projection_shortcut=False):

        shortcut = input

        bn1 = util.batch_norm(input, name + "/bn1", self.phase)
        bn1 = tf.nn.relu(bn1)

        # 1x1 Conv layer
        if projection_shortcut:
            shortcut = self.projection_shortcut(bn1, num_conv * 4, strides,
                                                name)

        conv1 = util.conv2d(bn1, [1, 1, num_conv], [1, 1, 1, 1],
                            name=name + "/conv1",
                            use_bias=False)
        bn2 = util.batch_norm(conv1, name + "/bn2", self.phase)
        bn2 = tf.nn.relu(bn2)

        if strides[1] > 1:
            bn2 = self.pad(bn2, 3)

        conv2 = util.conv2d(bn2, [3, 3, num_conv],
                            strides,
                            name=name + "/conv2",
                            use_bias=False,
                            padding=('SAME' if strides[1] == 1 else 'VALID'))
        bn3 = util.batch_norm(conv2, name + "/bn3", self.phase)
        bn3 = tf.nn.relu(bn3)

        conv3 = util.conv2d(bn3, [1, 1, num_conv * 4], [1, 1, 1, 1],
                            name=name + "/conv3",
                            use_bias=False)

        return conv3 + shortcut
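This follows the ResNet-v2 pre-activation bottleneck layout: batch normalization and ReLU precede each convolution, the three convolutions are 1x1 -> 3x3 -> 1x1 with the last one expanding to four times the bottleneck width, and a 1x1 projection shortcut replaces the identity whenever projection_shortcut is set (i.e. when the block changes resolution or width).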
Example #15
def conv_module(points, new_points, mlp, is_training, scope, bn_decay, bn=True):
    with tf.variable_scope(scope) as sc:

        for i, num_out_channel in enumerate(mlp):
            new_points = util.conv2d(new_points, num_out_channel, [1,1],
                                        padding='VALID', stride=[1,1],
                                        bn=bn, is_training=is_training,
                                        scope='conv2d%d'%(i), bn_decay=bn_decay)



        new_points = tf.squeeze(new_points, [2])
        new_points_all = tf.reduce_max(new_points, axis=1)

    return new_points_all, new_points
Example #16
  def generate_attention_maps( self, state, feature ):

    h, c  = state
    DIM   = self.DIM_ATT
    
    # Compute map (Eq. 2)
    Ac    = util.conv2d( feature, [1, 1, DIM], name="att_pose_c" )
    Ah    = util.fc( h, DIM, "att_pose_h" )

    # A_c: Bx7x7x32; A_h: Bx32.
    # Add A_h to A_c by broadcasting
    tmp   = tf.nn.tanh( tf.reshape( Ah, [self.BATCH, 1, 1, DIM] ) + Ac )

    # v
    res   = util.conv2d( tmp, [1, 1, self.J], name="att_map" )
    res   = tf.reshape( res, [self.BATCH, 7, 7, self.J] )

    # Normalization (Eq. 3)
    # t_res = tf.nn.softmax( res, axis=3 )      # Tensorflow 1.6 and higher
    t_res = tf.nn.softmax( res, dim=3 )         # This is deprecated in Tensorflow 1.8, but still works

    l_res = tf.split( t_res, self.J, axis=3 )

    return l_res, t_res
Example #17
    def resnet_v2(self, input):

        strides = [1, 2, 2, 2]
        blocks = [3, 4, 6, 3]
        num_conv = [64, 128, 256, 512]

        input = self.pad(input, 7)
        res = util.conv2d(input, [7, 7, 64],
                          stride=[1, 2, 2, 1],
                          padding='VALID',
                          name="conv_pre",
                          use_bias=False)
        res = tf.nn.max_pool(res,
                             ksize=[1, 3, 3, 1],
                             strides=[1, 2, 2, 1],
                             padding='SAME')

        for j, b in enumerate(blocks):
            block_stride = [1, strides[j], strides[j], 1]

            res = self.resnet_v2_bottleneck_block(res,
                                                  num_conv=num_conv[j],
                                                  strides=block_stride,
                                                  name="block" + str(j + 1) +
                                                  "-1",
                                                  projection_shortcut=True)

            for i in range(1, b):
                res = self.resnet_v2_bottleneck_block(
                    res,
                    num_conv=num_conv[j],
                    strides=[1, 1, 1, 1],
                    name="block" + str(j + 1) + "-" + str(i + 1))

        res = util.batch_norm(res, "post_bn", self.phase)
        res = tf.nn.relu(res)

        self.spatial = res

        # Average Pooling over both spatial dimensions
        res = tf.reduce_mean(res, axis=[1, 2])

        # With ImageNet classifier
        if self.with_classifier:
            res = util.fc(res, 1001, "imagenet_dense")

        return res
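With blocks = [3, 4, 6, 3] and widths [64, 128, 256, 512], this is the 50-layer ResNet-v2 configuration; the optional 1001-unit imagenet_dense head follows the common TensorFlow convention of 1000 ImageNet classes plus a background class.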
Example #18
 def action_value_function(self, input, reuse=True, scope='Action_Value'):
     #should be created with invarianve to batch size
     #input should be a placeholder
     #outputs are actions shape [batch_size, num_actions]
     with tf.variable_scope(scope, reuse=reuse):
         out = tf.cast(input, tf.float32)
         conv_params = self.params['conv_layers']
         for i in range(len(conv_params)):
             clp = conv_params[i]
             out = ut.conv2d(out, clp[0], clp[1], clp[2],
                             'conv2d%d' % (i + 1))
             out = tf.nn.relu(out)
         out = ut.fully_connected(out, self.params['FC_layer'],
                                  'fully_con1')
         out = tf.nn.relu(out)
         out = ut.fully_connected(out, self.params['n_output'],
                                  'fully_con2', False)
         return out
Example #19
def conv_plane2d(inputs,
               num_output_channels,
               kernel_size,
               scope,
               pool,
               use_xavier=True,
               stddev=1e-3,
               weight_decay=0.0,
               activation_fn=tf.nn.sigmoid,
               bn=False,
               bn_decay=None,
               is_training=None):
    with tf.variable_scope(scope) as sc:

        kernel_h, kernel_w = kernel_size

        outputs = util.conv2d(inputs, 32, [1, 1],
                                  padding='VALID', stride=[1, 1],
                                  bn=bn, is_training=is_training,
                                  scope='conv2d%d' % (64), bn_decay=bn_decay)

        outputs = tf.reduce_max(outputs, 2,keep_dims=True)

        return outputs
Example #20
    def build_model(self, batch_size, spatial_size, points_xyz, points_features, is_training, num_class, batch_normalization_decay=None):
        """ Build a Fully-Convolutional Point Network.

            Args:
            batch_size: int
            spatial_size: np.array
            points_xyz: tf.placeholder
            points_features: tf.placeholder
            is_training: tf.placeholder
            num_class: int
            batch_normalization_decay: float

            Returns: tf.tensor

        """

        self.min_xyz_ = np.array([0, 0, 0])
        self.max_xyz_ = spatial_size
        self.use_batch_normalization_ = self._config['training']['optimizer']['batch_normalization'] != False

        pointnet_locations = util.get_uniformly_spaced_point_grid(self.min_xyz_, self.max_xyz_, self._config['model']['pointnet']['spacing'], batch_size)
        top_level_centroid_locations = util.get_uniformly_spaced_point_grid(self.min_xyz_, self.max_xyz_, self.get_centroid_spacing(self._config['model']['pointnet']['spacing'], self._abstraction_levels), batch_size)
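        # Overall structure (mirroring the variable scopes below): "abstraction" pools point
        # features into progressively coarser 3D feature volumes (15cm -> 30cm -> 60cm cells)
        # and captures skip_* tensors at each scale, "spatial_pool" aggregates longer-range
        # context with a radius-weighted sum over top-level cells, and "upsampling" decodes
        # back to the output voxel resolution using those skip connections.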

        with tf.variable_scope("abstraction"):

            with tf.variable_scope("points_to_15cm"):

                with tf.variable_scope("simplified_pointnet"):

                    with tf.device('/gpu:' + str(self._config['training']['gpu']['id'])):
                        # Radius search and Grouping
                        grouped_points_xyz_and_features = self.radius_search_and_group(pointnet_locations, self.get_pointnet_radius(self._config['model']['pointnet']['spacing']), self._config['model']['pointnet']['neighbors'], points_xyz, points_features)

                    # 3x 1x1 Convolutions
                    features = util.conv2d(grouped_points_xyz_and_features, self._config['model']['filters']['abstraction']['points_to_15cm'][0], [1, 1], padding='VALID', stride=[1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='1x1_conv_1', bn_decay=batch_normalization_decay)
                    features = util.conv2d(features, self._config['model']['filters']['abstraction']['points_to_15cm'][1], [1, 1], padding='VALID', stride=[1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='1x1_conv_2', bn_decay=batch_normalization_decay)

                    # Max-pooling for permutation invariance
                    features = tf.reduce_max(features, axis=[2], keepdims=True)
                    features = tf.squeeze(features, [2])

                    num_dims = self.get_feature_volume_shape(spatial_size, self._config['model']['pointnet']['spacing'], 1)
                    features = tf.reshape(features, [batch_size, num_dims[0], num_dims[1], num_dims[2], features.get_shape().as_list()[-1]])

                with tf.variable_scope("skip_15cm"):
                
                    skip_15cm = util.conv3d(features, self._config['model']['filters']['skip']['15cm'], [1, 1, 1], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='1x1x1_conv', bn_decay=batch_normalization_decay)

                with tf.variable_scope("skip_45cm"):
                    
                    padded = tf.pad(features, [[0,0], [1,1], [1,1], [1,1], [0,0]], "SYMMETRIC")
                    skip_45cm = util.conv3d(padded, self._config['model']['filters']['skip']['45cm'], [3, 3, 3], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='3x3x3_conv', bn_decay=batch_normalization_decay)

            with tf.variable_scope("15cm_to_30cm"):

                with tf.variable_scope("3d_convolution"):

                    features = util.conv3d(features, self._config['model']['filters']['abstraction']['15cm_to_30cm'][0], [2, 2, 2], padding='VALID', stride=[2, 2, 2], bn=self.use_batch_normalization_, is_training=is_training, scope='2x2x2_conv', bn_decay=batch_normalization_decay)
                    features = util.conv3d(features, self._config['model']['filters']['abstraction']['15cm_to_30cm'][1], [1, 1, 1], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='1x1_conv_1', bn_decay=batch_normalization_decay)

                with tf.variable_scope("skip_30cm"):

                    skip_30cm = util.conv3d(features, self._config['model']['filters']['skip']['30cm'], [1, 1, 1], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='1x1x1_conv', bn_decay=batch_normalization_decay)

                with tf.variable_scope("skip_90cm"):

                    padded = tf.pad(features, [[0,0], [1,1], [1,1], [1,1], [0,0]], "SYMMETRIC")
                    skip_90cm = util.conv3d(padded, self._config['model']['filters']['skip']['90cm'], [3, 3, 3], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='3x3x3_conv', bn_decay=batch_normalization_decay)

            with tf.variable_scope("30cm_to_60cm"):

                with tf.variable_scope("3d_convolution"):

                    features = util.conv3d(features, self._config['model']['filters']['abstraction']['30cm_to_60cm'][0], [2, 2, 2], padding='VALID', stride=[2, 2, 2], bn=self.use_batch_normalization_, is_training=is_training, scope='2x2x2_conv', bn_decay=batch_normalization_decay)
                    features = util.conv3d(features, self._config['model']['filters']['abstraction']['30cm_to_60cm'][1], [1, 1, 1], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='1x1x1_conv_1', bn_decay=batch_normalization_decay)
                    
                with tf.variable_scope("skip_60cm"):

                    skip_60cm = util.conv3d(features, self._config['model']['filters']['skip']['60cm'], [1, 1, 1], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='1x1x1_conv_1', bn_decay=batch_normalization_decay)

                with tf.variable_scope("skip_180cm"):

                    padded = tf.pad(features, [[0,0], [1,1], [1,1], [1,1], [0,0]], "SYMMETRIC")
                    skip_180cm = util.conv3d(padded, self._config['model']['filters']['skip']['180cm'], [3, 3, 3], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='3x3x3_conv', bn_decay=batch_normalization_decay)

        with tf.variable_scope("spatial_pool"):

            num_cells_in_current_layer = self.get_feature_volume_shape(spatial_size, self._config['model']['pointnet']['spacing'], 3)

            with tf.variable_scope("reshape_and_repeat"):

                # Reshape and repeat feature volume to apply weighted spatial pooling
                features = tf.reshape(features, [batch_size, top_level_centroid_locations.get_shape()[1].value, self._config['model']['filters']['abstraction']['30cm_to_60cm'][-1]])
                features = tf.tile(tf.expand_dims(features, axis=1), [1, top_level_centroid_locations.get_shape()[1].value, 1, 1])

            with tf.variable_scope("pool"):
             
                spatial_pooling_weights = self.get_spatial_pool_weighting(self._config['model']['spatial_pool_radius'], top_level_centroid_locations)
                skip_spatial_pool = features * spatial_pooling_weights
                skip_spatial_pool = tf.reduce_sum(skip_spatial_pool, axis=2)
                skip_spatial_pool = tf.reshape(skip_spatial_pool, [batch_size, num_cells_in_current_layer[0], num_cells_in_current_layer[1], num_cells_in_current_layer[2], self._config['model']['filters']['abstraction']['30cm_to_60cm'][-1]])
                
            with tf.variable_scope("skip_spatial_pool"):
                skip_spatial_pool = util.conv3d(skip_spatial_pool, self._config['model']['filters']['skip']['spatial_pool'], [1, 1, 1], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='1x1x1_conv', bn_decay=batch_normalization_decay)

        with tf.variable_scope("upsampling"):
            
            with tf.variable_scope("60cm_to_30cm"):

                features = tf.concat([skip_60cm, skip_180cm, skip_spatial_pool], axis=4)

                features = util.conv3d_transpose(features, self._config['model']['filters']['upsampling']['60cm_to_30cm'][0], [2, 2, 2], padding='VALID', stride=[2, 2, 2], bn=self.use_batch_normalization_, is_training=is_training, scope='2x2x2_deconv', bn_decay=batch_normalization_decay)
                features = util.conv3d(features, self._config['model']['filters']['upsampling']['60cm_to_30cm'][1], [1, 1, 1], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='1x1x1_conv_1', bn_decay=batch_normalization_decay)
                features = util.conv3d(features, self._config['model']['filters']['upsampling']['60cm_to_30cm'][2], [1, 1, 1], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='1x1x1_conv_2', bn_decay=batch_normalization_decay)
                
            with tf.variable_scope("30cm_to_15cm"):

                features = tf.concat([features, skip_30cm, skip_90cm], axis=4)
                features = util.conv3d_transpose(features, self._config['model']['filters']['upsampling']['30cm_to_15cm'][0], [2, 2, 2], padding='VALID', stride=[2, 2, 2], bn=self.use_batch_normalization_, is_training=is_training, scope='2x2x2_deconv', bn_decay=batch_normalization_decay)
                features = util.conv3d(features, self._config['model']['filters']['upsampling']['30cm_to_15cm'][1], [1, 1, 1], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='1x1x1_conv_1', bn_decay=batch_normalization_decay)
                features = util.dropout(features, keep_prob=0.5, is_training=is_training, scope='dropout')

                features = tf.concat([features, skip_45cm], axis=4)
                features = util.conv3d(features, self._config['model']['filters']['upsampling']['30cm_to_15cm'][2], [1, 1, 1], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='1x1x1_conv_3', bn_decay=batch_normalization_decay)

            with tf.variable_scope("15cm_to_5cm"):

                features = tf.concat([features, skip_15cm], axis=4)
                features = util.dropout(features, keep_prob=0.5, is_training=is_training, scope='dropout')

                upsample_factor = int(math.ceil(self._config['model']['pointnet']['spacing'] / self._output_voxel_size))
                features = util.conv3d_transpose(features, self._config['model']['filters']['upsampling']['15cm_to_5cm'][0], [upsample_factor, upsample_factor, upsample_factor], padding='VALID', stride=[upsample_factor, upsample_factor, upsample_factor], bn=self.use_batch_normalization_, is_training=is_training, scope='final_deconv', bn_decay=batch_normalization_decay)
                features = tf.pad(features, [[0,0], [1,1], [1,1], [1,1], [0,0]], "SYMMETRIC")
                output = util.conv3d(features, num_class, [3, 3, 3], padding='VALID', stride=[1, 1, 1], bn=self.use_batch_normalization_, is_training=is_training, scope='final_conv', bn_decay=batch_normalization_decay, activation_fn=None)

                num_output_elements = np.prod(self.get_output_volume_shape(spatial_size, self._output_voxel_size))
                output = tf.reshape(output, [batch_size, num_output_elements, num_class])

        return output
Example #21
def SCNN(demand, topo):

    x_demand = tf.reshape(demand, [-1,8,8,1])
    x_topo   = tf.reshape(topo, [-1,20,20,1])

    # convolution 1
    with tf.name_scope('D_cnn_1_layer'):
      with tf.name_scope('Weight'):
        W_conv1_d = util.weight_variable([3, 3, 1, 16])
      with tf.name_scope('biases'):
        b_conv1_d = util.bias_variable([16])
    # h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
      d_conv1 = tf.nn.relu(util.conv2d(x_demand, W_conv1_d) + b_conv1_d)
      #d_conv1 = util.conv2d(x_demand, W_conv1_d) + b_conv1_d
    # h_pool1 = max_pool_2x2(h_conv1)

    # convolution 2
    with tf.name_scope('D_cnn_2_layer'):
      with tf.name_scope('Weight'):
        W_conv2_d = util.weight_variable([3, 3, 16, 32])
      with tf.name_scope('biases'):
        b_conv2_d = util.bias_variable([32])
    # h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
      d_conv2 = tf.nn.relu(util.conv2d(d_conv1, W_conv2_d) + b_conv2_d)
      #d_conv2 = util.conv2d(d_conv1, W_conv2_d) + b_conv2_d
      #d_conv2_flat = tf.reshape(d_conv2, [-1, 8 * 8 * 32])
    # h_pool2 = max_pool_2x2(h_conv2)

    # dropout
    #with tf.name_scope('dropout'):
     #  keep_prob_d = tf.placeholder(tf.float32, name = "drop_in")
      # d_drop = tf.nn.dropout(d_conv2, keep_prob_d)

    # convolution 1
    with tf.name_scope('T_cnn_1_layer'):
      with tf.name_scope('Weight'):
        W_conv1_t = util.weight_variable([5, 5, 1, 16])
      with tf.name_scope('biases'):
        b_conv1_t = util.bias_variable([16])
    # h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
      t_conv1 = tf.nn.relu(util.conv2d(x_topo, W_conv1_t) + b_conv1_t)
      #t_conv1 = util.conv2d(x_topo, W_conv1_t) + b_conv1_t
    # h_pool1 = max_pool_2x2(h_conv1)

    # convolution 2
    with tf.name_scope('T_cnn_2_layer'):
      with tf.name_scope('Weight'):
        W_conv2_t = util.weight_variable([5, 5, 16, 32])
      with tf.name_scope('biases'):
        b_conv2_t = util.bias_variable([32])
    # h_conv2 = tf.nn.relu(conv2d(h_conv1, W_conv2) + b_conv2)
      t_conv2 = tf.nn.relu(util.conv2d(t_conv1, W_conv2_t) + b_conv2_t)
      #t_conv2 = util.conv2d(t_conv1, W_conv2_t) + b_conv2_t
      #t_conv2_flat = tf.reshape(t_conv2, [-1, 8 * 8 * 32])
    # h_pool2 = max_pool_2x2(h_conv2)

     #dropout
   # with tf.name_scope('dropout'):
   #    keep_prob_t = tf.placeholder(tf.float32, name = "drop_in")
   #    t_drop = tf.nn.dropout(t_conv2, keep_prob_t)
    
    with tf.name_scope('Contact'):
        D_conv2_seq = tf.reshape(d_conv2,[-1,8 * 8,32])
        T_conv2_seq = tf.reshape(t_conv2,[-1,20 * 20,32])
        Demand_Topo = tf.concat([D_conv2_seq, T_conv2_seq], 1)
    # full-connected 1
    Demand_Topo_flat = tf.reshape(Demand_Topo, [-1, (64 + 400) * 32])
    with tf.name_scope('fully_1_layer'):
      with tf.name_scope('Weight'):
        W_fc1 = util.weight_variable([ (64 + 400)* 32, 512])
      with tf.name_scope('biases'):
        b_fc1 = util.bias_variable([512])
      with tf.name_scope('relu'):
        h_fc1 = tf.nn.relu(tf.matmul(Demand_Topo_flat, W_fc1) + b_fc1)

    # dropout
    with tf.name_scope('dropout'):
       keep_prob1 = tf.placeholder(tf.float32, name = "drop_in")
       h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob1)

    # full-connected 2
    with tf.name_scope('fully_2_layer'):
      with tf.name_scope('Weight'):
        W_fc2 = util.weight_variable([512, 128])
      with tf.name_scope('biases'):
        b_fc2 = util.bias_variable([128])
      with tf.name_scope('relu'):
        h_fc2 = tf.nn.relu(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)



    # dropout
    with tf.name_scope('dropout'):
       keep_prob2 = tf.placeholder(tf.float32, name = "drop_in")
       h_drop = tf.nn.dropout(h_fc2, keep_prob2)

    # readout
    with tf.name_scope('fully_layer'):
      with tf.name_scope('Weight'):
        W_out = util.weight_variable([128, 1])
      with tf.name_scope('biases'):
        b_out = util.bias_variable([1])
    
      y_out = tf.matmul(h_drop, W_out) + b_out

    #return y_out, keep_prob1, keep_prob2, keep_prob_d, keep_prob_t
    return y_out, keep_prob1, keep_prob2
Example #22
def alexnet(x):
    """
  AlexNet conv layers definition
  
  Args:
      x: tensor of shape[batch_size,width,height,channels]
  Returns:
      pool5: tensor with all convolutions ,pooling and lrn operations applied
      
  """
    with tf.name_scope('alexnetwork') as scope:
        with tf.name_scope('conv1') as inner_scope:
            wcnn1 = tu.weight([11, 11, 3, 96], name='wcnn1')
            bcnn1 = tu.bias(0.0, [96], name='bcnn1')
            conv1 = tf.add(tu.conv2d(x, wcnn1, stride=(4, 4), padding='SAME'),
                           bcnn1)

            #conv1 = tu.batch_norm(conv1)

            conv1 = tu.relu(conv1)
            norm1 = tu.lrn(conv1,
                           depth_radius=5,
                           bias=1.0,
                           alpha=1e-04,
                           beta=0.75)
            pool1 = tu.max_pool2d(norm1,
                                  kernel=[1, 3, 3, 1],
                                  stride=[1, 2, 2, 1],
                                  padding='VALID')

        with tf.name_scope('conv2') as inner_scope:
            wcnn2 = tu.weight([5, 5, 96, 256], name='wcnn2')
            bcnn2 = tu.bias(1.0, [256], name='bcnn2')
            conv2 = tf.add(
                tu.conv2d(pool1, wcnn2, stride=(1, 1), padding='SAME'), bcnn2)

            #conv2 = tu.batch_norm(conv2)

            conv2 = tu.relu(conv2)
            norm2 = tu.lrn(conv2,
                           depth_radius=5,
                           bias=1.0,
                           alpha=1e-04,
                           beta=0.75)
            pool2 = tu.max_pool2d(norm2,
                                  kernel=[1, 3, 3, 1],
                                  stride=[1, 2, 2, 1],
                                  padding='VALID')

        with tf.name_scope('conv3') as inner_scope:
            wcnn3 = tu.weight([3, 3, 256, 384], name='wcnn3')
            bcnn3 = tu.bias(0.0, [384], name='bcnn3')
            conv3 = tf.add(
                tu.conv2d(pool2, wcnn3, stride=(1, 1), padding='SAME'), bcnn3)
            #conv3 = tu.batch_norm(conv3)
            conv3 = tu.relu(conv3)

        with tf.name_scope('conv4') as inner_scope:
            wcnn4 = tu.weight([3, 3, 384, 384], name='wcnn4')
            bcnn4 = tu.bias(1.0, [384], name='bcnn4')
            conv4 = tf.add(
                tu.conv2d(conv3, wcnn4, stride=(1, 1), padding='SAME'), bcnn4)
            #conv4 = tu.batch_norm(conv4)
            conv4 = tu.relu(conv4)

        with tf.name_scope('conv5') as inner_scope:
            # conv5 reconstructed to match the AlexNet layout: 256 filters on top of conv4.
            wcnn5 = tu.weight([3, 3, 384, 256], name='wcnn5')
            bcnn5 = tu.bias(1.0, [256], name='bcnn5')
            conv5 = tf.add(
                tu.conv2d(conv4, wcnn5, stride=(1, 1), padding='SAME'), bcnn5)
            #conv5 = tu.batch_norm(conv5)
            conv5 = tu.relu(conv5)
            pool5 = tu.max_pool2d(conv5,
                                  kernel=[1, 3, 3, 1],
                                  stride=[1, 2, 2, 1],
                                  padding='VALID')

        return pool5
Example #23
def cnn(x):
    """ CNN model to detect lung cancer
    
            Args:
                x: tensor of shape [batch_size, width, height, channels]
        
            Returns:
                pool2: tensor with all convolutions, pooling applied
    """

    with tf.name_scope('cnn') as scope:
        with tf.name_scope('conv1') as inner_scope:
            wcnn1 = tu.weight([3, 3, 1, 64], name='wcnn1')
            bcnn1 = tu.bias(1.0, [64], name='bcnn1')
            conv1 = tf.add(tu.conv2d(x, wcnn1, stride=(1, 1), padding='SAME'),
                           bcnn1)
            conv1 = tu.relu(conv1)
            # (?, 192, 192, 64)

        with tf.name_scope('conv2') as inner_scope:
            wcnn2 = tu.weight([3, 3, 64, 64], name='wcnn2')
            bcnn2 = tu.bias(1.0, [64], name='bcnn2')
            conv2 = tf.add(
                tu.conv2d(conv1, wcnn2, stride=(1, 1), padding='SAME'), bcnn2)
            conv2 = tu.relu(conv2)
            #(?, 192, 192, 64)

        with tf.name_scope('max_pool') as inner_scope:
            pool1 = tu.max_pool2d(conv2,
                                  kernel=[1, 2, 2, 1],
                                  stride=[1, 2, 2, 1],
                                  padding='SAME')
            # (?, 96, 96, 64)

        with tf.name_scope('conv3') as inner_scope:
            wcnn3 = tu.weight([3, 3, 64, 64], name='wcnn3')
            bcnn3 = tu.bias(1.0, [64], name='bcnn3')
            conv3 = tf.add(
                tu.conv2d(pool1, wcnn3, stride=(1, 1), padding='SAME'), bcnn3)
            conv3 = tu.relu(conv3)
            # (?, 96, 96, 64)

        with tf.name_scope('conv4') as inner_scope:
            wcnn4 = tu.weight([3, 3, 64, 64], name='wcnn4')
            bcnn4 = tu.bias(1.0, [64], name='bcnn4')
            conv4 = tf.add(
                tu.conv2d(conv3, wcnn4, stride=(1, 1), padding='SAME'), bcnn4)
            conv4 = tu.relu(conv4)
            # (?, 96, 96, 64)

        with tf.name_scope('conv5') as inner_scope:
            wcnn5 = tu.weight([3, 3, 64, 64], name='wcnn5')
            bcnn5 = tu.bias(1.0, [64], name='bcnn5')
            conv5 = tf.add(
                tu.conv2d(conv4, wcnn5, stride=(1, 1), padding='SAME'), bcnn5)
            conv5 = tu.relu(conv5)
            # (?, 96, 96, 64)

        with tf.name_scope('max_pool') as inner_scope:
            pool2 = tu.max_pool2d(conv5,
                                  kernel=[1, 2, 2, 1],
                                  stride=[1, 2, 2, 1],
                                  padding='SAME')
            # (?, 48, 48, 64)

        return pool2
Example #24
  def build( self ):
    with tf.variable_scope( "vgg" ) as scope:
      conv1_1 = tf.nn.relu( util.conv2d( self.X, [3,3,64], 'conv1_1' ) )
      conv1_2 = tf.nn.relu( util.conv2d( conv1_1 , [3,3,64], 'conv1_2' ) )
      pool1   = tf.nn.max_pool( conv1_2, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME' )

      conv2_1 = tf.nn.relu( util.conv2d( pool1, [3,3,128], 'conv2_1' ) )
      conv2_2 = tf.nn.relu( util.conv2d( conv2_1, [3,3,128], 'conv2_2' ) )
      pool2   = tf.nn.max_pool( conv2_2, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME' )

      conv3_1 = tf.nn.relu( util.conv2d( pool2, [3,3,256], 'conv3_1' ) )
      conv3_2 = tf.nn.relu( util.conv2d( conv3_1, [3,3,256], 'conv3_2' ) )
      conv3_3 = tf.nn.relu( util.conv2d( conv3_2, [3,3,256], 'conv3_3' ) )
      pool3   = tf.nn.max_pool( conv3_3, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME' )

      conv4_1 = tf.nn.relu( util.conv2d( pool3, [3,3,512], 'conv4_1' ) )
      conv4_2 = tf.nn.relu( util.conv2d( conv4_1, [3,3,512], 'conv4_2' ) )
      conv4_3 = tf.nn.relu( util.conv2d( conv4_2, [3,3,512], 'conv4_3' ) )
      pool4   = tf.nn.max_pool( conv4_3, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME' )

      conv5_1 = tf.nn.relu( util.conv2d( pool4, [3,3,512], 'conv5_1' ) )
      conv5_2 = tf.nn.relu( util.conv2d( conv5_1, [3,3,512], 'conv5_2' ) )
      conv5_3 = tf.nn.relu( util.conv2d( conv5_2, [3,3,512], 'conv5_3' ) )
      pool5   = tf.nn.max_pool( conv5_3, ksize = [1,2,2,1], strides = [1,2,2,1], padding = 'SAME' )

    self.pool5 = pool5
Example #25
        w3 = tf.Variable(tf.constant(ld['w3']), name='w3')
        b3 = tf.Variable(tf.constant(ld['b3']), name='b3')

# Implementation of the iterative scheme
x_values = [x_0]
x = x_0
for i in range(n_iter):
    with tf.name_scope('iterate_{}'.format(i)):
        nonlinear_Tx = tf.exp(-mu_water * odl_op_layer(x))
        gradx = mu_water * odl_op_layer_adjoint(y - nonlinear_Tx)
        gradreg = odl_op_regularizer(x)

        update = tf.concat([x, gradx, gradreg, s], axis=3)

        update = tf.nn.relu(conv2d(update, w1) + b1)
        update = tf.nn.relu(conv2d(update, w2) + b2)

        update = conv2d(update, w3) + b3

        s = tf.nn.relu(update[..., 1:])
        dx = update[..., 0:1]

        x = x + dx
        x_values.append(x)
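# Each loop iteration above unrolls one step of a learned iterative scheme: the
# data-term gradient mu_water * A^T(y - exp(-mu_water * A x)), the regularizer
# gradient and the memory tensor s are concatenated with x and passed through
# three convolutions that produce the update dx and the next memory state s.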

with tf.name_scope('loss'):
    loss = tf.reduce_mean(tf.reduce_sum((x - x_true)**2, axis=(1, 2)))

with tf.name_scope('optimizer'):
    # Learning rate
Example #26
# TensorBoard debug view.
file_writer = tf.summary.FileWriter('LOGS', sess.graph)

# Create placeholders for independent and dependant variables once batch has been selected.
with tf.name_scope('Input_Image'):
	x = tf.placeholder(tf.float32, shape=[None, None, None, 1], name='Image')  # Independent variables.
	# Reshape to amenable shape.
	# x_image = tf.reshape(x, [-1, windowSize[0], windowSize[1], 1])
with tf.name_scope('Input_Synapse'):
	y_syn = tf.placeholder(tf.float32, shape=[None, 2])  # Target values.

with tf.name_scope('First_Layer'):
	# Create first convolutional layer. (No pooling.)
	W_conv1 = util.weight_variable(firstLayerDimensions, "w_conv_1")  # Weights in first layer.
	b_conv1 = util.bias_variable([firstLayerDimensions[3]], "b_conv_1")  # Biases in first layer.
	h_conv1 = tf.nn.relu(util.conv2d(x, W_conv1, valid=True, stride=1) + b_conv1)  # Perform convolution (with zero padding) and apply ReLU.
	h_pool1 = util.max_pool(h_conv1, 1) #, kernelWidth=2)

with tf.name_scope('Second_Layer'):
	# Create first convolutional layer. (No pooling.)
	W_conv2 = util.weight_variable(secondLayerDimensions, "w_conv_2")  # Weights in first layer.
	b_conv2 = util.bias_variable([secondLayerDimensions[3]], "b_conv_2")  # Biases in first layer.
	h_conv2 = tf.nn.relu(util.atrous_conv2d(h_pool1, W_conv2, valid=True, rate=2) + b_conv2)  # Perform convolution (with zero padding) and apply ReLU.
	h_pool2 = util.atrous_max_pool(h_conv2, mask_size=2, rate=2)

with tf.name_scope('Third_Layer'):
	# Create first convolutional layer. (No pooling.)
	W_conv3 = util.weight_variable(thirdLayerDimensions, "w_conv_3")  # Weights in first layer.
	b_conv3 = util.bias_variable([thirdLayerDimensions[3]], "b_conv_3")  # Biases in first layer.
	h_conv3 = tf.nn.relu(util.atrous_conv2d(h_pool2, W_conv3, valid=True, rate=4) + b_conv3)  # Perform convolution (with zero padding) and apply ReLU.
	h_pool3 = util.atrous_max_pool(h_conv3, mask_size=2, rate=4)