Example #1
    def forward_alexnet(self, inp, weights, reuse=False):
        # reuse is for the normalization parameters.

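        # 1st Layer: Conv (w ReLu) -> Lrn -> Pool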
        conv1 = conv_block(inp, weights['conv1_weights'], weights['conv1_biases'], stride_y=4, stride_x=4, groups=1, reuse=reuse, scope='conv1')
        norm1 = lrn(conv1, 2, 1e-05, 0.75)
        pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID')

        # 2nd Layer: Conv (w ReLu)  -> Lrn -> Pool with 2 groups
        conv2 = conv_block(pool1, weights['conv2_weights'], weights['conv2_biases'], stride_y=1, stride_x=1, groups=2, reuse=reuse, scope='conv2')
        norm2 = lrn(conv2, 2, 1e-05, 0.75)
        pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID')

        # 3rd Layer: Conv (w ReLu)
        conv3 = conv_block(pool2, weights['conv3_weights'], weights['conv3_biases'], stride_y=1, stride_x=1, groups=1, reuse=reuse, scope='conv3')

        # 4th Layer: Conv (w ReLu) split into two groups
        conv4 = conv_block(conv3, weights['conv4_weights'], weights['conv4_biases'], stride_y=1, stride_x=1, groups=2, reuse=reuse, scope='conv4')

        # 5th Layer: Conv (w ReLu) -> Pool split into two groups
        conv5 = conv_block(conv4, weights['conv5_weights'], weights['conv5_biases'], stride_y=1, stride_x=1, groups=2, reuse=reuse, scope='conv5')
        pool5 = max_pool(conv5, 3, 3, 2, 2, padding='VALID')

        # 6th Layer: Flatten -> FC (w ReLu) -> Dropout
        flattened = tf.reshape(pool5, [-1, 6 * 6 * 256])
        fc6 = fc(flattened, weights['fc6_weights'], weights['fc6_biases'], activation='relu')
        dropout6 = dropout(fc6, self.KEEP_PROB)

        # 7th Layer: FC (w ReLu) -> Dropout
        fc7 = fc(dropout6, weights['fc7_weights'], weights['fc7_biases'], activation='relu')
        dropout7 = dropout(fc7, self.KEEP_PROB)

        # 8th Layer: FC and return unscaled activations
        fc8 = fc(dropout7, weights['fc8_weights'], weights['fc8_biases'])

        return fc7, fc8
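The snippets in this collection call thin project-local wrappers rather than raw TensorFlow ops. As a reference point, here is a minimal sketch of `max_pool` and `lrn` helpers matching the `(filter_height, filter_width, stride_y, stride_x)` calling convention used above; the exact signatures are assumptions, since each snippet's `utils` module is not shown:

import tensorflow as tf

def max_pool(x, filter_height, filter_width, stride_y, stride_x,
             padding='SAME', name=None):
    # NHWC max pooling; ksize/strides use [batch, height, width, channel].
    return tf.nn.max_pool(x,
                          ksize=[1, filter_height, filter_width, 1],
                          strides=[1, stride_y, stride_x, 1],
                          padding=padding,
                          name=name)

def lrn(x, radius, alpha, beta, bias=1.0, name=None):
    # Local response normalization, as used after conv1/conv2 in AlexNet.
    return tf.nn.local_response_normalization(
        x, depth_radius=radius, alpha=alpha, beta=beta, bias=bias, name=name)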
Example #2
def UNet(x, keep_probability):
    layer1 = conv_block(x, 1)
    pool1 = utils.max_pool(layer1, 2)

    layer2 = conv_block(pool1, 2)
    pool2 = utils.max_pool(layer2, 2)

    layer3 = conv_block(pool2, 3)
    pool3 = utils.max_pool(layer3, 2)

    layer4 = conv_block(pool3, 4)
    # upsampling

    up5 = upsampling_bolck(layer3, layer4, 5)
    layer5 = conv_block(up5, 5)

    up6 = upsampling_bolck(layer2, layer5, 6)
    layer6 = conv_block(up6, 6)

    up7 = upsampling_bolck(layer1, layer6, 7)
    layer7 = conv_block(up7, 7)

    W6 = utils.weight_variable([1, 1, layer7.get_shape()[3].value, 1],
                               name='W6')
    b6 = utils.bias_variable([1], 'bias6')
    conv6_1 = utils.conv2d(layer7, W6, b6, 'conv_6')

    return conv6_1
Example #3
def inference(image):
    with tf.variable_scope('inference'):
        with tf.name_scope('1.unit'):
            w1 = utils.weight_variable([5, 5, 3, 32], name='w1')
            print(w1.name)
            b1 = utils.bias_variable([32], name='b1')
            tf.summary.histogram('b1', b1)
            x = utils.conv2d_activtion(image, w1, b1, batchnorm=True)
            x = utils.max_pool(x)

        with tf.name_scope('2.unit'):
            w2 = utils.weight_variable([5, 5, 32, 64], 'w2')
            tf.summary.histogram('w2', w2)
            b2 = utils.bias_variable([64], 'b2')
            tf.summary.histogram('b2', b2)
            x = utils.conv2d_activtion(x, w2, b2, batchnorm=True)
            x = utils.max_pool(x)

        with tf.name_scope('3.unit'):
            w3 = utils.weight_variable([5, 5, 64, 96], 'w3')
            b3 = utils.bias_variable([96], 'b3')
            x = utils.conv2d_activtion(x, w3, b3, batchnorm=True)
            x = utils.max_pool(x)

        with tf.name_scope('4.unit'):
            w4 = utils.weight_variable([5, 5, 96, 128], 'w4')
            b4 = utils.bias_variable([128], 'b4')
            x = utils.conv2d_activtion(x, w4, b4, batchnorm=True)
            x = utils.max_pool(x)

        with tf.name_scope('fc1'):
            layers = int(x.shape[1]) * int(x.shape[2]) * int(x.shape[3])
            w5 = utils.weight_variable([layers, 128], 'w5')
            b5 = utils.bias_variable([128], 'b5')
            x = tf.reshape(x, [-1, layers])
            x = tf.matmul(x, w5) + b5
            x = tf.nn.elu(x)
            # x = tf.nn.dropout(x, 0.5)

        with tf.name_scope('fc2'):
            w6 = utils.weight_variable([128, 128], 'w6')
            b6 = utils.bias_variable([128], 'b6')
            x = tf.matmul(x, w6) + b6
            x = tf.nn.elu(x)
            # x = tf.nn.dropout(x, 0.5)

        with tf.name_scope('out_put'):
            w7 = utils.weight_variable([128, 5], 'w7')
            b7 = utils.bias_variable([5], 'b7')
            x = tf.matmul(x, w7) + b7
            pred = tf.nn.softmax(x)

    return pred
Example #4
    def eval(self, data, dropout_keep_prob=1.0):
        activations_1x1 = self.calc_activations(data, self.weights_1x1,
                                                self.biases_1x1)

        activations_3x3_reduced = self.calc_activations(
            data, self.weights_3x3_reduced, self.biases_3x3_reduced)
        activations_3x3 = self.calc_activations(activations_3x3_reduced,
                                                self.weights_3x3,
                                                self.biases_3x3)

        activations_5x5_reduced = self.calc_activations(
            data, self.weights_5x5_reduced, self.biases_5x5_reduced)
        activations_5x5 = self.calc_activations(activations_5x5_reduced,
                                                self.weights_5x5,
                                                self.biases_5x5)

        activations_max_pool_3x3 = utils.max_pool(data,
                                                  block_size=3,
                                                  stride=1,
                                                  padding='SAME')
        activations_pool_reduced = self.calc_activations(
            activations_max_pool_3x3, self.weights_pool_reduced,
            self.biasespool_reduced)

        depth_concat = tf.concat([
            activations_3x3, activations_1x1, activations_5x5,
            activations_pool_reduced
        ], axis=3)  # tf.concat takes the value list first in TF >= 1.0
        return depth_concat
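`calc_activations` is not shown here; in an Inception block each branch is typically a biased convolution followed by ReLU, with stride 1 and `SAME` padding so that all four branches keep the same spatial size and can be concatenated along the channel axis. A minimal sketch under that assumption (the body below is a guess, not the original method):

    def calc_activations(self, data, weights, biases):
        # Assumed branch op: stride-1 SAME convolution + bias + ReLU.
        # SAME padding keeps height/width unchanged, which is what makes
        # the channel-axis concat at the end of eval() shape-compatible.
        conv = tf.nn.conv2d(data, weights, strides=[1, 1, 1, 1], padding='SAME')
        return tf.nn.relu(tf.nn.bias_add(conv, biases))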
Example #5
 def inference(self, x):
     with tf.variable_scope("conv0"):
         conv1 = utils.relu(utils.Bn(utils.conv2d(x, 64, 7, 7, 2, 2, bias=True), training=self.is_training))
     with tf.name_scope("pool1"):
         pool1 = utils.max_pool(conv1, 3, 3, 2, 2)
     with tf.variable_scope("group0"):
         res2a = self.residual(pool1, 256, name='block0')
         res2b = self.residual(res2a, 256, name='block1')
         res2c = self.residual(res2b, 256, name='block2')
     with tf.variable_scope("group1"):
         res3a = self.residual(res2c, 512, 2, name='block0')
         res3b = self.residual(res3a, 512, name='block1')
         res3c = self.residual(res3b, 512, name='block2')
         res3d = self.residual(res3c, 512, name='block3')
     with tf.variable_scope("group2"):
         res4a = self.residual(res3d, 1024, 2, name='block0')
         res4b = self.residual(res4a, 1024, name='block1')
         res4c = self.residual(res4b, 1024, name='block2')
         res4d = self.residual(res4c, 1024, name='block3')
         res4e = self.residual(res4d, 1024, name='block4')
         res4f = self.residual(res4e, 1024, name='block5')
     with tf.variable_scope("group3"):
         res5a = self.residual(res4f, 2048, 2, name='block0')
         res5b = self.residual(res5a, 2048, name='block1')
         res5c = self.residual(res5b, 2048, name='block2')
     with tf.name_scope("pool5"):
         pool5 = utils.global_pool(res5c)
     with tf.variable_scope("linear"):
         dropout = tf.nn.dropout(pool5, keep_prob=self.keep_prob)
         out = utils.linear(dropout, 1000)
     return out
Example #6
    def encode(self, inputs, lengths, fr=0):
        bsz, max_len = inputs.size()
        e_hidden_init = self.e_hidden_init.expand(
            2, bsz, self.hidden_dim).contiguous()
        e_cell_init = self.e_cell_init.expand(2, bsz,
                                              self.hidden_dim).contiguous()
        lens, indices = torch.sort(lengths, 0, True)

        if fr and not self.share_vocab:
            in_embs = self.embedding_fr(inputs)
        else:
            in_embs = self.embedding(inputs)

        if self.dropout > 0:
            # F.dropout returns a new tensor; the result must be kept.
            in_embs = F.dropout(in_embs, p=self.dropout, training=self.training)

        if fr and not self.share_encoder:
            all_hids, (enc_last_hid, _) = self.lstm_fr(
                pack(in_embs[indices], lens.tolist(), batch_first=True),
                (e_hidden_init, e_cell_init))
        else:
            all_hids, (enc_last_hid, _) = self.lstm(
                pack(in_embs[indices], lens.tolist(), batch_first=True),
                (e_hidden_init, e_cell_init))

        _, _indices = torch.sort(indices, 0)
        all_hids = unpack(all_hids, batch_first=True)[0][_indices]

        if self.pool == "max":
            embs = utils.max_pool(all_hids, lengths, self.gpu)
        elif self.pool == "mean":
            embs = utils.mean_pool(all_hids, lengths, self.gpu)
        return embs
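`utils.max_pool(all_hids, lengths, self.gpu)` here pools over the time dimension of padded hidden states. A common implementation masks padding positions to -inf before taking the max, and divides by the true lengths for the mean; a minimal sketch, assuming `hids` is `[batch, max_len, dim]` and `lengths` is a 1-D tensor of true lengths (the signatures mirror the call sites above but are not the original code):

import torch

def max_pool(hids, lengths, gpu=None):
    # hids: [batch, max_len, dim]; lengths: [batch].
    # `gpu` is kept only to match the call sites; device comes from `hids`.
    idx = torch.arange(hids.size(1), device=hids.device).unsqueeze(0)
    mask = (idx < lengths.to(hids.device).unsqueeze(1)).unsqueeze(2)
    # Padding positions become -inf so they never win the max.
    return hids.masked_fill(~mask, float('-inf')).max(dim=1)[0]

def mean_pool(hids, lengths, gpu=None):
    idx = torch.arange(hids.size(1), device=hids.device).unsqueeze(0)
    mask = (idx < lengths.to(hids.device).unsqueeze(1)).unsqueeze(2).float()
    # Zero out padding, then divide by each sequence's true length.
    return (hids * mask).sum(1) / lengths.to(hids.device).float().unsqueeze(1)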
Example #7
    def graph(self, input, is_training):
        with tf.name_scope('model'):
            net = ut.conv_layer(input, 64, 7, 2, name='conv1')
            net = ut.bottleneck(net,
                                128,
                                stride=1,
                                training=is_training,
                                name='res1')
            net = ut.max_pool(net, 2, 2, 'max_pool')
            net = ut.bottleneck(net,
                                int(self.nFeats / 2),
                                stride=1,
                                training=is_training,
                                name='res2')
            net = ut.bottleneck(net,
                                self.nFeats,
                                stride=1,
                                training=is_training,
                                name='res3')

            with tf.name_scope('stacks'):
                stack_out = []
                with tf.name_scope('stage_0'):
                    hg = ut.hourglass(net, self.nLow, self.nFeats, 'hourglass')
                    drop = ut.dropout(hg, self.dropout_rate, is_training,
                                      'dropout')
                    ll = ut.conv_layer_bn(drop, self.nFeats, 1, 1, is_training)
                    out = ut.conv_layer(ll, self.num_points, 1, 1, name='out')
                    out_ = ut.conv_layer(out, self.nFeats, 1, 1, name='out_')
                    sum_ = tf.add(net, out_, name='merge')
                    stack_out.append(out)
                for i in range(1, self.nStacks):
                    with tf.name_scope('stage_' + str(i)):
                        hg = ut.hourglass(sum_, self.nLow, self.nFeats,
                                          'hourglass')
                        drop = ut.dropout(hg, self.dropout_rate, is_training,
                                          'dropout')
                        ll = ut.conv_layer_bn(drop, self.nFeats, 1, 1,
                                              is_training)
                        out = ut.conv_layer(ll,
                                            self.num_points,
                                            1,
                                            1,
                                            name='out')
                        out_ = ut.conv_layer(ll,
                                             self.nFeats,
                                             1,
                                             1,
                                             name='out_')
                        sum_ = tf.add(sum_, out_, name='merge')
                        stack_out.append(out)
            with tf.name_scope('upsampling'):
                net = ut.batch_norm(sum_, is_training)
                net = ut.conv_layer_bn(net, self.nFeats, 3, 1, is_training)
                up1 = ut.deconv_layer(net, self.num_points, 1, 2, name='up_1')
                net = ut.conv_layer_bn(up1, self.nFeats, 3, 1, is_training)
                up2 = ut.deconv_layer(net, self.num_points, 1, 2, name='up_2')
            return tf.stack(stack_out, axis=1, name='stack_out'), up1, up2
Example #8
    def set_up(self):

        with tf.variable_scope('conv1'):
            network = conv2d(self.input, [7, 7], 32, scope='conv1_1')
            network = conv2d(network, [3, 3], 32, scope='conv1_2')
            network = max_pool(network, 'pool1')  # downsample

        with tf.variable_scope('conv2'):
            network = conv2d(network, [3, 3], 64, scope='conv2_1')
            network = conv2d(network, [3, 3], 64, scope='conv2_2')
            network = max_pool(network, 'pool2')  # downsample

        with tf.variable_scope('conv3'):
            network = conv2d(network, [3, 3], 128, scope='conv3_1')
            network = conv2d(network, [3, 3], 128, scope='conv3_2')

        with tf.variable_scope('deconv1'):
            network = deconv2d(network, [3, 3], 64,
                               scope='deconv1_1')  # upsample
            network = deconv2d(network, [3, 3],
                               64,
                               stride=1,
                               scope='deconv1_2')

        with tf.variable_scope('deconv2'):
            network = deconv2d(network, [3, 3], 32,
                               scope='deconv2_1')  # upsample
            network = deconv2d(network, [3, 3],
                               32,
                               stride=1,
                               scope='deconv2_2')

        with tf.variable_scope('out_class'):
            logits = conv2d(network, [3, 3],
                            2,
                            bn=False,
                            relu=False,
                            scope='logits')

        self.pred_prob = tf.nn.softmax(logits, name='predictions')[:, :, :, 1]
        self.pred = tf.argmax(logits, 3)
        self.loss = iou_loss(self.pred_prob, self.label)
        self.train_score = iou_loss(tf.cast(self.pred, tf.float32), self.label)
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate, epsilon=1e-4).minimize(self.loss)
Example #9
 def forward(self, x):
     self._ims = []
     self._cols = []
     new_rows = []
     for row in x:
         im = row.reshape((1, self.im_rect[0], self.im_rect[1]))
         col = utils.im2col(im, self.pool_rect, self.pool_stride)
         new_row = utils.max_pool(col)
         new_rows.append(new_row)
         self._ims.append(im)
         self._cols.append(col)
     return np.array(new_rows)
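In this NumPy version, `utils.max_pool` is just a column-wise max over the im2col matrix: `im2col` lays each pooling window out as one column, so pooling reduces to `col.max(axis=0)`. A minimal sketch of both helpers, assuming a single-channel `[1, H, W]` image and a scalar stride (the real helpers are not shown):

import numpy as np

def im2col(im, pool_rect, stride):
    # im: [1, H, W]; each output column holds one pool_rect window.
    _, h, w = im.shape
    ph, pw = pool_rect
    out_h = (h - ph) // stride + 1
    out_w = (w - pw) // stride + 1
    cols = np.empty((ph * pw, out_h * out_w))
    k = 0
    for i in range(out_h):
        for j in range(out_w):
            window = im[0, i * stride:i * stride + ph,
                        j * stride:j * stride + pw]
            cols[:, k] = window.ravel()
            k += 1
    return cols

def max_pool(col):
    # One max per window/column -> flattened pooled feature map.
    return col.max(axis=0)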
Example #10
 def eval(self, data, dropout_keep_prob=1.0):
     if self.activation_func == 'maxpool':
         pooled_data = utils.max_pool(data,
                                      block_size=self.filter_size,
                                      stride=self.stride,
                                      padding=self.padding)
     else:
         pooled_data = utils.avg_pool(data,
                                      block_size=self.filter_size,
                                      stride=self.stride,
                                      padding=self.padding)
     return pooled_data
Example #11
 def forward(self, x):
     self._ims = []
     self._cols = []
     new_rows = []
     for row in x:
         im = row.reshape((1, self.im_rect[0], self.im_rect[1]))
         col = utils.im2col(im, self.pool_rect, self.pool_stride)
         new_row = utils.max_pool(col)
         new_rows.append(new_row)
         self._ims.append(im)
         self._cols.append(col)
     return np.array(new_rows)
Example #12
def net_1(input, is_train):
    conv1 = conv(input,
                 filter_h=5,
                 filter_w=5,
                 num_filters=32,
                 stride_y=1,
                 stride_x=1,
                 name='conv1')
    pool1 = max_pool(conv1,
                     filter_h=2,
                     filter_w=2,
                     stride_y=2,
                     stride_x=2,
                     name='pool1')
    conv2 = conv(pool1, 5, 5, 64, 1, 1, 'conv2')
    pool2 = max_pool(conv2, 2, 2, 2, 2, 'pool2')
    flattened = flatten_3d(pool2, name='flattening')
    fc3 = fc(flattened, out_neurons=1000, name='fc3')
    dropout3 = dropout(fc3,
                       keep_prob=prob_close(is_train, 0.5),
                       name='dropout3')
    fc4 = fc(dropout3, out_neurons=10, name='fc4', relu=False)

    return fc4
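`prob_close` is presumably a small helper that turns dropout off at evaluation time. A one-line sketch under that assumption (the name and behavior are inferred from the call above, not from the original source):

def prob_close(is_train, keep_prob):
    # Drop neurons only while training (assumes is_train is a Python bool;
    # a graph-mode flag would need tf.cond / tf.where instead).
    return keep_prob if is_train else 1.0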
Example #13
    def encode(self, idxs, lengths, fr=0):
        if fr and not self.share_vocab:
            word_embs = self.embedding_fr(idxs)
        else:
            word_embs = self.embedding(idxs)

        if self.dropout > 0:
            # F.dropout returns a new tensor; the result must be kept.
            word_embs = F.dropout(word_embs, p=self.dropout, training=self.training)

        if self.pool == "max":
            word_embs = utils.max_pool(word_embs, lengths, self.args.gpu)
        elif self.pool == "mean":
            word_embs = utils.mean_pool(word_embs, lengths, self.args.gpu)

        return word_embs
Example #14
 def inference(self, x, graph):
     with tf.variable_scope("conv0"):
         if self.res_name == "resnet50":
             net = utils.relu(
                 utils.Bn(utils.conv2d(x, 64, 7, 7, 2, 2, bias=True),
                          training=self.is_training))
         else:
             net = utils.relu(
                 utils.Bn(utils.conv2d(x, 64, 7, 7, 2, 2, bias=False),
                          training=self.is_training))
     with tf.name_scope("pool1"):
         net = utils.max_pool(net, 3, 3, 2, 2)
     with tf.variable_scope("group0"):
         for i in range(graph[0]):
             net = self.residual(net, 256, name='block' + str(i))
     with tf.variable_scope("group1"):
         for i in range(graph[1]):
             if i == 0:
                 net = self.residual(net, 512, 2, name='block' + str(i))
             else:
                 net = self.residual(net, 512, name='block' + str(i))
     with tf.variable_scope("group2"):
         for i in range(graph[2]):
             if i == 0:
                 net = self.residual(net, 1024, 2, name='block' + str(i))
             else:
                 net = self.residual(net, 1024, name='block' + str(i))
     with tf.variable_scope("group3"):
         for i in range(graph[3]):
             if i == 0:
                 net = self.residual(net, 2048, 2, name='block' + str(i))
             else:
                 net = self.residual(net, 2048, name='block' + str(i))
     with tf.name_scope("pool5"):
         net = utils.global_pool(net)
     with tf.variable_scope("linear"):
         net = tf.nn.dropout(net, keep_prob=self.keep_prob)
         net = utils.linear(net, 1000)
     return net
Example #15
 def inference(self, x):
     with tf.variable_scope("conv0"):
         conv1 = utils.relu(utils.Bn(utils.conv2d(x, 64, 7, 7, 2, 2, bias=False), training=self.is_training))
     with tf.name_scope("pool1"):
         pool1 = utils.max_pool(conv1, 3, 3, 2, 2)
     with tf.variable_scope("group0"):
         res2a = self.residual(pool1, 64, branch=True, name='block0')
         res2b = self.residual(res2a, 64, name='block1')
     with tf.variable_scope("group1"):
         res3a = self.residual(res2b, 128, 2, name='block0')
         res3b = self.residual(res3a, 128, name='block1')
     with tf.variable_scope("group2"):
         res4a = self.residual(res3b, 256, 2, name='block0')
         res4b = self.residual(res4a, 256, name='block1')
     with tf.variable_scope("group3"):
         res5a = self.residual(res4b, 512, 2, name='block0')
         res5b = self.residual(res5a, 512, name='block1')
     with tf.name_scope("pool5"):
         pool5 = utils.global_pool(res5b)
     with tf.variable_scope("linear"):
         dropout = tf.nn.dropout(pool5, keep_prob=self.keep_prob)
         out = utils.linear(dropout, 1000)
     return out
Example #16
def create_conv_network(x,
                        channels_x,
                        channels_y,
                        layers=3,
                        feature_base=64,
                        filter_size=5,
                        pool_size=2,
                        keep_prob=0.8,
                        create_summary=True):
    """
    :param x: input_tensor, shape should be [None, n, m, channels_x]
    :param channels_x: number of channels in the input image. For Mri, input has 4 channels.
    :param channels_y: number of channels in the output image. For Mri, output has 2 channels.
    :param layers: number of layers in u-net architecture.
    :param feature_base: Neurons in first layer of cnn. Next layers have twice the number of neurons in previous layers.
    :param filter_size: size of convolution filter
    :param pool_size: size of pooling layer
    :create_summary: Creates Tensorboard summary if True
    """

    logging.info(
        "Layers: {layers}, features: {features}, filter size {fill_size}x{fill_size}, pool size {pool_size}x{pool_size}, "
        "input channels {in_channels}, output channels {out_channels}".format(
            layers=layers,
            features=feature_base,
            fill_size=filter_size,
            pool_size=pool_size,
            in_channels=channels_x,
            out_channels=channels_y))

    # placeholder for input image
    with tf.name_scope("input_image"):
        n = tf.shape(x)[1]
        m = tf.shape(x)[2]

        x_image = tf.reshape(x, tf.stack([-1, n, m, channels_x]))
        input_node = x_image

    weights = []
    biases = []
    convs = []
    pools = OrderedDict()
    deconv = OrderedDict()
    dw_h_convs = OrderedDict()
    up_h_convs = OrderedDict()

    # down layers
    for layer in range(layers):
        with tf.name_scope("down_conv_layer{}".format(str(layer))):
            features = (2**layer) * feature_base
            std_dev = np.sqrt(2. / (filter_size * filter_size * features))

            if layer == 0:
                w1 = utils.weight_variable(
                    [filter_size, filter_size, channels_x, features], std_dev,
                    "w1")
            else:
                w1 = utils.weight_variable(
                    [filter_size, filter_size, features // 2, features],
                    std_dev, "w1")

            w2 = utils.weight_variable(
                [filter_size, filter_size, features, features], std_dev, "w2")
            b1 = utils.bias_variable([features], "b1")
            b2 = utils.bias_variable([features], "b2")

            conv_1 = utils.conv2d(input_node, w1, b1, keep_prob)
            conv_2 = utils.conv2d(tf.nn.relu(conv_1), w2, b2, keep_prob)
            dw_h_convs[layer] = tf.nn.relu(conv_2)

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv_1, conv_2))

            # do max pooling if not the last layer
            if layer < layers - 1:
                pools[layer] = utils.max_pool(dw_h_convs[layer], pool_size)
                input_node = pools[layer]

    input_node = dw_h_convs[layers - 1]

    # up layers
    for layer in range(layers - 2, -1, -1):
        with tf.name_scope("up_conv_layer{}".format(str(layer))):
            features = (2**(layer + 1)) * feature_base
            std_dev = np.sqrt(2. / (filter_size * filter_size * features))

            wd = utils.weight_variable_devonc(
                [pool_size, pool_size, features // 2, features], std_dev, "wd")
            bd = utils.bias_variable([features // 2], "bd")

            h_deconv = tf.nn.relu(
                utils.deconv2d(input_node, wd, pool_size) + bd)
            h_deconv_concat = tf.concat([dw_h_convs[layer], h_deconv], 3)

            deconv[layer] = h_deconv_concat

            w1 = utils.weight_variable(
                [filter_size, filter_size, features, features // 2], std_dev,
                "w1")
            w2 = utils.weight_variable(
                [filter_size, filter_size, features // 2, features // 2],
                std_dev, "w2")
            b1 = utils.bias_variable([features // 2], "b1")
            b2 = utils.bias_variable([features // 2], "b2")

            conv_1 = utils.conv2d(h_deconv_concat, w1, b1, keep_prob)
            conv_2 = utils.conv2d(tf.nn.relu(conv_1), w2, b2, keep_prob)

            input_node = tf.nn.relu(conv_2)
            up_h_convs[layer] = input_node

            weights.append((w1, w2))
            biases.append((b1, b2))
            convs.append((conv_1, conv_2))

    # output image
    with tf.name_scope("output_image"):
        weight = utils.weight_variable([1, 1, feature_base, channels_y],
                                       std_dev, "out_weight")
        bias = utils.bias_variable([channels_y], "out_bias")
        output_image = tf.add(
            utils.conv2d(input_node, weight, bias, tf.constant(1.0)), x_image)
        up_h_convs["out"] = output_image

    # Create Summaries
    if create_summary:
        with tf.name_scope("summaries"):
            for i, (c1, c2) in enumerate(convs):
                tf.summary.image("summary_conv_{:02}_01".format(i),
                                 utils.get_image_summary(c1))
                tf.summary.image("summary_conv_{:02}_02".format(i),
                                 utils.get_image_summary(c2))

            for k in pools.keys():
                tf.summary.image("summary_pool_{:02}".format(k),
                                 utils.get_image_summary(pools[k]))

            for k in deconv.keys():
                tf.summary.image("summary_deconv_concat_{:02}".format(k),
                                 utils.get_image_summary(deconv[k]))

            for k in dw_h_convs.keys():
                tf.summary.histogram(
                    "dw_convolution_{:02}/activations".format(k),
                    dw_h_convs[k])

            for k in up_h_convs.keys():
                tf.summary.histogram("up_convolution_{}/activations".format(k),
                                     up_h_convs[k])

    variables = []
    for w1, w2 in weights:
        variables.append(w1)
        variables.append(w2)

    for b1, b2 in biases:
        variables.append(b1)
        variables.append(b2)

    return output_image, variables
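A minimal usage sketch for the builder above. Note that the final residual `tf.add(..., x_image)` only type-checks when `channels_x == channels_y`, so the sketch uses matching channel counts (the concrete shapes are assumptions):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None, 256, 256, 2], name='x')
output_image, variables = create_conv_network(x,
                                              channels_x=2,
                                              channels_y=2,
                                              layers=3,
                                              feature_base=64)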
Example #17
def deeplab(x):
    W1 = utils.weight_variable([3, 3, x.get_shape()[3].value, 128], name='W1')
    b1 = utils.bias_variable([128], 'bias1')
    conv1_1 = utils.conv2d(x, W1, b1)
    conv1_2 = tf.nn.relu(conv1_1)
    conv1_3 = utils.max_pool(conv1_2, 2)

    W2 = utils.weight_variable([3, 3, conv1_3.get_shape()[3].value, 128],
                               name='W2')
    b2 = utils.bias_variable([128], 'bias2')
    conv2_1 = utils.conv2d(conv1_3, W2, b2)
    conv2_2 = tf.nn.relu(conv2_1)
    conv2_3 = utils.max_pool(conv2_2, 2)

    W3 = utils.weight_variable([3, 3, conv2_3.get_shape()[3].value, 128],
                               name='W3')
    b3 = utils.bias_variable([128], 'bias3')
    conv3_1 = utils.atrous_conv2d(conv2_3, W3, b3)
    conv3_2 = tf.nn.relu(conv3_1)

    W4 = utils.weight_variable([3, 3, conv2_3.get_shape()[3].value, 128],
                               name='W4')
    b4 = utils.bias_variable([128], 'bias4')
    conv4_1 = utils.atrous_conv2d(conv3_2, W4, b4)
    conv4_2 = tf.nn.relu(conv4_1)

    # fully connected

    W5 = utils.weight_variable([3, 3, conv4_2.get_shape()[3].value, 128],
                               name='W5')
    b5 = utils.bias_variable([128], 'bias5')
    conv5_1 = utils.conv2d(conv4_2, W5, b5)
    conv5_2 = tf.nn.relu(conv5_1)

    # upscale
    inputs_shape = tf.shape(conv1_3)
    outputs_shape = [
        inputs_shape[0], inputs_shape[1], inputs_shape[2],
        conv1_3.get_shape()[3].value
    ]
    W_t1 = utils.weight_variable(
        [4, 4, conv1_3.get_shape()[3].value, 128], name='W_t1')
    b_t1 = utils.bias_variable([conv1_3.get_shape()[3].value], name='b_t1')
    conv_t1 = utils.conv2d_transpose(conv5_2,
                                     W_t1,
                                     b_t1,
                                     outputs_shape,
                                     stride=2)
    fuse_1 = tf.add(conv_t1, conv1_3, name='fuse_1')

    # inputs_shape = tf.shape(conv1_3)
    # outputs_shape = [inputs_shape[0], inputs_shape[1], inputs_shape[2], conv1_3.get_shape()[3].value]
    # W_t2 = utils.weight_variable([4, 4, conv1_3.get_shape()[3].value,fuse_1.get_shape()[3].value], name='W_t2')
    # b_t2 = utils.bias_variable([conv1_3.get_shape()[3].value], name='b_t2')
    # conv_t2 = utils.conv2d_transpose(fuse_1,W_t2, b_t2, outputs_shape,stride=2)
    # fuse_2=tf.add(conv_t2,conv1_1_3,name='fuse_2')

    inputs_shape = tf.shape(x)
    outputs_shape = [inputs_shape[0], inputs_shape[1], inputs_shape[2], 128]
    W_t3 = utils.weight_variable(
        [4, 4, 128, fuse_1.get_shape()[3].value], name='W_t3')
    b_t3 = utils.bias_variable([128], name='b_t3')
    conv_t3 = utils.conv2d_transpose(fuse_1,
                                     W_t3,
                                     b_t3,
                                     outputs_shape,
                                     stride=2)

    # annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction")
    # return annotation_pred, conv_t3

    W0 = utils.weight_variable([1, 1, conv_t3.get_shape()[3].value, 1],
                               name='W0')
    b0 = utils.bias_variable([1], 'bias0')
    conv0_1 = utils.conv2d(conv_t3, W0, b0)
    return conv0_1
Example #18
    def inference(self, images):
        print('================== Resnet structure =======================')
        print('num_residual_units: ', self.num_residual_units)
        print('channels in each block: ', self.filters)
        print('stride in each block: ', self.strides)
        print('================== constructing network ====================')

        x = utils.input_data(images, self.data_format)
        x = tf.cast(x, self.float_type)

        print('shape input: ', x.get_shape())
        with tf.variable_scope('conv1'):
            trainable_ = False if self.fix_blocks > 0 else True
            self.fix_blocks -= 1
            x = utils.conv2d_same(x,
                                  64,
                                  7,
                                  2,
                                  trainable=trainable_,
                                  data_format=self.data_format,
                                  initializer=self.initializer,
                                  float_type=self.float_type)
            x = utils.batch_norm('BatchNorm',
                                 x,
                                 trainable_,
                                 self.data_format,
                                 self.mode,
                                 use_gamma=self.bn_use_gamma,
                                 use_beta=self.bn_use_beta,
                                 bn_epsilon=self.bn_epsilon,
                                 bn_ema=self.bn_ema,
                                 float_type=self.float_type)
            x = utils.relu(x)
            x = utils.max_pool(x, 3, 2, self.data_format)
        print('shape after pool1: ', x.get_shape())

        for block_index in range(len(self.num_residual_units)):
            for unit_index in range(self.num_residual_units[block_index]):
                with tf.variable_scope('block%d' % (block_index + 1)):
                    with tf.variable_scope('unit_%d' % (unit_index + 1)):
                        stride = 1
                        if unit_index == self.num_residual_units[
                                block_index] - 1:
                            stride = self.strides[block_index]

                        trainable_ = False if self.fix_blocks > 0 else True
                        self.fix_blocks -= 1
                        x = utils.bottleneck_residual(
                            x,
                            self.filters[block_index],
                            stride,
                            data_format=self.data_format,
                            initializer=self.initializer,
                            rate=self.rate[block_index],
                            trainable=trainable_,
                            bn_mode=self.mode,
                            bn_use_gamma=self.bn_use_gamma,
                            bn_use_beta=self.bn_use_beta,
                            bn_epsilon=self.bn_epsilon,
                            bn_ema=self.bn_ema,
                            float_type=self.float_type)
            print('shape after block %d: ' % (block_index + 1), x.get_shape())

        with tf.variable_scope('logits'):
            x = utils.global_avg_pool(x, self.data_format)
            self.logits = utils.fully_connected(x,
                                                self.num_classes,
                                                trainable=True,
                                                data_format=self.data_format,
                                                initializer=self.initializer,
                                                float_type=self.float_type)
            self.logits = tf.reshape(self.logits, (-1, self.num_classes))
            self.predictions = tf.nn.softmax(self.logits)

        print('================== network constructed ====================')
        return self.logits
Example #19
    pickle.dump(images, open(tmp_image_file, "wb"))

### model
sess = tf.Session()

# input layer
X = tf.placeholder(tf.float32, shape=[None, max_shape[0], max_shape[1], 1])
y = tf.placeholder(tf.float32, shape=[None, 1])
keep_prob = tf.placeholder(tf.float32)

# first convolution
W_conv1 = weight_variable([10, 10, 1, n_convo_layer1])
b_conv1 = bias_variable([n_convo_layer1])

h_conv1 = tf.nn.sigmoid(conv2d(X, W_conv1) + b_conv1)
h_pool1 = max_pool(h_conv1, ksize=[1, 2, 2, 1])

# second convolution
W_conv2 = weight_variable([10, 10, n_convo_layer1, n_convo_layer2])
b_conv2 = bias_variable([n_convo_layer2])

h_conv2 = tf.nn.sigmoid(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool(h_conv2, ksize=[1, 2, 2, 1])

# first fully connected layer
W_fc1 = weight_variable([inner_layer_size * inner_layer_size * n_convo_layer2, percep_size])
b_fc1 = bias_variable([percep_size])

h_pool2_flat = tf.reshape(h_pool2, [-1, inner_layer_size * inner_layer_size * n_convo_layer2])
h_fc1 = tf.nn.sigmoid(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
Example #20
    def __init__(self,
                 inp,
                 dropout=False,
                 drop_prob=0.25,
                 histogram=True,
                 num_class=11,
                 def_cp_name='mnist_seg',
                 def_cp_path='checkpoints/mnist_seg_mp',
                 def_log_name='mnist_seg_mp',
                 def_log_path='./log/',
                 version=1):
        """
    inp: Input placeholder.
    shape: Tensorflow tensor shape used in the input placeholder.
           It must be a list object.
    dropout: Flag used to indicate if dropout will be used
    drop_prob: Percentage of neurons to be turned off
    histogram: Indicates if information for tensorboard should be annexed.
    """
        self.def_cp_name = def_cp_name
        self.def_cp_path = def_cp_path + 'v' + str(version)
        if self.def_cp_path[-1] != '/':
            self.def_cp_path += '/'
        self.def_log_name = def_log_name + 'v' + str(version)
        self.def_log_path = def_log_path
        if self.def_log_path[-1] != '/':
            self.def_log_path += '/'
        self.reg = []  # Contains l2 regularization for weights
        self.dropout = dropout
        self.drop_prob = drop_prob
        self.x = inp
        # shape vs get_shape https://stackoverflow.com/a/43290897/5969548
        self.im_h = int(self.x.get_shape()[1])
        self.im_w = int(self.x.get_shape()[2])
        self.im_c = int(self.x.get_shape()[3])
        self.num_class = num_class
        ##### Network Specs
        ks1 = 3
        num_k1 = 8
        ks2 = 3
        num_k2 = 8

        ks3 = 3
        num_k3 = 16
        ks4 = 3
        num_k4 = 16

        ks5 = 3
        num_k5 = 32
        ks6 = 3
        num_k6 = 32

        ##### Core model
        c1_shape = [ks1, ks1, self.im_c, num_k1]
        self.conv1, reg = ut.conv2(inp=self.x,
                                   shape=c1_shape,
                                   name='conv1',
                                   dropout=self.dropout,
                                   drop_prob=self.drop_prob,
                                   histogram=histogram,
                                   l2=True)
        self.reg.append(reg)

        c2_shape = [ks2, ks2, num_k1, num_k2]
        self.conv2, reg = ut.conv2(inp=self.conv1,
                                   shape=c2_shape,
                                   name='conv2',
                                   dropout=self.dropout,
                                   drop_prob=self.drop_prob,
                                   histogram=histogram,
                                   l2=True)
        self.reg.append(reg)

        self.pool1, self.ind1 = ut.max_pool(self.conv2,
                                            args=True,
                                            name='maxpool1')

        c3_shape = [ks3, ks3, num_k2, num_k3]
        self.conv3, reg = ut.conv2(inp=self.pool1,
                                   shape=c3_shape,
                                   name='conv3',
                                   dropout=self.dropout,
                                   drop_prob=self.drop_prob,
                                   histogram=histogram,
                                   l2=True)
        self.reg.append(reg)

        c4_shape = [ks4, ks4, num_k3, num_k4]
        self.conv4, reg = ut.conv2(inp=self.conv3,
                                   shape=c4_shape,
                                   name='conv4',
                                   dropout=self.dropout,
                                   drop_prob=self.drop_prob,
                                   histogram=histogram,
                                   l2=True)
        self.reg.append(reg)

        self.pool2, self.ind2 = ut.max_pool(self.conv4,
                                            args=True,
                                            name='maxpool2')

        c5_shape = [ks5, ks5, num_k4, num_k5]
        self.conv5, reg = ut.conv2(inp=self.pool2,
                                   shape=c5_shape,
                                   name='conv5',
                                   dropout=self.dropout,
                                   drop_prob=self.drop_prob,
                                   histogram=histogram,
                                   l2=True)
        self.reg.append(reg)

        c6_shape = [ks6, ks6, num_k5, num_k6]
        self.conv6, reg = ut.conv2(inp=self.conv5,
                                   shape=c6_shape,
                                   name='conv6',
                                   dropout=self.dropout,
                                   drop_prob=self.drop_prob,
                                   histogram=histogram,
                                   l2=True)
        self.reg.append(reg)

        d1_shape = [ks6, ks6, num_k6, num_k6]
        self.deconv1, reg = ut.deconv2(inp=self.conv6,
                                       shape=d1_shape,
                                       relu=True,
                                       name='deconv1',
                                       dropout=self.dropout,
                                       drop_prob=self.drop_prob,
                                       histogram=histogram,
                                       l2=True)
        self.reg.append(reg)

        d2_shape = [ks5, ks5, num_k4, num_k5]
        self.deconv2, reg = ut.deconv2(inp=self.deconv1,
                                       shape=d2_shape,
                                       relu=True,
                                       name='deconv2',
                                       dropout=self.dropout,
                                       drop_prob=self.drop_prob,
                                       histogram=histogram,
                                       l2=True)
        self.reg.append(reg)

        self.unpool1 = ut.unpool_with_argmax(
            self.deconv2,
            self.ind2,
            input_shape=[self.x.get_shape()[0].value, 7, 7, num_k4],
            name='unpool1')

        #self.sum1 = self.unpool1 + self.conv4
        d3_shape = [ks4, ks4, num_k3, num_k4]
        self.deconv3, reg = ut.deconv2(inp=self.unpool1,
                                       shape=d3_shape,
                                       relu=True,
                                       name='deconv3',
                                       dropout=self.dropout,
                                       drop_prob=self.drop_prob,
                                       histogram=histogram,
                                       l2=True)
        self.reg.append(reg)

        self.sum1 = self.deconv3 + self.conv4
        d4_shape = [ks3, ks3, num_k2, num_k3]
        self.deconv4, reg = ut.deconv2(inp=self.sum1,
                                       shape=d4_shape,
                                       relu=True,
                                       name='deconv4',
                                       dropout=self.dropout,
                                       drop_prob=self.drop_prob,
                                       histogram=histogram,
                                       l2=True)
        self.reg.append(reg)

        self.unpool2 = ut.unpool_with_argmax(
            self.deconv4,
            self.ind1,
            input_shape=[self.x.get_shape()[0].value, 14, 14, num_k2],
            name='unpool2')

        #self.sum2 = self.unpool2 + self.conv2
        d5_shape = [ks2, ks2, num_k2, num_k2]
        self.deconv5, reg = ut.deconv2(inp=self.unpool2,
                                       shape=d5_shape,
                                       relu=True,
                                       name='deconv5',
                                       dropout=self.dropout,
                                       drop_prob=self.drop_prob,
                                       histogram=histogram,
                                       l2=True)
        self.reg.append(reg)

        self.sum2 = self.deconv5 + self.conv2
        d6_shape = [ks1, ks1, self.num_class, num_k2]
        self.deconv6, reg = ut.deconv2(inp=self.sum2,
                                       shape=d6_shape,
                                       relu=False,
                                       name='deconv6',
                                       histogram=histogram,
                                       l2=True)
        self.reg.append(reg)

        self.pre_logits = self.deconv6

        msg = '\n\t{0} \n\t{1} \n\t{2} \n\t{3} \n\t{4} \n\t{5} '
        msg += '\n\t{6} \n\t{7}'
        msg = msg.format(self.conv1, self.conv2, self.pool1, self.conv3,
                         self.conv4, self.pool2, self.conv5, self.conv6)
        msg += '\n\t{0} \n\t{1} \n\t{2} \n\t{3} \n\t{4} \n\t{5} '
        msg += '\n\t{6} \n\t{7}'
        msg = msg.format(self.deconv1, self.deconv2, self.unpool1,
                         self.deconv3, self.deconv4, self.unpool2,
                         self.deconv5, self.deconv6)
        print(msg)
Example #21
def main(argv=None):
    if tf.gfile.Exists(FLAGS.train_dir):
        tf.gfile.DeleteRecursively(FLAGS.train_dir)
    tf.gfile.MakeDirs(FLAGS.train_dir)

    global_step = tf.contrib.framework.get_or_create_global_step()
    save_path = os.path.join(FLAGS.train_dir, 'model_ckpt')

    # fetch an (image, label) batch pair
    image_batch, label_batch = inputs(data_type='train')

    # The loss sparse_softmax_cross_entropy_with_logits requires rank_of_labels = rank_of_images - 1,
    # so flatten label_batch
    label_batch = tf.reshape(label_batch, [50])

    # expand image dims from [batch, row, col] to [batch, row, col, depth=1]
    expand_image_batch = tf.expand_dims(image_batch, -1)

    input_placeholder = tf.placeholder_with_default(expand_image_batch,
                                                    shape=[None, 28, 28, 1],
                                                    name='input')

    # Build the model
    # First convolutional layer
    with tf.variable_scope('conv1') as scope:
        kernel = weight_variable('weights', shape=[5, 5, 1, 32])
        biases = bias_variable('biases', shape=[32])
        pre_activation = tf.nn.bias_add(conv2d(input_placeholder, kernel),
                                        biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    # First pooling layer
    pool1 = max_pool(conv1)

    # Second convolutional layer
    with tf.variable_scope('conv2') as scope:
        kernel = weight_variable('weights', shape=[5, 5, 32, 64])
        biases = bias_variable('biases', shape=[64])
        pre_activation = tf.nn.bias_add(conv2d(pool1, kernel), biases)
        conv2 = tf.nn.relu(pre_activation, name=scope.name)

    # Second pooling layer
    # 7*7*64
    pool2 = max_pool(conv2)

    # Fully connected layer
    with tf.variable_scope('fc1') as scope:
        weight_fc1 = weight_variable('weights', shape=[7 * 7 * 64, 1024])
        biases = bias_variable('biases', shape=[1024])
        pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])
        fc1 = tf.nn.relu((tf.matmul(pool2_flat, weight_fc1) + biases),
                         name=scope.name)
        print('Tensor fc1/relu: ', fc1.name)

    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    fc1_drop = tf.nn.dropout(fc1, keep_prob, name='fc1_drop')
    print('>>Tensor dropout: ', fc1_drop.name)

    # Output layer
    with tf.variable_scope('softmax_linear') as scope:
        weight_fc2 = weight_variable('weight', shape=[1024, 10])
        biases = bias_variable('biases', shape=[10])

        softmax_output = tf.add(tf.matmul(fc1_drop, weight_fc2),
                                biases,
                                name=scope.name)
        print('>>Tensor softmax_linear/softmax_output: ', softmax_output.name)

    loss = softmax_loss(logits=softmax_output, labels=label_batch)
    print('>>Tensor loss: ', loss.name)

    accuracy = train_accuracy(softmax_output, label_batch)
    print('>>Tensor accuracy: ', accuracy.name)

    train_op = train(loss, global_step)
    print('>>Tensor train_op: ', train_op.name)

    # initialize all variables
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        coord = tf.train.Coordinator()
        try:
            threads = []

            for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
                threads.extend(
                    qr.create_threads(sess, coord, daemon=True, start=True))

            saver = tf.train.Saver()
            step = 1

            while step <= 20000 and not coord.should_stop():
                if step % 100 == 0:
                    # print accuracy every 100 steps
                    runtime_accuracy = sess.run(accuracy,
                                                feed_dict={keep_prob: 1.0})
                    print(">>step %d, training accuracy %g" %
                          (step, runtime_accuracy))

                    # save the model every 1000 steps
                    if step % 1000 == 0:
                        saver.save(sess, save_path, global_step=step)

                # run one training step
                sess.run(train_op, feed_dict={keep_prob: 0.5})
                # advance the step counter
                step += 1
        except Exception as e:
            coord.request_stop(e)
        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)
Example #22
    def _add_layers(self, x):
        def residual(x, in_channel, out_channel, is_training, norm):
            """residual unit with 2 layers
            convolution:
                width filter: 1
                height filter: 3
            """
            orig_x = x
            with tf.variable_scope('conv1'):
                conv1 = utils.conv(x, [1, 3, in_channel, out_channel],
                                   [out_channel],
                                   padding='SAME')
                if norm:
                    conv1 = utils.batch_norm(conv1, is_training)
                relu1 = utils.activation(conv1)
            with tf.variable_scope('conv2'):
                conv2 = utils.conv(relu1, [1, 3, out_channel, out_channel],
                                   [out_channel],
                                   padding='SAME')
                if norm:
                    conv2 = utils.batch_norm(conv2, is_training)
            with tf.variable_scope('add'):
                if in_channel != out_channel:
                    orig_x = utils.conv(x, [1, 1, in_channel, out_channel],
                                        [out_channel],
                                        padding='SAME')

            return utils.activation(conv2 + orig_x)

        x_shape = x.get_shape()
        with tf.variable_scope('residual1'):
            r1 = residual(x, x_shape[-1], 32, self.is_training, self.norm)
            tf.summary.histogram('res_output1', r1)
        with tf.variable_scope('residual2'):
            r2 = residual(r1,
                          r1.get_shape()[-1], 32, self.is_training, self.norm)
            tf.summary.histogram('res_output2', r2)

        with tf.variable_scope('pool0'):
            h_pool0 = utils.max_pool(r2, 1, 2, 1, 2, padding='SAME')

        with tf.variable_scope('residual3'):
            r3 = residual(h_pool0,
                          h_pool0.get_shape()[-1], 64, self.is_training,
                          self.norm)
            tf.summary.histogram('res_output3', r3)
        with tf.variable_scope('residual4'):
            r4 = residual(r3,
                          r3.get_shape()[-1], 64, self.is_training, self.norm)
            tf.summary.histogram('res_output4', r4)

        with tf.variable_scope('pool1'):
            h_pool1 = utils.max_pool(r4, 1, 5, 1, 5, padding='SAME')

        with tf.variable_scope('full_conn_1'):
            flat_size = 5 * 64
            h_pool1_flat = tf.reshape(h_pool1, [-1, flat_size])
            h_fc1 = utils.full_conn(h_pool1_flat, [flat_size, 1024], [1024])
            h_fc1 = utils.activation(h_fc1)

        with tf.variable_scope('full_conn_2'):
            h_fc2 = utils.full_conn(h_fc1, [1024, 128], [128])
            h_fc2 = utils.activation(h_fc2)
        return h_fc2
Example #23
def fcnLayerNew(x, keep_probability):
    W1 = utils.weight_variable([3, 3, x.get_shape()[3].value, 128], name='W1')
    b1 = utils.bias_variable([128], 'bias1')
    conv1_1 = utils.conv2d(x, W1, b1, 'conv_1')
    conv1_2 = tf.nn.relu(conv1_1)

    W2 = utils.weight_variable([3, 3, conv1_2.get_shape()[3].value, 128],
                               name='W2')
    b2 = utils.bias_variable([128], 'bias2')
    conv2_1 = utils.conv2d(conv1_2, W2, b2, 'conv_2')
    conv2_2 = tf.nn.relu(conv2_1, 'relu_2')

    W3 = utils.weight_variable([3, 3, conv2_2.get_shape()[3].value, 64],
                               name='W3')
    b3 = utils.bias_variable([64], 'bias3')
    conv3_1 = utils.conv2d(conv2_2, W3, b3, 'conv_3')
    conv3_2 = tf.nn.relu(conv3_1, 'relu_3')
    conv3_3 = utils.max_pool(conv3_2, 2, name='conv_p3')

    W4 = utils.weight_variable([3, 3, conv3_3.get_shape()[3].value, 64],
                               name='W4')
    b4 = utils.bias_variable([64], 'bias4')
    conv4_1 = utils.conv2d(conv3_3, W4, b4, 'conv_4')
    conv4_2 = tf.nn.relu(conv4_1, 'relu_4')
    conv4_3 = utils.max_pool(conv4_2, 2, name='conv_p4')

    W5 = utils.weight_variable([3, 3, conv4_3.get_shape()[3].value, 64],
                               name='W5')
    b5 = utils.bias_variable([64], 'bias5')
    conv5_1 = utils.conv2d(conv4_3, W5, b5, 'conv_5')
    conv5_2 = tf.nn.relu(conv5_1, 'relu_5')
    conv5_3 = utils.max_pool(conv5_2, 2, name='conv_p5')

    # W6 = utils.weight_variable([1, 1, conv5_3.get_shape()[3].value, 1], name='W6')
    # b6 = utils.bias_variable([1], 'bias6')
    # conv6_1 = utils.conv2d(conv5_3, W6, b6, 'conv_6')
    # conv6_2 = tf.nn.relu(conv6_1, 'relu_6')

    # upscale
    deconv_shape1 = conv4_3.get_shape()
    W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, 64],
                                 name='W_t1')
    b_t1 = utils.bias_variable([deconv_shape1[3].value], name='b_t1')
    conv_t1 = utils.conv2d_transpose(conv5_3,
                                     W_t1,
                                     b_t1,
                                     tf.shape(conv4_3),
                                     stride=2)
    # fuse_1=tf.add(conv_t1,conv4_3,name='fuse_1')

    deconv_shape2 = conv3_3.get_shape()
    W_t2 = utils.weight_variable(
        [4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name='W_t2')
    b_t2 = utils.bias_variable([deconv_shape2[3].value], name='b_t2')
    conv_t2 = utils.conv2d_transpose(conv_t1,
                                     W_t2,
                                     b_t2,
                                     tf.shape(conv3_3),
                                     stride=2)
    # fuse_2 = tf.add(conv_t2, conv3_3, name='fuse_2')

    # deconv_shape1=conv5_3.get_shape()
    shape = tf.shape(x)
    deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], 1])
    W_t3 = utils.weight_variable([4, 4, 1, deconv_shape1[3].value],
                                 name='W_t3')
    b_t3 = utils.bias_variable([1], name='b_t3')
    conv_t3 = utils.conv2d_transpose(conv_t2,
                                     W_t3,
                                     b_t3,
                                     deconv_shape3,
                                     stride=2,
                                     name='out')

    return conv_t3
Example #24
def alexnet(X, trainlayers=[], weights=None):

    # current = X
    # layers = ['conv1', 'conv2', 'conv3', 'conv4', 'conv5']
    # for layer in layers:
    #     if 'conv' in layer:
    #         pass

    # 1st Layer: Conv (w ReLu) -> Lrn -> Pool with 1 group

    trainable = False
    if 'conv1' in trainlayers:
        # and, or, weights==None and init_wb=weights['conv1'] or None
        # conv1 = conv(X, 11, 11, 96, 4, 4, padding='VALID', name='conv1', trainable=True, init_wb=weights)
        trainable = True
    conv1 = conv(X,
                 11,
                 11,
                 96,
                 4,
                 4,
                 padding='VALID',
                 name='conv1',
                 init_wb=weights,
                 trainable=trainable)
    norm1 = lrn(conv1, 2, 2e-05, 0.75, name='norm1')
    pool1 = max_pool(norm1, 3, 3, 2, 2, padding='VALID', name='pool1')

    # 2nd Layer: Conv (w ReLu) -> Lrn -> Pool with 2 groups
    trainable = False
    if 'conv2' in trainlayers:
        trainable = True
    conv2 = conv(pool1,
                 5,
                 5,
                 256,
                 1,
                 1,
                 groups=2,
                 name='conv2',
                 init_wb=weights,
                 trainable=trainable)
    norm2 = lrn(conv2, 2, 2e-05, 0.75, name='norm2')
    pool2 = max_pool(norm2, 3, 3, 2, 2, padding='VALID', name='pool2')

    # 3rd Layer: Conv (w ReLu)
    trainable = False
    if 'conv3' in trainlayers:
        trainable = True
    conv3 = conv(pool2,
                 3,
                 3,
                 384,
                 1,
                 1,
                 name='conv3',
                 trainable=trainable,
                 init_wb=weights)

    # 4th Layer: Conv (w ReLu) split into two groups
    trainable = False
    if 'conv4' in trainlayers:
        trainable = True
    conv4 = conv(conv3,
                 3,
                 3,
                 384,
                 1,
                 1,
                 groups=2,
                 name='conv4',
                 trainable=trainable,
                 init_wb=weights)

    # 5th Layer: Conv (w ReLu) -> Pool split into two groups
    trainable = False
    if 'conv5' in trainlayers:
        trainable = True
    conv5 = conv(conv4,
                 3,
                 3,
                 256,
                 1,
                 1,
                 groups=2,
                 name='conv5',
                 trainable=trainable,
                 init_wb=weights)

    return conv5
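The `groups=2` arguments in Examples #1 and #24 reproduce AlexNet's original two-GPU split: input channels and filters are halved, convolved independently, and concatenated back together. A sketch of how such a `conv` helper typically implements grouping (names and signature are illustrative, not the original helper):

import tensorflow as tf

def grouped_conv(x, weights, biases, stride_y, stride_x, groups=1,
                 padding='SAME'):
    # weights shape: [filter_h, filter_w, in_channels / groups, out_channels]
    convolve = lambda i, k: tf.nn.conv2d(
        i, k, strides=[1, stride_y, stride_x, 1], padding=padding)
    if groups == 1:
        conv = convolve(x, weights)
    else:
        # Split input and filters channel-wise, convolve each group
        # separately, then stitch the outputs back together.
        input_groups = tf.split(x, num_or_size_splits=groups, axis=3)
        weight_groups = tf.split(weights, num_or_size_splits=groups, axis=3)
        conv = tf.concat([convolve(i, k)
                          for i, k in zip(input_groups, weight_groups)],
                         axis=3)
    return tf.nn.relu(tf.nn.bias_add(conv, biases))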