Example #1
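reconstruct_input_roicnn builds the forward convolution/pooling stack (5x5 filters, 2x2 max pooling), seeds the top-layer feature map by scattering the supplied maximum activations max_act_pl into the flat positions max_ind_pl (offset by filter_num), and then runs the unpooling/unfiltering pass back to input space using the recorded pooling switches. The snippets on this page assume TensorFlow 1.x graph mode and the project module rsvp_quick_deconv, i.e. roughly:

import tensorflow as tf      # TF 1.x API: tf.variable_scope, tf.assign, tf.scatter_update
import rsvp_quick_deconv     # project module providing the (de)convolution and (un)pooling builders
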
def reconstruct_input_roicnn(images, layer_num, filter_num, max_act_pl, max_ind_pl, layer, feat=[2, 4]):
    switches = []
    pool_tensor_shape = []
    conv_tensor_input_shape = []

    for l in range(0, layer_num + 1):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(images, 'conv0', in_feat=1, out_feat=feat[0])
            conv_tensor_input_shape.append(images.get_shape().as_list())
        else:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])
            conv_tensor_input_shape.append(pool_tensor.get_shape().as_list())

        pool_tensor, switches_tmp = rsvp_quick_deconv.deconv_pooling_n_filter(conv_tensor, 'pool' + str(l), kheight=2, kwidth=2)
        pool_tensor_shape.append(pool_tensor.get_shape().as_list())
        switches.append(switches_tmp)

        if l == layer_num:
            with tf.variable_scope('toplayer' + str(l)) as scope:
                # Set top layer activations based on maximum activations
                max_act_feat = tf.Variable(tf.zeros([pool_tensor_shape[l][3]*pool_tensor_shape[l][1]*pool_tensor_shape[l][2]]), name='max_act_feat')
                max_act_feat = tf.assign(max_act_feat, tf.zeros([pool_tensor_shape[l][3]*pool_tensor_shape[l][1]*pool_tensor_shape[l][2]]))
                max_features_tmp = tf.scatter_update(max_act_feat, max_ind_pl + filter_num * pool_tensor_shape[l][1] * pool_tensor_shape[l][2], max_act_pl)
                max_features_tmp2 = tf.reshape(max_features_tmp, [pool_tensor_shape[l][3], pool_tensor_shape[l][1],pool_tensor_shape[l][2]])
                max_features_tmp3 = tf.transpose(max_features_tmp2, [1, 2, 0])
                max_feature = tf.expand_dims(max_features_tmp3, 0)


    deconv_tensor = max_feature

    # Deconvolution network
    for l in range(layer_num, -1, -1):
        unpool_tensor = rsvp_quick_deconv.deconv_unpooling_n_filter(deconv_tensor , switches[l], 'pool' + str(l), kheight=2, kwidth=2)

        deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter \
                        (unpool_tensor, conv_tensor_input_shape[l], 'conv' + str(l))

        # if l == 0:
        #     deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter(unpool_tensor, conv_tensor_input_shape[l], 'conv0')
        # else:
        #     deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter \
        #         (unpool_tensor, conv_tensor_input_shape[l], 'conv' + str(l))

    returnTensors = []
    returnTensors.extend([max_act_feat])
    returnTensors.extend([max_feature])
    returnTensors.extend([deconv_tensor])

    returnTensors.extend(switches)

    return returnTensors
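
A hypothetical usage sketch (TF 1.x graph mode; the input shape, layer count, and feed values below are assumptions, not taken from the original code):

import numpy as np
import tensorflow as tf

images_pl  = tf.placeholder(tf.float32, [1, 64, 64, 1], name='images')  # assumed NHWC input
max_act_pl = tf.placeholder(tf.float32, [1], name='max_act')             # activation value(s) to project back
max_ind_pl = tf.placeholder(tf.int32,   [1], name='max_ind')             # flat spatial index of each activation

tensors = reconstruct_input_roicnn(images_pl, layer_num=1, filter_num=0,
                                   max_act_pl=max_act_pl, max_ind_pl=max_ind_pl, layer=1)
reconstruction = tensors[2]   # third entry is deconv_tensor, the input-space reconstruction

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    recon = sess.run(reconstruction,
                     feed_dict={images_pl: np.zeros((1, 64, 64, 1), np.float32),
                                max_act_pl: [1.0],
                                max_ind_pl: [0]})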
Example #2
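reconstruct_input_lasso_roicnn is a variant that uses local spatio-temporal filters (deconv_local_st5_filter / deconv_local_st5_unfilter) and starts the deconvolution pass from an externally supplied max_feature tensor instead of building it from max_act_pl and max_ind_pl (those two arguments, as well as filter_num and layer, go unused here). Note that poolh and poolw are not defined inside the function; they are presumably module-level pooling sizes (2 and 2 in Example #1).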
def reconstruct_input_lasso_roicnn(images, max_feature, layer_num, filter_num, max_act_pl, max_ind_pl, layer, feat=[2, 4]):
    switches = []
    pool_tensor_shape = []
    conv_tensor_input_shape = []

    for l in range(0, layer_num + 1):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_local_st5_filter(images, 'conv0', in_feat=1, out_feat=feat[0])
            conv_tensor_input_shape.append(images.get_shape().as_list())
        else:
            conv_tensor = rsvp_quick_deconv.deconv_local_st5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])
            conv_tensor_input_shape.append(pool_tensor.get_shape().as_list())

        pool_tensor, switches_tmp = rsvp_quick_deconv.deconv_pooling_n_filter(conv_tensor, 'pool' + str(l), kheight=poolh, kwidth=poolw)
        pool_tensor_shape.append(pool_tensor.get_shape().as_list())
        switches.append(switches_tmp)

    deconv_tensor = max_feature

    for l in range(layer_num, -1, -1):
        unpool_tensor = rsvp_quick_deconv.deconv_unpooling_n_filter(deconv_tensor , switches[l], 'pool' + str(l), kheight=poolh, kwidth=poolw)

        if l == 0:
            deconv_tensor = rsvp_quick_deconv.deconv_local_st5_unfilter(unpool_tensor, conv_tensor_input_shape[l], 'conv0')
        else:
            deconv_tensor = rsvp_quick_deconv.deconv_local_st5_unfilter \
                (unpool_tensor, conv_tensor_input_shape[l], 'conv' + str(l))

    returnTensors = []
    #returnTensors.extend([max_act_val]   )
    #returnTensors.extend([max_ind_val])
    returnTensors.extend([deconv_tensor])
    returnTensors.extend([pool_tensor])


    returnTensors.extend(switches)

    return returnTensors
Example #3
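reconstruct_input_lasso_cvcnn extends the previous variant: it uses the 5x5 filter builders, keeps every intermediate conv / pool / unpool / resize tensor, and, when layer_num == 1, also attaches the fully connected head deconv_fully_connected_1layer so its logits and hidden layers are returned. As in Example #2, poolh and poolw are assumed to be module-level constants; here deconv_unpooling_n_filter is expected to return both the unpooled tensor and a resized copy of it.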
def reconstruct_input_lasso_cvcnn(images, max_feature, keep_prob, layer_num, filter_num, max_act_pl, max_ind_pl, layer, feat=[2, 4]):
    switches = []
    pool_tensor_shape = []
    conv_tensor_input_shape = []
    pool_tensors = []
    deconv_tensors = []
    unpool_tensors = []
    unpool_resize_tensors  = []
    conv_tensors = []

    for l in range(0, layer_num + 1):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(images, 'conv0', in_feat=1, out_feat=feat[0])
            conv_tensor_input_shape.append(images.get_shape().as_list())
        else:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])
            conv_tensor_input_shape.append(pool_tensor.get_shape().as_list())

        conv_tensors.append(conv_tensor)
        pool_tensor, switches_tmp = rsvp_quick_deconv.deconv_pooling_n_filter(conv_tensor, 'pool' + str(l), kheight=poolh, kwidth=poolw)
        pool_tensor_shape.append(pool_tensor.get_shape().as_list())
        pool_tensors.append(pool_tensor)
        switches.append(switches_tmp)

    # deconv_tensor = max_feature

    if layer_num == 1:
        logits, layer1, layer2 = rsvp_quick_deconv.deconv_fully_connected_1layer(pool_tensor, keep_prob)

    deconv_tensor = max_feature


    for l in range(layer_num, -1, -1):
        unpool_tensor, unpool_resize_tensor = rsvp_quick_deconv.deconv_unpooling_n_filter(deconv_tensor , switches[l], 'pool' + str(l), kheight=poolh, kwidth=poolw)

        unpool_resize_tensors.append(unpool_resize_tensor)
        unpool_tensors.append(unpool_tensor)

        if l == 0:
            deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter(unpool_tensor, conv_tensor_input_shape[l], 'conv0')
        else:
            deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter \
                (unpool_tensor, conv_tensor_input_shape[l], 'conv' + str(l))

        deconv_tensors.append(deconv_tensor)

    returnTensors = []
    #returnTensors.extend([max_act_val]   )
    #returnTensors.extend([max_ind_val])
    returnTensors.extend(deconv_tensors)
    returnTensors.extend(pool_tensors)
    returnTensors.extend(switches)
    returnTensors.extend(unpool_tensors)
    returnTensors.extend(unpool_resize_tensors)
    returnTensors.extend(conv_tensors)

    if layer_num == 1:
        returnTensors.extend([logits, layer1, layer2])

    return returnTensors
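
The return list of this variant concatenates several per-layer lists, so a caller has to slice it by length. A minimal unpacking sketch, assuming the graph was built with layer_num = 1 (two conv/pool layers) and noting that deconv_tensors is ordered from the top layer down to layer 0:

n = layer_num + 1                                     # number of conv/pool layers
deconv_tensors        = returnTensors[0:n]            # top layer first, input layer last
pool_tensors          = returnTensors[n:2*n]
switches              = returnTensors[2*n:3*n]
unpool_tensors        = returnTensors[3*n:4*n]
unpool_resize_tensors = returnTensors[4*n:5*n]
conv_tensors          = returnTensors[5*n:6*n]
logits, layer1, layer2 = returnTensors[6*n:6*n+3]     # appended only when layer_num == 1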
Example #4
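This is the reconstruct_input_roicnn function from Example #1 again, shown with a different (wrapped) code layout; the logic is identical.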
def reconstruct_input_roicnn(images,
                             layer_num,
                             filter_num,
                             max_act_pl,
                             max_ind_pl,
                             layer,
                             feat=[2, 4]):
    switches = []
    pool_tensor_shape = []
    conv_tensor_input_shape = []

    for l in range(0, layer_num + 1):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(images,
                                                              'conv0',
                                                              in_feat=1,
                                                              out_feat=feat[0])
            conv_tensor_input_shape.append(images.get_shape().as_list())
        else:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])
            conv_tensor_input_shape.append(pool_tensor.get_shape().as_list())

        pool_tensor, switches_tmp = rsvp_quick_deconv.deconv_pooling_n_filter(
            conv_tensor, 'pool' + str(l), kheight=2, kwidth=2)
        pool_tensor_shape.append(pool_tensor.get_shape().as_list())
        switches.append(switches_tmp)

        if l == layer_num:
            with tf.variable_scope('toplayer' + str(l)) as scope:
                # Set top layer activations based on maximum activations
                max_act_feat = tf.Variable(tf.zeros([
                    pool_tensor_shape[l][3] * pool_tensor_shape[l][1] *
                    pool_tensor_shape[l][2]
                ]),
                                           name='max_act_feat')
                max_act_feat = tf.assign(
                    max_act_feat,
                    tf.zeros([
                        pool_tensor_shape[l][3] * pool_tensor_shape[l][1] *
                        pool_tensor_shape[l][2]
                    ]))
                max_features_tmp = tf.scatter_update(
                    max_act_feat, max_ind_pl + filter_num *
                    pool_tensor_shape[l][1] * pool_tensor_shape[l][2],
                    max_act_pl)
                max_features_tmp2 = tf.reshape(max_features_tmp, [
                    pool_tensor_shape[l][3], pool_tensor_shape[l][1],
                    pool_tensor_shape[l][2]
                ])
                max_features_tmp3 = tf.transpose(max_features_tmp2, [1, 2, 0])
                max_feature = tf.expand_dims(max_features_tmp3, 0)

    deconv_tensor = max_feature

    # Deconvolution network
    for l in range(layer_num, -1, -1):
        unpool_tensor = rsvp_quick_deconv.deconv_unpooling_n_filter(
            deconv_tensor, switches[l], 'pool' + str(l), kheight=2, kwidth=2)

        deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter \
                        (unpool_tensor, conv_tensor_input_shape[l], 'conv' + str(l))

        # if l == 0:
        #     deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter(unpool_tensor, conv_tensor_input_shape[l], 'conv0')
        # else:
        #     deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter \
        #         (unpool_tensor, conv_tensor_input_shape[l], 'conv' + str(l))

    returnTensors = []
    returnTensors.extend([max_act_feat])
    returnTensors.extend([max_feature])
    returnTensors.extend([deconv_tensor])

    returnTensors.extend(switches)

    return returnTensors