Example #1
def compute_last_conv_layer_cvcnn(images, cur_image_num, keep_prob, layer=2, feat=[2, 4]):
    # Stack `layer` conv/pool blocks, then a single fully connected head.
    # Assumes the module-level pooling sizes `poolh`/`poolw`, `tf`, and the
    # `rsvp_quick_deconv` helpers are defined elsewhere in the source file.
    pool_tensor_shape = []

    for l in range(0, layer):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(
                images, 'conv0', in_feat=1, out_feat=feat[0])
        else:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(
                pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])

        with tf.variable_scope('layer' + str(l)):
            pool_tensor, _ = rsvp_quick_deconv.deconv_pooling_n_filter(
                conv_tensor, 'pool' + str(l), kheight=poolh, kwidth=poolw)
            pool_tensor_shape.append(pool_tensor.get_shape().as_list())

    # Fully connected layer on top of the last pooled feature map.
    logits, layer1, layer2 = rsvp_quick_deconv.deconv_fully_connected_1layer(pool_tensor, keep_prob)

    returnTensors = [pool_tensor, layer2, logits]

    return returnTensors
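
A minimal usage sketch for the function above, assuming the surrounding module provides rsvp_quick_deconv, poolh, and poolw; the placeholder shape and names are illustrative assumptions, not taken from the original project.

import tensorflow as tf

# Hypothetical graph construction; input shape is an assumption.
images_pl = tf.placeholder(tf.float32, shape=[None, 64, 64, 1], name='images')
keep_prob_pl = tf.placeholder(tf.float32, name='keep_prob')

# The function returns [pool_tensor, layer2, logits] as built above.
pool_out, fc_out, logits = compute_last_conv_layer_cvcnn(
    images_pl, cur_image_num=0, keep_prob=keep_prob_pl, layer=2, feat=[2, 4])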
Example #3
def reconstruct_input_lasso_cvcnn(images, max_feature, keep_prob, layer_num, filter_num, max_act_pl, max_ind_pl, layer, feat=[2, 4]):
    # Forward pass: conv + max-pool for layers 0..layer_num, keeping the pooling
    # switches and per-layer input shapes needed for the deconvolution pass.
    # Assumes the module-level pooling sizes `poolh`/`poolw` and the
    # `rsvp_quick_deconv` helpers, as in the example above.
    switches = []
    pool_tensor_shape = []
    conv_tensor_input_shape = []
    pool_tensors = []
    deconv_tensors = []
    unpool_tensors = []
    unpool_resize_tensors = []
    conv_tensors = []

    for l in range(0, layer_num + 1):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(
                images, 'conv0', in_feat=1, out_feat=feat[0])
            conv_tensor_input_shape.append(images.get_shape().as_list())
        else:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(
                pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])
            conv_tensor_input_shape.append(pool_tensor.get_shape().as_list())

        conv_tensors.append(conv_tensor)
        pool_tensor, switches_tmp = rsvp_quick_deconv.deconv_pooling_n_filter(
            conv_tensor, 'pool' + str(l), kheight=poolh, kwidth=poolw)
        pool_tensor_shape.append(pool_tensor.get_shape().as_list())
        pool_tensors.append(pool_tensor)
        switches.append(switches_tmp)

    # The fully connected head is only built for the two-layer (layer_num == 1) case.
    if layer_num == 1:
        logits, layer1, layer2 = rsvp_quick_deconv.deconv_fully_connected_1layer(pool_tensor, keep_prob)

    # Deconvolution pass: start from the selected feature map and walk the layers
    # in reverse, unpooling with the stored switches and unfiltering back to each
    # layer's input shape.
    deconv_tensor = max_feature

    for l in range(layer_num, -1, -1):
        unpool_tensor, unpool_resize_tensor = rsvp_quick_deconv.deconv_unpooling_n_filter(
            deconv_tensor, switches[l], 'pool' + str(l), kheight=poolh, kwidth=poolw)

        unpool_resize_tensors.append(unpool_resize_tensor)
        unpool_tensors.append(unpool_tensor)

        deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter(
            unpool_tensor, conv_tensor_input_shape[l], 'conv' + str(l))

        deconv_tensors.append(deconv_tensor)

    returnTensors = []
    returnTensors.extend(deconv_tensors)
    returnTensors.extend(pool_tensors)
    returnTensors.extend(switches)
    returnTensors.extend(unpool_tensors)
    returnTensors.extend(unpool_resize_tensors)
    returnTensors.extend(conv_tensors)

    if layer_num == 1:
        returnTensors.extend([logits, layer1, layer2])

    return returnTensors
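
A similar hedged sketch of how the reconstruction graph might be wired up. In practice max_feature would be a masked copy of a pooled activation; here it is only a placeholder whose shape (and every other shape and name below) is an assumption chosen to match 2x2 pooling on a 64x64 input.

import tensorflow as tf

# Hypothetical wiring; shapes, names, and layer_num are assumptions.
images_pl = tf.placeholder(tf.float32, shape=[None, 64, 64, 1], name='images')
keep_prob_pl = tf.placeholder(tf.float32, name='keep_prob')
max_feature_pl = tf.placeholder(tf.float32, shape=[None, 16, 16, 4], name='max_feature')

# filter_num, max_act_pl, max_ind_pl, and layer are unused by the function body.
tensors = reconstruct_input_lasso_cvcnn(
    images_pl, max_feature_pl, keep_prob_pl, layer_num=1, filter_num=0,
    max_act_pl=None, max_ind_pl=None, layer=2, feat=[2, 4])

# For layer_num == 1 the first two entries are the deconv reconstructions,
# with the final reconstruction back in input space appended last.
deconv_tensors = tensors[:2]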