Example #1
def reconstruct_input_roicnn(images, layer_num, filter_num, max_act_pl, max_ind_pl, layer, feat=[2, 4]):
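    # Forward pass: rebuild the conv/pool stack to record the pooling switches
    # and the input shape of each conv layer, then inject the selected maximum
    # activation into an otherwise-zero top-layer feature map and run the
    # unpooling/deconvolution pass back to image space.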
    switches = []
    pool_tensor_shape = []
    conv_tensor_input_shape = []

    for l in range(0, layer_num + 1):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(images, 'conv0', in_feat=1, out_feat=feat[0])
            conv_tensor_input_shape.append(images.get_shape().as_list())
        else:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])
            conv_tensor_input_shape.append(pool_tensor.get_shape().as_list())

        pool_tensor, switches_tmp = rsvp_quick_deconv.deconv_pooling_n_filter(conv_tensor, 'pool' + str(l), kheight=2, kwidth=2)
        pool_tensor_shape.append(pool_tensor.get_shape().as_list())
        switches.append(switches_tmp)

        if l == layer_num:
            with tf.variable_scope('toplayer' + str(l)) as scope:
                # Set top layer activations based on maximum activations
                max_act_feat = tf.Variable(tf.zeros([pool_tensor_shape[l][3]*pool_tensor_shape[l][1]*pool_tensor_shape[l][2]]), name='max_act_feat')
                max_act_feat = tf.assign(max_act_feat, tf.zeros([pool_tensor_shape[l][3]*pool_tensor_shape[l][1]*pool_tensor_shape[l][2]]))
                max_features_tmp = tf.scatter_update(max_act_feat, max_ind_pl + filter_num * pool_tensor_shape[l][1] * pool_tensor_shape[l][2], max_act_pl)
                max_features_tmp2 = tf.reshape(max_features_tmp, [pool_tensor_shape[l][3], pool_tensor_shape[l][1],pool_tensor_shape[l][2]])
                max_features_tmp3 = tf.transpose(max_features_tmp2, [1, 2, 0])
                max_feature = tf.expand_dims(max_features_tmp3, 0)


    deconv_tensor = max_feature

    # Deconvolution network
    for l in range(layer_num, -1, -1):
        unpool_tensor = rsvp_quick_deconv.deconv_unpooling_n_filter(deconv_tensor, switches[l], 'pool' + str(l), kheight=2, kwidth=2)

        deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter \
                        (unpool_tensor, conv_tensor_input_shape[l], 'conv' + str(l))

        # if l == 0:
        #     deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter(unpool_tensor, conv_tensor_input_shape[l], 'conv0')
        # else:
        #     deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter \
        #         (unpool_tensor, conv_tensor_input_shape[l], 'conv' + str(l))

    returnTensors = []
    returnTensors.extend([max_act_feat])
    returnTensors.extend([max_feature])
    returnTensors.extend([deconv_tensor])

    returnTensors.extend(switches)

    return returnTensors
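A minimal usage sketch for the function above, assuming TensorFlow 1.x graph mode (not part of the original source): the 64x64 input size, the injected activation value, and the flattened index fed below are illustrative assumptions, and rsvp_quick_deconv must be importable for the graph to build.

import numpy as np
import tensorflow as tf

# Placeholder shapes are assumptions; the real RSVP pipeline defines them.
images_pl = tf.placeholder(tf.float32, [1, 64, 64, 1], name='images_pl')
max_act_pl = tf.placeholder(tf.float32, [1], name='max_act_pl')  # activation value to inject
max_ind_pl = tf.placeholder(tf.int64, [1], name='max_ind_pl')    # flattened spatial index

tensors = reconstruct_input_roicnn(images_pl, layer_num=1, filter_num=0,
                                   max_act_pl=max_act_pl, max_ind_pl=max_ind_pl, layer=1)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    reconstruction = sess.run(tensors[2],  # deconv_tensor: the reconstructed input
                              feed_dict={images_pl: np.zeros([1, 64, 64, 1], dtype=np.float32),
                                         max_act_pl: [1.0],
                                         max_ind_pl: [0]})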
Example #2
def compute_last_conv_layer_cvcnn(images, cur_image_num, keep_prob, layer=2, feat=[2, 4]):
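    # poolh, poolw (pooling kernel size) and the list pool_tensor_shape are
    # assumed to be module-level globals defined elsewhere in the source file;
    # they are used below without being initialised locally.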
    for l in range(0, layer):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(images, 'conv0', in_feat=1, out_feat=feat[0])
        else:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])


        with tf.variable_scope('layer' + str(l)) as scope:
            pool_tensor, _ = rsvp_quick_deconv.deconv_pooling_n_filter(conv_tensor, 'pool' + str(l), kheight=poolh, kwidth=poolw)
            pool_tensor_shape.append(pool_tensor.get_shape().as_list())

    logits, layer1, layer2 = rsvp_quick_deconv.deconv_fully_connected_1layer(pool_tensor, keep_prob)

    returnTensors = []
    returnTensors.extend([pool_tensor])
    returnTensors.extend([layer2])
    returnTensors.extend([logits])

    return returnTensors
Example #3
def find_max_activation_roicnn(images, cur_image_num, layer=2, feat=[2, 4]):
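    # The lists appended to below (pool_tensor_shape, max_acitvations_threshold,
    # max_activation, max_ind, max_image_ind, cur_image_ind, cur_activation,
    # cur_ind, selection, max_threshold, update1, update2, update3) and the
    # helpers update_max_threshold() and clear_variables() are assumed to be
    # defined at module level elsewhere in the source file.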
    for l in range(0, layer):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(images, 'conv0', in_feat=1, out_feat=feat[0])
        else:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])


        with tf.variable_scope('layer' + str(l)) as scope:
            pool_tensor, _ = rsvp_quick_deconv.deconv_pooling_n_filter(conv_tensor, 'pool' + str(l), kheight=2, kwidth=2)
            pool_tensor_shape.append(pool_tensor.get_shape().as_list())
            # Initialize variables
            num_filters = pool_tensor_shape[l][3]
            max_acitvations_threshold.append(tf.Variable(tf.fill([num_filters], 10e+20 ),
                                                             name='max_acitvations_threshold'))

            max_activation.append(tf.Variable(-10e+20 * tf.ones([num_filters]), name='max_activation'))
            max_ind.append(tf.Variable(tf.fill([num_filters], tf.constant(-1, dtype=tf.int64)), name='max_ind'))
            max_image_ind.append(tf.Variable(tf.fill([num_filters], tf.constant(-1, dtype=tf.int64)), name='max_image_ind'))


            cur_image_ind.append(tf.fill([pool_tensor_shape[l][3]], cur_image_num))

            # Get maximum activations
            max_acitvations_threshold_tmp1 = tf.expand_dims(max_acitvations_threshold[l], 0)
            max_acitvations_threshold_tmp2 = tf.expand_dims(max_acitvations_threshold_tmp1, 0)
            max_acitvations_threshold_tmp3 = tf.expand_dims(max_acitvations_threshold_tmp2, 0)

            max_threshold.append(tf.tile(max_acitvations_threshold_tmp3, [1,pool_tensor_shape[l][1], pool_tensor_shape[l][2], 1]))
            pool_tensor2 = tf.select(max_threshold[l] >= pool_tensor, pool_tensor, -10e+20 * tf.ones_like(pool_tensor))
            cur_activation.append(tf.reduce_max(pool_tensor2, [0, 1, 2]))
            pool_tensor2 = tf.transpose(pool_tensor2, [3, 1, 2, 0])
            pool_tensor3 = tf.reshape(pool_tensor2, [pool_tensor_shape[l][3], pool_tensor_shape[l][1] * pool_tensor_shape[l][2]])
            cur_ind.append(tf.argmax(pool_tensor3, 1))


            selection.append(tf.logical_and(cur_activation[l] > max_activation[l],
                                       max_acitvations_threshold[l] > cur_activation[l]))

            # Update maximum activations
            updated_max_ind = tf.select(selection[l], cur_ind[l], max_ind[l])
            update1.append(tf.assign(max_ind[l], updated_max_ind))
            updated_max_image_ind = tf.select(selection[l], cur_image_ind[l], max_image_ind[l])
            update2.append(tf.assign(max_image_ind[l], updated_max_image_ind))
            updated_max_activations = tf.select(selection[l], cur_activation[l], max_activation[l])
            update3.append(tf.assign(max_activation[l], updated_max_activations))

    returnTensors = []
    # Return data regarding maximums
    returnTensors.extend(cur_activation)
    returnTensors.extend(selection)
    returnTensors.extend(update1)
    returnTensors.extend(update2)
    returnTensors.extend(update3)
    returnTensors.extend(max_acitvations_threshold)
    returnTensors.extend(max_ind)
    returnTensors.extend(max_image_ind)
    returnTensors.extend(max_activation)

    update_max_threshold(layer=2)

    clear_variables(layer=2)

    return returnTensors
Example #4
def reconstruct_input_lasso_cvcnn(images, max_feature, keep_prob, layer_num, filter_num, max_act_pl, max_ind_pl, layer, feat=[2, 4]):
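    # poolh and poolw (pooling kernel size) are assumed to be module-level
    # globals defined elsewhere in the source file.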
    switches = []
    pool_tensor_shape = []
    conv_tensor_input_shape = []
    pool_tensors = []
    deconv_tensors = []
    unpool_tensors = []
    unpool_resize_tensors = []
    conv_tensors = []

    for l in range(0, layer_num + 1):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(images, 'conv0', in_feat=1, out_feat=feat[0])
            conv_tensor_input_shape.append(images.get_shape().as_list())
        else:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])
            conv_tensor_input_shape.append(pool_tensor.get_shape().as_list())

        conv_tensors.append(conv_tensor)
        pool_tensor, switches_tmp = rsvp_quick_deconv.deconv_pooling_n_filter(conv_tensor, 'pool' + str(l), kheight=poolh, kwidth=poolw)
        pool_tensor_shape.append(pool_tensor.get_shape().as_list())
        pool_tensors.append(pool_tensor)
        switches.append(switches_tmp)

    # deconv_tensor = max_feature

    if (layer_num == 1):
        logits, layer1, layer2 = rsvp_quick_deconv.deconv_fully_connected_1layer(pool_tensor, keep_prob)

    deconv_tensor = max_feature


    for l in range(layer_num, -1, -1):
        unpool_tensor, unpool_resize_tensor = rsvp_quick_deconv.deconv_unpooling_n_filter(deconv_tensor, switches[l], 'pool' + str(l), kheight=poolh, kwidth=poolw)

        unpool_resize_tensors.append(unpool_resize_tensor)
        unpool_tensors.append(unpool_tensor)

        if l == 0:
            deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter(unpool_tensor, conv_tensor_input_shape[l], 'conv0')
        else:
            deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter \
                (unpool_tensor, conv_tensor_input_shape[l], 'conv' + str(l))

        deconv_tensors.append(deconv_tensor)

    returnTensors = []
    #returnTensors.extend([max_act_val]   )
    #returnTensors.extend([max_ind_val])
    returnTensors.extend(deconv_tensors)
    returnTensors.extend(pool_tensors)
    returnTensors.extend(switches)
    returnTensors.extend(unpool_tensors)
    returnTensors.extend(unpool_resize_tensors)
    returnTensors.extend(conv_tensors)

    if (layer_num == 1):
        returnTensors.extend([logits, layer1, layer2])

    return returnTensors
Example #5
def find_max_activation_cvcnn(images, cur_image_num, layer=2, feat=[2, 4]):
    #global pool_tensor_shape, switches_batch, max_activation, max_ind, max_image_ind, cur_activation, cur_ind, cur_image_ind, max_acitvations_threshold

    # pool_tensor_shape -- shapes of the pooled tensors, one entry per layer
    # switches_batch -- pooling switches collected for the current batch
    # max_activation -- global maximum activation per feature over all images, one entry per layer
    # max_ind -- flattened spatial index of the global maximum activation per feature, one entry per layer
    # max_image_ind -- index of the image that produced each maximum activation (used to regenerate switches)
    # cur_activation -- maximum activation per feature on the current image, one entry per layer
    # cur_ind -- index of the maximum activation per feature on the current image, one entry per layer
    # cur_image_ind -- index of the current image
    # max_acitvations_threshold -- threshold for discarding maximum activations
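    # poolh, poolw and the lists selection, max_threshold, update1, update2,
    # update3, as well as the helpers update_max_threshold() and
    # clear_variables(), are likewise assumed to be module-level globals.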

    for l in range(0, layer):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(images, 'conv0', in_feat=1, out_feat=feat[0])
        else:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])


        with tf.variable_scope('layer' + str(l)) as scope:
            pool_tensor, _ = rsvp_quick_deconv.deconv_pooling_n_filter(conv_tensor, 'pool' + str(l), kheight=poolh, kwidth=poolw)
            pool_tensor_shape.append(pool_tensor.get_shape().as_list())
            # Initialize variables
            num_filters = pool_tensor_shape[l][3]
            max_acitvations_threshold.append(tf.Variable(tf.fill([num_filters], 10e+20 ),
                                                             name='max_acitvations_threshold'))

            max_activation.append(tf.Variable(-10e+20 * tf.ones([num_filters]), name='max_activation'))
            max_ind.append(tf.Variable(tf.fill([num_filters], tf.constant(-1, dtype=tf.int64)), name='max_ind'))
            max_image_ind.append(tf.Variable(tf.fill([num_filters], tf.constant(-1, dtype=tf.int64)), name='max_image_ind'))

            #max_activation.append(tf.Variable(tf.random_uniform(shape=[num_filters], minval=0.0001, maxval=0.0002), name='max_activation'))
            #cur_activation.append(tf.Variable(tf.zeros([num_filters]), name='cur_activation'))
            #cur_ind.append(tf.Variable(tf.fill([num_filters], tf.constant(-1, dtype=tf.int64)), name='cur_ind'))   #tf.Variable(tf.constant(-1, shape=[num_filters]))
            #cur_image_ind.append(tf.Variable(tf.fill([num_filters], tf.cast(-1, dtype=tf.int64)), name='cur_image_ind'))
            #selection.append(tf.Variable(tf.fill([num_filters], tf.constant(False)), name='selection'))
            #max_threshold.append(tf.Variable(tf.zeros([1,pool_tensor_shape[l][1], pool_tensor_shape[l][2], pool_tensor_shape[l][3]]), name='max_threshold'))
            #max_threshold.append(tf.Variable(tf.fill([num_filters], tf.constant(-1, dtype=tf.int64)), name='max_ind'))

            cur_image_ind.append(tf.fill([pool_tensor_shape[l][3]], cur_image_num))

            max_acitvations_threshold_tmp1 = tf.expand_dims(max_acitvations_threshold[l], 0)
            max_acitvations_threshold_tmp2 = tf.expand_dims(max_acitvations_threshold_tmp1, 0)
            max_acitvations_threshold_tmp3 = tf.expand_dims(max_acitvations_threshold_tmp2, 0)

            max_threshold.append(tf.tile(max_acitvations_threshold_tmp3, [1,pool_tensor_shape[l][1], pool_tensor_shape[l][2], 1]))
            pool_tensor2 = tf.select(max_threshold[l] >= pool_tensor, pool_tensor, -10e+20 * tf.ones_like(pool_tensor))
            cur_activation.append(tf.reduce_max(pool_tensor2, [0, 1, 2]))
            pool_tensor2 = tf.transpose(pool_tensor2, [3, 1, 2, 0])
            pool_tensor3 = tf.reshape(pool_tensor2, [pool_tensor_shape[l][3], pool_tensor_shape[l][1] * pool_tensor_shape[l][2]])
            cur_ind.append(tf.argmax(pool_tensor3, 1))


            selection.append(tf.logical_and(cur_activation[l] > max_activation[l],
                                       max_acitvations_threshold[l] > cur_activation[l]))

            updated_max_ind = tf.select(selection[l], cur_ind[l], max_ind[l])
            update1.append(tf.assign(max_ind[l], updated_max_ind))
            updated_max_image_ind = tf.select(selection[l], cur_image_ind[l], max_image_ind[l])
            update2.append(tf.assign(max_image_ind[l], updated_max_image_ind))
            updated_max_activations = tf.select(selection[l], cur_activation[l], max_activation[l])
            update3.append(tf.assign(max_activation[l], updated_max_activations))


    returnTensors = []
    #returnTensors.extend(cur_image_ind)
    #returnTensors.extend(cur_activation)
    #returnTensors.extend(cur_ind)
    #returnTensors.extend(max_ind)
    #returnTensors.extend(max_threshold)
    #returnTensors.extend([pool_tensor])
    #returnTensors.extend([pool_tensor2])
    #returnTensors.extend(max_acitvations_threshold)
    #returnTensors.extend([pool_tensor3])

    returnTensors.extend(cur_activation)
    returnTensors.extend(selection)
    returnTensors.extend(update1)
    returnTensors.extend(update2)
    returnTensors.extend(update3)
    returnTensors.extend(max_acitvations_threshold)
    returnTensors.extend(max_ind)
    returnTensors.extend(max_image_ind)
    returnTensors.extend(max_activation)

    update_max_threshold(layer=2)

    clear_variables(layer=2)

    return returnTensors
Example #6
def find_max_activation_cvcnn(images, cur_image_num, layer=2, feat=[2, 4]):
    #global pool_tensor_shape, switches_batch, max_activation, max_ind, max_image_ind, cur_activation, cur_ind, cur_image_ind, max_acitvations_threshold

    # pool_tensor_shape -- shapes of the pooled tensors, one entry per layer
    # switches_batch -- pooling switches collected for the current batch
    # max_activation -- global maximum activation per feature over all images, one entry per layer
    # max_ind -- flattened spatial index of the global maximum activation per feature, one entry per layer
    # max_image_ind -- index of the image that produced each maximum activation (used to regenerate switches)
    # cur_activation -- maximum activation per feature on the current image, one entry per layer
    # cur_ind -- index of the maximum activation per feature on the current image, one entry per layer
    # cur_image_ind -- index of the current image
    # max_acitvations_threshold -- threshold for discarding maximum activations
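    # poolh, poolw and the lists selection, max_threshold, update1, update2,
    # update3, as well as the helpers update_max_threshold() and
    # clear_variables(), are likewise assumed to be module-level globals.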

    for l in range(0, layer):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(images, 'conv0', in_feat=1, out_feat=feat[0])
        else:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])


        with tf.variable_scope('layer' + str(l)) as scope:
            pool_tensor, _ = rsvp_quick_deconv.deconv_pooling_n_filter(conv_tensor, 'pool' + str(l), kheight=poolh, kwidth=poolw)
            pool_tensor_shape.append(pool_tensor.get_shape().as_list())
            # Initialize variables
            num_filters = pool_tensor_shape[l][3]
            max_acitvations_threshold.append(tf.Variable(tf.fill([num_filters], 10e+20 ),
                                                             name='max_acitvations_threshold'))

            max_activation.append(tf.Variable(-10e+20 * tf.ones([num_filters]), name='max_activation'))
            max_ind.append(tf.Variable(tf.fill([num_filters], tf.constant(-1, dtype=tf.int64)), name='max_ind'))
            max_image_ind.append(tf.Variable(tf.fill([num_filters], tf.constant(-1, dtype=tf.int64)), name='max_image_ind'))

            #max_activation.append(tf.Variable(tf.random_uniform(shape=[num_filters], minval=0.0001, maxval=0.0002), name='max_activation'))
            #cur_activation.append(tf.Variable(tf.zeros([num_filters]), name='cur_activation'))
            #cur_ind.append(tf.Variable(tf.fill([num_filters], tf.constant(-1, dtype=tf.int64)), name='cur_ind'))   #tf.Variable(tf.constant(-1, shape=[num_filters]))
            #cur_image_ind.append(tf.Variable(tf.fill([num_filters], tf.cast(-1, dtype=tf.int64)), name='cur_image_ind'))
            #selection.append(tf.Variable(tf.fill([num_filters], tf.constant(False)), name='selection'))
            #max_threshold.append(tf.Variable(tf.zeros([1,pool_tensor_shape[l][1], pool_tensor_shape[l][2], pool_tensor_shape[l][3]]), name='max_threshold'))
            #max_threshold.append(tf.Variable(tf.fill([num_filters], tf.constant(-1, dtype=tf.int64)), name='max_ind'))

            cur_image_ind.append(tf.fill([pool_tensor_shape[l][3]], cur_image_num))

            max_acitvations_threshold_tmp1 = tf.expand_dims(max_acitvations_threshold[l], 0)
            max_acitvations_threshold_tmp2 = tf.expand_dims(max_acitvations_threshold_tmp1, 0)
            max_acitvations_threshold_tmp3 = tf.expand_dims(max_acitvations_threshold_tmp2, 0)

            max_threshold.append(tf.tile(max_acitvations_threshold_tmp3, [1,pool_tensor_shape[l][1], pool_tensor_shape[l][2], 1]))
            pool_tensor2 = tf.where(max_threshold[l] >= pool_tensor, pool_tensor, -10e+20 * tf.ones_like(pool_tensor))
            cur_activation.append(tf.reduce_max(pool_tensor2, [0, 1, 2]))
            pool_tensor2 = tf.transpose(pool_tensor2, [3, 1, 2, 0])
            pool_tensor3 = tf.reshape(pool_tensor2, [pool_tensor_shape[l][3], pool_tensor_shape[l][1] * pool_tensor_shape[l][2]])
            cur_ind.append(tf.argmax(pool_tensor3, 1))


            selection.append(tf.logical_and(cur_activation[l] > max_activation[l],
                                       max_acitvations_threshold[l] > cur_activation[l]))

            updated_max_ind = tf.where(selection[l], cur_ind[l], max_ind[l])
            update1.append(tf.assign(max_ind[l], updated_max_ind))
            updated_max_image_ind = tf.where(selection[l], cur_image_ind[l], max_image_ind[l])
            update2.append(tf.assign(max_image_ind[l], updated_max_image_ind))
            updated_max_activations = tf.where(selection[l], cur_activation[l], max_activation[l])
            update3.append(tf.assign(max_activation[l], updated_max_activations))


    returnTensors = []
    #returnTensors.extend(cur_image_ind)
    #returnTensors.extend(cur_activation)
    #returnTensors.extend(cur_ind)
    #returnTensors.extend(max_ind)
    #returnTensors.extend(max_threshold)
    #returnTensors.extend([pool_tensor])
    #returnTensors.extend([pool_tensor2])
    #returnTensors.extend(max_acitvations_threshold)
    #returnTensors.extend([pool_tensor3])

    returnTensors.extend(cur_activation)
    returnTensors.extend(selection)
    returnTensors.extend(update1)
    returnTensors.extend(update2)
    returnTensors.extend(update3)
    returnTensors.extend(max_acitvations_threshold)
    returnTensors.extend(max_ind)
    returnTensors.extend(max_image_ind)
    returnTensors.extend(max_activation)

    update_max_threshold(layer=2)

    clear_variables(layer=2)

    return returnTensors
Example #7
def reconstruct_input_roicnn(images,
                             layer_num,
                             filter_num,
                             max_act_pl,
                             max_ind_pl,
                             layer,
                             feat=[2, 4]):
    switches = []
    pool_tensor_shape = []
    conv_tensor_input_shape = []

    for l in range(0, layer_num + 1):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(images,
                                                              'conv0',
                                                              in_feat=1,
                                                              out_feat=feat[0])
            conv_tensor_input_shape.append(images.get_shape().as_list())
        else:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])
            conv_tensor_input_shape.append(pool_tensor.get_shape().as_list())

        pool_tensor, switches_tmp = rsvp_quick_deconv.deconv_pooling_n_filter(
            conv_tensor, 'pool' + str(l), kheight=2, kwidth=2)
        pool_tensor_shape.append(pool_tensor.get_shape().as_list())
        switches.append(switches_tmp)

        if l == layer_num:
            with tf.variable_scope('toplayer' + str(l)) as scope:
                # Set top layer activations based on maximum activations
                max_act_feat = tf.Variable(tf.zeros([
                    pool_tensor_shape[l][3] * pool_tensor_shape[l][1] *
                    pool_tensor_shape[l][2]
                ]),
                                           name='max_act_feat')
                max_act_feat = tf.assign(
                    max_act_feat,
                    tf.zeros([
                        pool_tensor_shape[l][3] * pool_tensor_shape[l][1] *
                        pool_tensor_shape[l][2]
                    ]))
                max_features_tmp = tf.scatter_update(
                    max_act_feat, max_ind_pl + filter_num *
                    pool_tensor_shape[l][1] * pool_tensor_shape[l][2],
                    max_act_pl)
                max_features_tmp2 = tf.reshape(max_features_tmp, [
                    pool_tensor_shape[l][3], pool_tensor_shape[l][1],
                    pool_tensor_shape[l][2]
                ])
                max_features_tmp3 = tf.transpose(max_features_tmp2, [1, 2, 0])
                max_feature = tf.expand_dims(max_features_tmp3, 0)

    deconv_tensor = max_feature

    # Deconvolution network
    for l in range(layer_num, -1, -1):
        unpool_tensor = rsvp_quick_deconv.deconv_unpooling_n_filter(
            deconv_tensor, switches[l], 'pool' + str(l), kheight=2, kwidth=2)

        deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter \
                        (unpool_tensor, conv_tensor_input_shape[l], 'conv' + str(l))

        # if l == 0:
        #     deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter(unpool_tensor, conv_tensor_input_shape[l], 'conv0')
        # else:
        #     deconv_tensor = rsvp_quick_deconv.deconv_5x5_unfilter \
        #         (unpool_tensor, conv_tensor_input_shape[l], 'conv' + str(l))

    returnTensors = []
    returnTensors.extend([max_act_feat])
    returnTensors.extend([max_feature])
    returnTensors.extend([deconv_tensor])

    returnTensors.extend(switches)

    return returnTensors
Example #8
def find_max_activation_roicnn(images, cur_image_num, layer=2, feat=[2, 4]):
    for l in range(0, layer):
        if l == 0:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter(images,
                                                              'conv0',
                                                              in_feat=1,
                                                              out_feat=feat[0])
        else:
            conv_tensor = rsvp_quick_deconv.deconv_5x5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])

        with tf.variable_scope('layer' + str(l)) as scope:
            pool_tensor, _ = rsvp_quick_deconv.deconv_pooling_n_filter(
                conv_tensor, 'pool' + str(l), kheight=2, kwidth=2)
            pool_tensor_shape.append(pool_tensor.get_shape().as_list())
            # Initialize variables
            num_filters = pool_tensor_shape[l][3]
            max_acitvations_threshold.append(
                tf.Variable(tf.fill([num_filters], 10e+20),
                            name='max_acitvations_threshold'))

            max_activation.append(
                tf.Variable(-10e+20 * tf.ones([num_filters]),
                            name='max_activation'))
            max_ind.append(
                tf.Variable(tf.fill([num_filters],
                                    tf.constant(-1, dtype=tf.int64)),
                            name='max_ind'))
            max_image_ind.append(
                tf.Variable(tf.fill([num_filters],
                                    tf.constant(-1, dtype=tf.int64)),
                            name='max_image_ind'))

            cur_image_ind.append(
                tf.fill([pool_tensor_shape[l][3]], cur_image_num))

            # Get maximum activations
            max_acitvations_threshold_tmp1 = tf.expand_dims(
                max_acitvations_threshold[l], 0)
            max_acitvations_threshold_tmp2 = tf.expand_dims(
                max_acitvations_threshold_tmp1, 0)
            max_acitvations_threshold_tmp3 = tf.expand_dims(
                max_acitvations_threshold_tmp2, 0)

            max_threshold.append(
                tf.tile(
                    max_acitvations_threshold_tmp3,
                    [1, pool_tensor_shape[l][1], pool_tensor_shape[l][2], 1]))
            pool_tensor2 = tf.select(max_threshold[l] >= pool_tensor,
                                     pool_tensor,
                                     -10e+20 * tf.ones_like(pool_tensor))
            cur_activation.append(tf.reduce_max(pool_tensor2, [0, 1, 2]))
            pool_tensor2 = tf.transpose(pool_tensor2, [3, 1, 2, 0])
            pool_tensor3 = tf.reshape(pool_tensor2, [
                pool_tensor_shape[l][3],
                pool_tensor_shape[l][1] * pool_tensor_shape[l][2]
            ])
            cur_ind.append(tf.argmax(pool_tensor3, 1))

            selection.append(
                tf.logical_and(
                    cur_activation[l] > max_activation[l],
                    max_acitvations_threshold[l] > cur_activation[l]))

            # Update maximum activations
            updated_max_ind = tf.select(selection[l], cur_ind[l], max_ind[l])
            update1.append(tf.assign(max_ind[l], updated_max_ind))
            updated_max_image_ind = tf.select(selection[l], cur_image_ind[l],
                                              max_image_ind[l])
            update2.append(tf.assign(max_image_ind[l], updated_max_image_ind))
            updated_max_activations = tf.select(selection[l],
                                                cur_activation[l],
                                                max_activation[l])
            update3.append(
                tf.assign(max_activation[l], updated_max_activations))

    returnTensors = []
    # Return data regarding maximums
    returnTensors.extend(cur_activation)
    returnTensors.extend(selection)
    returnTensors.extend(update1)
    returnTensors.extend(update2)
    returnTensors.extend(update3)
    returnTensors.extend(max_acitvations_threshold)
    returnTensors.extend(max_ind)
    returnTensors.extend(max_image_ind)
    returnTensors.extend(max_activation)

    update_max_threshold(layer=2)

    clear_variables(layer=2)

    return returnTensors