Example #1
def inference_roicnn(images, keep_prob, deconv = False, layer=2, feat=[2, 4]):

    _print_tensor_size(images, 'inference_roicnn')
    assert isinstance(keep_prob, object)

    if not layer == len(feat):
        print('Make sure you have defined the feature map size for each layer.')
        return

    #images2 = rsvp_quick_inference.inference_augment_s_filter(images)
    #

    # add noise
    #images2 = tf.cond(keep_prob < .999999, lambda: images + tf.truncated_normal(images.get_shape(), mean = 0.0, stddev = (lr /FLAGS.learning_rate) * (lr /FLAGS.learning_rate) * (lr /FLAGS.learning_rate) * 1.0), lambda: images)  # .8 was working well with .25 dropout and .992 or .994 decay

    # local st
    conv_tensor = rsvp_quick_inference.inference_local_st5_filter(images, 'conv0', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_s_filter(conv_tensor, 2)
    for l in range(1, layer):
        conv_tensor = rsvp_quick_inference.inference_local_st5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])
        pool_tensor = rsvp_quick_inference.inference_pooling_s_filter(conv_tensor, 'pool' + str(l), 2)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    if deconv:
        for l in range(layer-1, 0, -1):
            conv_tensor = rsvp_quick_inference.inference_local_st5_unfilter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])
            pool_tensor = rsvp_quick_inference.inference_unpooling_s_filter(conv_tensor, 'pool' + str(l), 2)
    else:
        logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
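These functions only build the inference graph; the training plumbing lives elsewhere in the repository and is not part of this listing. Below is a minimal sketch, under assumed placeholder shapes, loss, and optimizer, of how a function such as inference_roicnn might be wired into a TF 1.x training graph.

import tensorflow as tf

# hypothetical input shapes: a batch of single-channel EEG "images"
images = tf.placeholder(tf.float32, [None, 64, 64, 1], name='images')
labels = tf.placeholder(tf.int64, [None], name='labels')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')  # dropout keep probability

logits = inference_roicnn(images, keep_prob, layer=2, feat=[2, 4])

# cross-entropy on the returned logits, followed by a standard optimizer step
loss = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)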
Example #2
def inference_cvcnn(images, keep_prob, layer=2, feat=[2, 4]):

    _print_tensor_size(images, 'inference_cvcnn')
    assert isinstance(keep_prob, object)

    if not layer == len(feat):
        print(
            'Make sure you have defined the feature map size for each layer.')
        return

    # local st
    image_shape = images.get_shape().as_list()
    conv_tensor = rsvp_quick_inference.inference_5x5_filter(
        images, 'conv0', in_feat=image_shape[3], out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor)
    for l in range(1, layer):
        conv_tensor = rsvp_quick_inference.inference_5x5_filter\
            (pool_tensor, 'conv'+str(l), in_feat=feat[l-1], out_feat=feat[l])
        pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(
            conv_tensor)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
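The rsvp_quick_inference helpers themselves are not included in this listing. Purely to illustrate the in_feat/out_feat bookkeeping used above, here is a minimal sketch, with assumed names and initializers, of what a 5x5 convolution block and a 2x2 pooling step could look like in plain TF 1.x.

import tensorflow as tf

def conv5x5_block(inputs, name, in_feat, out_feat):
    # 5x5 convolution mapping in_feat to out_feat feature maps, followed by ReLU
    with tf.variable_scope(name):
        kernel = tf.get_variable('weights', [5, 5, in_feat, out_feat],
                                 initializer=tf.truncated_normal_initializer(stddev=0.1))
        biases = tf.get_variable('biases', [out_feat],
                                 initializer=tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(inputs, kernel, strides=[1, 1, 1, 1], padding='SAME')
        return tf.nn.relu(tf.nn.bias_add(conv, biases))

def pool2x2(inputs):
    # plain 2x2 max pooling over both the height and width axes
    return tf.nn.max_pool(inputs, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')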
Example #3
def inference_roi_ts_cnn(images, keep_prob, layer=2, feat=[2, 4]):

    _print_tensor_size(images, 'inference_roi_ts_cnn')
    assert isinstance(keep_prob, object)

    if not layer == len(feat):
        print(
            'Make sure you have defined the feature map size for each layer.')
        return

    # local st
    conv_tensor = rsvp_quick_inference.inference_roi_global_ts_filter(
        images, 'conv0', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_s_filter(conv_tensor,
                                                                  kwidth=1)
    for l in range(1, layer):
        conv_tensor = rsvp_quick_inference.inference_roi_s_filter\
            (pool_tensor, 'conv'+str(l), in_feat=feat[l-1], out_feat=feat[l])
        pool_tensor = rsvp_quick_inference.inference_pooling_s_filter(
            conv_tensor, kwidth=1)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
Example #4
def inference_local_s_cnn(images, keep_prob, layer=1, feat=[2]):

    _print_tensor_size(images, 'inference_local_s_cnn')
    assert isinstance(keep_prob, object)

    if not layer == len(feat):
        print('Make sure you have defined the feature map size for each layer.')
        return

    # local s
    # here use the 1*5 filter which goes across channels
    conv_tensor = rsvp_quick_inference.inference_spatial_filter(images, 'conv0', out_feat=feat[0])
    # the pooling kernel height is set to 1 because we only consider channel correlation
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kheight=1)
    for l in range(1, layer):
        # here use the 1*5 filter which goes across channels
        conv_tensor = rsvp_quick_inference.inference_spatial_filter\
            (pool_tensor, 'conv'+str(l), in_feat=feat[l-1], out_feat=feat[l])
        # the pooling kernel height is set to 1 because we only consider channel correlation
        pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kheight=1)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
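The spatial and temporal variants above pool with a kernel size of 1 along one axis so that only the other axis is reduced. A short sketch of that idea with plain tf.nn.max_pool, assuming the NHWC layout with channels on the height axis that the original comments imply:

import tensorflow as tf

def pool_time_only(inputs, kwidth=2):
    # kernel height 1: the channel (height) axis is left untouched,
    # only the time (width) axis is downsampled
    return tf.nn.max_pool(inputs, ksize=[1, 1, kwidth, 1],
                          strides=[1, 1, kwidth, 1], padding='SAME')

def pool_channel_only(inputs, kheight=2):
    # kernel width 1: only the channel (height) axis is downsampled
    return tf.nn.max_pool(inputs, ksize=[1, kheight, 1, 1],
                          strides=[1, kheight, 1, 1], padding='SAME')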
Example #5
def inference_local_s_cnn(images, keep_prob, layer=1, feat=[2]):

    _print_tensor_size(images, 'inference_local_s_cnn')
    assert isinstance(keep_prob, object)

    if not layer == len(feat):
        print(
            'Make sure you have defined the feature map size for each layer.')
        return

    # local s
    # here use the 1*5 filter which goes across channels
    conv_tensor = rsvp_quick_inference.inference_spatial_filter(
        images, 'conv0', out_feat=feat[0])
    # the pooling kernel height is set to 1 because we only consider channel correlation
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor,
                                                                  kheight=1)
    for l in range(1, layer):
        # here use the 1*5 filter which goes across channels
        conv_tensor = rsvp_quick_inference.inference_spatial_filter\
            (pool_tensor, 'conv'+str(l), in_feat=feat[l-1], out_feat=feat[l])
        # the pooling kernel height is set to 1 because we only consider channel correlation
        pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(
            conv_tensor, kheight=1)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_dnn_cnn_1layer(images, keep_prob, feat=[4]):

    _print_tensor_size(images, 'inference_dnn_cnn')
    assert isinstance(keep_prob, object)

    # apply the global filter on both temporal & spatial domain
    logits = rsvp_quick_inference.inference_fully_connected_1layer(images, keep_prob)

    assert isinstance(logits, object)
    return logits
Example #7
def inference_dnn_cnn(images, keep_prob, layer=1, feat=[4]):

    _print_tensor_size(images, 'inference_dnn_cnn')
    assert isinstance(keep_prob, object)

    # apply the global filter on both temporal & spatial domain
    logits = rsvp_quick_inference.inference_fully_connected_1layer(images, keep_prob)

    assert isinstance(logits, object)
    return logits
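_print_tensor_size is called at the top of every function here but is not part of this listing. A plausible minimal sketch, an assumption rather than the original helper, would simply log a tensor's static shape under a label:

def _print_tensor_size(given_tensor, label=''):
    # log the static shape so layer dimensions can be checked while the graph is built
    print('%s: %s' % (label, given_tensor.get_shape().as_list()))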
Example #8
def inference_roicnn(images, keep_prob, deconv=False, layer=2, feat=[2, 4]):

    _print_tensor_size(images, 'inference_roicnn')
    assert isinstance(keep_prob, object)

    if not layer == len(feat):
        print(
            'Make sure you have defined the feature map size for each layer.')
        return

    #images2 = rsvp_quick_inference.inference_augment_s_filter(images)
    #

    # add noise
    #images2 = tf.cond(keep_prob < .999999, lambda: images + tf.truncated_normal(images.get_shape(), mean = 0.0, stddev = (lr /FLAGS.learning_rate) * (lr /FLAGS.learning_rate) * (lr /FLAGS.learning_rate) * 1.0), lambda: images)  # .8 was working well with .25 dropout and .992 or .994 decay

    # local st
    conv_tensor = rsvp_quick_inference.inference_local_st5_filter(
        images, 'conv0', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_s_filter(
        conv_tensor, 2)
    for l in range(1, layer):
        conv_tensor = rsvp_quick_inference.inference_local_st5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])
        pool_tensor = rsvp_quick_inference.inference_pooling_s_filter(
            conv_tensor, 'pool' + str(l), 2)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        pool_tensor, keep_prob)

    if deconv:
        for l in range(layer - 1, 0, -1):
            conv_tensor = rsvp_quick_inference.inference_local_st5_unfilter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])
            pool_tensor = rsvp_quick_inference.inference_unpooling_s_filter(
                conv_tensor, 'pool' + str(l), 2)
    else:
        logits = rsvp_quick_inference.inference_fully_connected_1layer(
            pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_global_s_cnn_1layer(images, keep_prob, feat=[4]):

    _print_tensor_size(images, 'inference_global_s_cnn')
    assert isinstance(keep_prob, object)
    # here use the spatial filter which goes across time
    conv_tensor = rsvp_quick_inference.inference_time_wise_filter(images, 'conv1', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kheight=1)
    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_tscnn_1layer(images, keep_prob, feat=[2]):

    _print_tensor_size(images, 'inference_tscnn')
    assert isinstance(keep_prob, object)

    conv_tensor = rsvp_quick_inference.inference_global_ts_filter(images, 'conv0', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kwidth=1)
    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_stcnn_1layer(images, keep_prob, feat=[4]):

    _print_tensor_size(images, 'inference_stcnn')
    assert isinstance(keep_prob, object)

    # global spatial local temporal
    conv_tensor = rsvp_quick_inference.inference_global_st_filter(images, 'conv0', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kheight=1)
    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_local_s_cnn_1layer(images, keep_prob, layer=1, feat=[2]):

    _print_tensor_size(images, 'inference_local_s_cnn')
    assert isinstance(keep_prob, object)
    # local s
    # here use the 1*5 filter which goes across channels
    conv_tensor = rsvp_quick_inference.inference_spatial_filter(images, 'conv0', out_feat=feat[0])

    logits = rsvp_quick_inference.inference_fully_connected_1layer(conv_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_roicnn_1layer(images, keep_prob, feat=[2]):

    _print_tensor_size(images, 'inference_roicnn')
    assert isinstance(keep_prob, object)

    # local st
    conv_tensor = rsvp_quick_inference.inference_local_st5_filter(images, 'conv0', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_s_filter(conv_tensor)
    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_tscnn_1layer(images, keep_prob, feat=[2]):

    _print_tensor_size(images, 'inference_tscnn')
    assert isinstance(keep_prob, object)

    conv_tensor = rsvp_quick_inference.inference_global_ts_filter(
        images, 'conv0', out_feat=feat[0])

    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        conv_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_global_s_cnn_1layer(images, keep_prob, feat=[4]):

    _print_tensor_size(images, 'inference_global_s_cnn')
    assert isinstance(keep_prob, object)
    # here use the spatial filter which goes across time
    conv_tensor = rsvp_quick_inference.inference_time_wise_filter(
        images, 'conv1', out_feat=feat[0])

    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        conv_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_local_s_cnn_1layer(images, keep_prob, layer=1, feat=[2]):

    _print_tensor_size(images, 'inference_local_s_cnn')
    assert isinstance(keep_prob, object)
    # local s
    # here use the 1*5 filter which goes across channels
    conv_tensor = rsvp_quick_inference.inference_spatial_filter(images, 'conv0', out_feat=feat[0])
    # the pooling kernel height is set to 1 because we only consider channel correlation
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kheight=1)
    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_global_t_cnn_1layer(images, keep_prob, feat=[4]):

    _print_tensor_size(images, 'inference_global_t_cnn')
    assert isinstance(keep_prob, object)

    # global t
    # here use the channel wise filter which goes across channels
    conv_tensor = rsvp_quick_inference.inference_channel_wise_filter(images, 'conv1', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kwidth=1)
    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_local_s_cnn_1layer(images, keep_prob, layer=1, feat=[2]):

    _print_tensor_size(images, 'inference_local_s_cnn')
    assert isinstance(keep_prob, object)
    # local s
    # here use the 1*5 filter which goes across channels
    conv_tensor = rsvp_quick_inference.inference_spatial_filter(
        images, 'conv0', out_feat=feat[0])

    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        conv_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_roicnn_1layer(images, keep_prob, feat=[2]):

    _print_tensor_size(images, 'inference_roicnn')
    assert isinstance(keep_prob, object)

    # local st
    conv_tensor = rsvp_quick_inference.inference_local_st5_filter(
        images, 'conv0', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_s_filter(conv_tensor)
    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_stcnn_1layer(images, keep_prob, feat=[4]):

    _print_tensor_size(images, 'inference_stcnn')
    assert isinstance(keep_prob, object)

    # global spatial local temporal
    conv_tensor = rsvp_quick_inference.inference_global_st_filter(
        images, 'conv0', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor,
                                                                  kheight=1)
    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
Example #21
def inference_global_s_cnn(images, keep_prob, layer=1, feat=[4]):

    _print_tensor_size(images, 'inference_global_s_cnn')
    assert isinstance(keep_prob, object)

    # global s
    # here use the spatial filter which goes across time
    conv_tensor = rsvp_quick_inference.inference_time_wise_filter(images, 'conv1', out_feat=feat[0])
    # the pooling kernel height is set to 1 because there is no channel axis anymore
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kheight=1)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
Example #22
def inference_global_s_cnn(images, keep_prob, layer=1, feat=[4]):

    _print_tensor_size(images, 'inference_global_s_cnn')
    assert isinstance(keep_prob, object)

    # global s
    # here use the spatial filter which goes across time
    conv_tensor = rsvp_quick_inference.inference_time_wise_filter(images, 'conv1', out_feat=feat[0])
    # the pooling kernel height is set to 1 because there is no channel axis anymore
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kheight=1)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
Example #23
def inference_global_t_cnn(images, keep_prob, layer=1, feat=[4]):

    _print_tensor_size(images, 'inference_global_t_cnn')
    assert isinstance(keep_prob, object)

    # global t
    # here use the channel wise filter which goes across channels
    conv_tensor = rsvp_quick_inference.inference_channel_wise_filter(images, 'conv1', out_feat=feat[0])
    # the pooling kernel width is set to 1 because there is no width axis anymore
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kwidth=1)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def test_cvcnn(images, keep_prob, layer=2, feat=[2, 4]):

    for l in range(0, layer):
        if l == 0:
            # the first layer takes its input depth from the images rather than from feat
            conv_tensor = rsvp_quick_inference.inference_5x5_filter(
                images, 'conv0', in_feat=images.get_shape().as_list()[3], out_feat=feat[0])
        else:
            conv_tensor = rsvp_quick_inference.inference_5x5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])

        # poolh and poolw are assumed to be module-level pooling sizes defined elsewhere
        pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, 'pool' + str(l), kheight=poolh, kwidth=poolw)  # was 1 x 4

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)

    return logits
Example #25
def test_roicnn(images, keep_prob, layer=2, feat=[2, 4]):

    for l in range(0, layer):
        if l == 0:
            conv_tensor = rsvp_quick_inference.inference_5x5_filter(images, 'conv0', out_feat=feat[0])
        else:
            conv_tensor = rsvp_quick_inference.inference_5x5_filter \
                (pool_tensor, 'conv' + str(l), in_feat=feat[l - 1], out_feat=feat[l])

        pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, 'pool' + str(l), kheight=2, kwidth=2)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)

    return logits
def inference_local_s_cnn_1layer(images, keep_prob, layer=1, feat=[2]):

    _print_tensor_size(images, 'inference_local_s_cnn')
    assert isinstance(keep_prob, object)
    # local s
    # here use the 1*5 filter which goes across channels
    conv_tensor = rsvp_quick_inference.inference_spatial_filter(
        images, 'conv0', out_feat=feat[0])
    # the pooling kernel height is set to 1 because we only consider channel correlation
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor,
                                                                  kheight=1)
    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
Example #27
def inference_tscnn(images, keep_prob, layer=2, feat=[2, 4]):

    _print_tensor_size(images, 'inference_tscnn')
    assert isinstance(keep_prob, object)

    # global temporal local temporal
    conv_tensor = rsvp_quick_inference.inference_global_ts_filter(images, 'conv0', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kwidth=1)

    for l in range(1, layer):
        # here use the 1*5 filter which goes across channels
        conv_tensor = rsvp_quick_inference.inference_spatial_filter\
            (pool_tensor, 'conv'+str(l), in_feat=feat[l-1], out_feat=feat[l])
        # the pooling kernel width is set to 1 because we only consider channel correlation
        pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kwidth=1)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
Example #28
def inference_tscnn(images, keep_prob, layer=2, feat=[2, 4]):

    _print_tensor_size(images, 'inference_tscnn')
    assert isinstance(keep_prob, object)

    # global temporal local temporal
    conv_tensor = rsvp_quick_inference.inference_global_ts_filter(images, 'conv0', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kwidth=1)

    for l in range(1, layer):
        # here use the 1*5 filter which goes across channels
        conv_tensor = rsvp_quick_inference.inference_spatial_filter\
            (pool_tensor, 'conv'+str(l), in_feat=feat[l-1], out_feat=feat[l])
        # the pooling kernel width is set to 1 because we only consider channel correlation
        pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kwidth=1)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
Example #29
def inference_cvcnn(images, keep_prob, layer=2, feat=[2, 4]):

    _print_tensor_size(images, 'inference_cvcnn')
    assert isinstance(keep_prob, object)

    if not layer == len(feat):
        print('Make sure you have defined the feature map size for each layer.')
        return

    # local st
    conv_tensor = rsvp_quick_inference.inference_5x5_filter(images, 'conv0', keep_prob, in_feat=1, out_feat=feat[0])
    # poolh and poolw are assumed to be module-level pooling sizes defined elsewhere;
    # the switches returned here are what a later unpooling/deconv pass would need
    pool_tensor, switches_tmp = rsvp_quick_deconv.deconv_pooling_n_filter(conv_tensor, 'pool0', kheight=poolh, kwidth=poolw)
    for l in range(1, layer):
        conv_tensor = rsvp_quick_inference.inference_5x5_filter\
            (pool_tensor, 'conv'+str(l), keep_prob, in_feat=feat[l-1], out_feat=feat[l])
        pool_tensor, switches_tmp = rsvp_quick_deconv.deconv_pooling_n_filter(conv_tensor, 'pool'+str(l), kheight=poolh, kwidth=poolw)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
def inference_cvcnn(images, keep_prob, layer=2, feat=[2, 4]):

    _print_tensor_size(images, 'inference_cvcnn')
    assert isinstance(keep_prob, object)

    if not layer == len(feat):
        print('Make sure you have defined the feature map size for each layer.')
        return

    # local st
    conv_tensor = rsvp_quick_inference.inference_5x5_filter(images, 'conv0', keep_prob, in_feat=1, out_feat=feat[0])
    # poolh and poolw are assumed to be module-level pooling sizes defined elsewhere;
    # the switches returned here are what a later unpooling/deconv pass would need
    pool_tensor, switches_tmp = rsvp_quick_deconv.deconv_pooling_n_filter(conv_tensor, 'pool0', kheight=poolh, kwidth=poolw)
    for l in range(1, layer):
        conv_tensor = rsvp_quick_inference.inference_5x5_filter\
            (pool_tensor, 'conv'+str(l), keep_prob, in_feat=feat[l-1], out_feat=feat[l])
        pool_tensor, switches_tmp = rsvp_quick_deconv.deconv_pooling_n_filter(conv_tensor, 'pool'+str(l), kheight=poolh, kwidth=poolw)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
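The deconv_pooling_n_filter calls above return the pooled tensor together with pooling switches, the usual ingredient for unpooling in a later deconvolution pass. A minimal sketch of how such switches can be obtained in TF 1.x; this is an illustration only, the actual rsvp_quick_deconv implementation is not shown here.

import tensorflow as tf

def pool_with_switches(inputs, kheight=2, kwidth=2):
    # max_pool_with_argmax returns both the pooled values and the flat indices
    # (switches) of the maxima, which an unpooling layer can use to put values
    # back at their original positions
    pooled, switches = tf.nn.max_pool_with_argmax(
        inputs, ksize=[1, kheight, kwidth, 1],
        strides=[1, kheight, kwidth, 1], padding='SAME')
    return pooled, switches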
Example #31
def inference_roi_ts_cnn(images, keep_prob, layer=2, feat=[2, 4]):

    _print_tensor_size(images, 'inference_roi_ts_cnn')
    assert isinstance(keep_prob, object)

    if not layer == len(feat):
        print('Make sure you have defined the feature map size for each layer.')
        return

    # local st
    conv_tensor = rsvp_quick_inference.inference_roi_global_ts_filter(images, 'conv0', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_s_filter(conv_tensor, kwidth=1)
    for l in range(1, layer):
        conv_tensor = rsvp_quick_inference.inference_roi_s_filter\
            (pool_tensor, 'conv'+str(l), in_feat=feat[l-1], out_feat=feat[l])
        pool_tensor = rsvp_quick_inference.inference_pooling_s_filter(conv_tensor, kwidth=1)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
Example #32
def inference_cvcnn(images, keep_prob, layer=2, feat=[2, 4]):

    _print_tensor_size(images, 'inference_cvcnn')
    assert isinstance(keep_prob, object)

    if not layer == len(feat):
        print('Make sure you have defined the feature map size for each layer.')
        return

    # local st
    image_shape = images.get_shape().as_list()
    conv_tensor = rsvp_quick_inference.inference_5x5_filter(images, 'conv0', in_feat=image_shape[3], out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor)
    for l in range(1, layer):
        conv_tensor = rsvp_quick_inference.inference_5x5_filter\
            (pool_tensor, 'conv'+str(l), in_feat=feat[l-1], out_feat=feat[l])
        pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
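inference_fully_connected_1layer is the common classification head in every example above but is likewise not included in this listing. As an illustration of the pattern (flatten, dropout driven by keep_prob, then a linear layer producing logits), here is a hedged sketch; the variable names and the number of classes are assumptions.

import tensorflow as tf

NUM_CLASSES = 2  # assumed binary target / non-target RSVP labels

def fully_connected_1layer_sketch(pool_tensor, keep_prob):
    with tf.variable_scope('fc1'):
        # flatten whatever layout the conv/pool stack produced
        dim = 1
        for d in pool_tensor.get_shape().as_list()[1:]:
            dim *= d
        flat = tf.reshape(pool_tensor, [-1, dim])
        # dropout is driven by the keep_prob placeholder used throughout the examples
        dropped = tf.nn.dropout(flat, keep_prob)
        weights = tf.get_variable('weights', [dim, NUM_CLASSES],
                                  initializer=tf.truncated_normal_initializer(stddev=0.04))
        biases = tf.get_variable('biases', [NUM_CLASSES],
                                 initializer=tf.constant_initializer(0.0))
        return tf.matmul(dropped, weights) + biases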