def inference_global_t_cnn_1layer(images, keep_prob, feat=[4]):

    _print_tensor_size(images, 'inference_global_t_cnn')
    assert isinstance(keep_prob, object)

    # global t
    # use the channel-wise filter, which goes across all channels
    conv_tensor = rsvp_quick_inference.inference_channel_wise_filter(images, 'conv1', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kwidth=1)
    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
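A minimal usage sketch, not from the original source: it assumes a TensorFlow 1.x graph and an EEG input laid out as [batch, time, electrodes, 1]; the placeholder names and the 64x64 shape are illustrative only.

# Usage sketch: placeholder names and shapes are assumptions for illustration.
import tensorflow as tf

images = tf.placeholder(tf.float32, shape=[None, 64, 64, 1], name='eeg_input')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')  # dropout keep probability

logits = inference_global_t_cnn_1layer(images, keep_prob, feat=[4])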
def inference_global_t_cnn_1layer(images, keep_prob, feat=[4]):

    _print_tensor_size(images, 'inference_global_t_cnn')
    assert isinstance(keep_prob, object)

    # global t
    # use the channel-wise filter, which goes across all channels
    conv_tensor = rsvp_quick_inference.inference_channel_wise_filter(
        images, 'conv1', out_feat=feat[0])

    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        conv_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
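The fully connected head called above is defined elsewhere in rsvp_quick_inference and is not shown here; the sketch below is only a plausible shape for such a one-layer head (flatten, dropout driven by keep_prob, dense projection to logits). The class count, initialisers, and variable names are assumptions, not the original implementation.

# Illustration only: a plausible one-layer fully connected head with dropout.
# The real inference_fully_connected_1layer lives in rsvp_quick_inference.
import numpy as np
import tensorflow as tf

def _example_fc_1layer(input_tensor, keep_prob, num_classes=2):
    # Flatten everything except the batch dimension (static shape assumed known).
    dim = int(np.prod(input_tensor.get_shape().as_list()[1:]))
    flat = tf.reshape(input_tensor, [-1, dim])
    # Dropout controlled by the same keep_prob placeholder used above.
    dropped = tf.nn.dropout(flat, keep_prob)
    weights = tf.get_variable('fc_weights', [dim, num_classes],
                              initializer=tf.truncated_normal_initializer(stddev=0.04))
    biases = tf.get_variable('fc_biases', [num_classes],
                             initializer=tf.constant_initializer(0.0))
    return tf.matmul(dropped, weights) + biases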
Example #3
def inference_global_t_cnn(images, keep_prob, layer=1, feat=[4]):

    _print_tensor_size(images, 'inference_global_t_cnn')
    assert isinstance(keep_prob, object)

    # global t
    # use the channel-wise filter, which goes across all channels
    conv_tensor = rsvp_quick_inference.inference_channel_wise_filter(images, 'conv1', out_feat=feat[0])
    # pool with kwidth=1: there is no width dimension left after the channel-wise filter
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(conv_tensor, kwidth=1)

    logits = rsvp_quick_inference.inference_fully_connected_1layer(pool_tensor, keep_prob)

    assert isinstance(logits, object)
    return logits
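The channel-wise filter used by all three functions is also defined in rsvp_quick_inference and not reproduced here; the sketch below shows one common way to realise a filter that "goes across channels" in TF 1.x, assuming a [batch, time, electrodes, 1] layout: a convolution whose kernel spans the whole electrode axis, collapsing that axis to size 1 (which would explain why the pooling above uses kwidth=1). The layout, names, and initialisers are assumptions, not the original implementation.

# Illustration only: one way to build a filter that goes across channels.
import tensorflow as tf

def _example_channel_wise_filter(images, name, out_feat=4):
    # Assumed layout: [batch, time, electrodes, 1]; the kernel spans all electrodes.
    n_electrodes = images.get_shape().as_list()[2]
    with tf.variable_scope(name):
        kernel = tf.get_variable(
            'weights', [1, n_electrodes, 1, out_feat],
            initializer=tf.truncated_normal_initializer(stddev=0.05))
        biases = tf.get_variable(
            'biases', [out_feat], initializer=tf.constant_initializer(0.0))
        # VALID padding collapses the electrode axis; the time axis is untouched.
        conv = tf.nn.conv2d(images, kernel, strides=[1, 1, 1, 1], padding='VALID')
        return tf.nn.relu(conv + biases)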