def inference_global_s_cnn_1layer(images, keep_prob, feat=None):
    """Global spatial CNN, one conv layer: time-wise conv -> pool -> FC logits.

    Args:
        images: input tensor batch (layout/shape determined by
            rsvp_quick_inference.inference_time_wise_filter — TODO confirm).
        keep_prob: dropout keep probability forwarded to the FC layer.
        feat: per-layer output feature counts; only feat[0] is used here.
            Defaults to [4].

    Returns:
        Logits tensor produced by inference_fully_connected_1layer.

    NOTE(review): a function with this exact name is defined again later in
    this file (without the pooling step); at import time the later definition
    shadows this one — confirm which variant is actually intended.
    """
    # Sentinel default instead of a shared mutable default argument.
    feat = [4] if feat is None else feat
    _print_tensor_size(images, 'inference_global_s_cnn')
    # Spatial filter that goes across the time dimension.
    conv_tensor = rsvp_quick_inference.inference_time_wise_filter(
        images, 'conv1', out_feat=feat[0])
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(
        conv_tensor, kheight=1)
    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        pool_tensor, keep_prob)
    # Dropped the original `assert isinstance(x, object)` checks: every
    # Python object satisfies them, so they were dead code (and asserts
    # are stripped under -O anyway).
    return logits
def inference_global_s_cnn_1layer(images, keep_prob, feat=None):
    """Global spatial CNN, one conv layer, no pooling: time-wise conv -> FC.

    Args:
        images: input tensor batch (layout/shape determined by
            rsvp_quick_inference.inference_time_wise_filter — TODO confirm).
        keep_prob: dropout keep probability forwarded to the FC layer.
        feat: per-layer output feature counts; only feat[0] is used here.
            Defaults to [4].

    Returns:
        Logits tensor produced by inference_fully_connected_1layer.

    NOTE(review): this redefines a function of the same name declared
    earlier in this file (which included a pooling step); this later
    definition wins at import time. Rename one of the two if both
    variants are meant to be callable.
    """
    # Sentinel default instead of a shared mutable default argument.
    feat = [4] if feat is None else feat
    _print_tensor_size(images, 'inference_global_s_cnn')
    # Spatial filter that goes across the time dimension; the conv output
    # feeds the fully connected layer directly (no pooling in this variant).
    conv_tensor = rsvp_quick_inference.inference_time_wise_filter(
        images, 'conv1', out_feat=feat[0])
    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        conv_tensor, keep_prob)
    # Dropped the original always-true `assert isinstance(x, object)` checks.
    return logits
def inference_global_s_cnn(images, keep_prob, layer=1, feat=None):
    """Global spatial CNN: time-wise conv -> height-1 pool -> FC logits.

    Args:
        images: input tensor batch (layout/shape determined by
            rsvp_quick_inference.inference_time_wise_filter — TODO confirm).
        keep_prob: dropout keep probability forwarded to the FC layer.
        layer: accepted for interface compatibility with sibling builders,
            but NOT used by this implementation — only one conv layer is
            ever built. TODO(review): wire it up or document why it exists.
        feat: per-layer output feature counts; only feat[0] is used here.
            Defaults to [4].

    Returns:
        Logits tensor produced by inference_fully_connected_1layer.
    """
    # Sentinel default instead of a shared mutable default argument.
    feat = [4] if feat is None else feat
    _print_tensor_size(images, 'inference_global_s_cnn')
    # Spatial filter that goes across the time dimension.
    conv_tensor = rsvp_quick_inference.inference_time_wise_filter(
        images, 'conv1', out_feat=feat[0])
    # Pooling keeps height at 1 because there is no channel dimension
    # left to pool over after the global spatial filter.
    pool_tensor = rsvp_quick_inference.inference_pooling_n_filter(
        conv_tensor, kheight=1)
    logits = rsvp_quick_inference.inference_fully_connected_1layer(
        pool_tensor, keep_prob)
    # Dropped the original always-true `assert isinstance(x, object)` checks.
    return logits