Code example #1
0
def compute_responses(mlps_dir):
    stim_type = 'breakfast'
    spatial_downsample_factor = 4

    if stim_type == 'breakfast':
        video_dir = './temp/breakfast_sorted/cam01/'
        features_dir = os.path.join(
            './temp/video_features/',
            os.path.basename(os.path.normpath(mlps_dir)))
    else:
        raise NotImplementedError

    print 'Reading in the videos from %s...' % mlps_dir
    videos = read_videos(video_dir,
                         spatial_downsample_factor=spatial_downsample_factor)
    print 'Read.'

    action_names = os.listdir(video_dir)
    print 'Classes : ', action_names

    for i in range(11):
        print 'Using MLP %d...' % i
        with open(os.path.join(mlps_dir, 'mlp_%d' % i), 'rb') as f:
            Wh, bh, Wo, bo = pickle.load(f)
            mlp = create_mlp_from_params(Wh, bh, Wo, bo, rectify=True)
            print 'MLP created from saved parameters. Computing MLP output...'

            sim_rsps = [[transform(mlp, video) for video in class_videos]
                        for class_videos in videos]

            print 'Storing MLP output on %s videos...' % stim_type
            mlp_output_dir = os.path.join(features_dir, 'mlp_%d' % i)
            dump_features(mlp_output_dir, sim_rsps, action_names)
            print 'Stored.\n'
Code example #2
0
def compute_responses(mlps_dir):
    stim_type = 'breakfast'
    spatial_downsample_factor = 4

    if stim_type == 'breakfast':
        video_dir = './temp/breakfast_sorted/cam01/'
        features_dir = os.path.join('./temp/video_features/',
                                    os.path.basename(
                                        os.path.normpath(mlps_dir)))
    else:
        raise NotImplementedError
    
    print 'Reading in the videos from %s...' % mlps_dir
    videos = read_videos(video_dir,
            spatial_downsample_factor=spatial_downsample_factor)
    print 'Read.'

    action_names = os.listdir(video_dir)
    print 'Classes : ', action_names

    for i in range(11):
        print 'Using MLP %d...' % i
        with open(os.path.join(mlps_dir, 'mlp_%d' % i), 'rb') as f:
            Wh, bh, Wo, bo = pickle.load(f)
            mlp = create_mlp_from_params(Wh, bh, Wo, bo, rectify=True)
            print 'MLP created from saved parameters. Computing MLP output...'

            sim_rsps = [[transform(mlp, video) for video in class_videos]
                        for class_videos in videos]

            print 'Storing MLP output on %s videos...' % stim_type
            mlp_output_dir = os.path.join(features_dir, 'mlp_%d' % i)
            dump_features(mlp_output_dir, sim_rsps, action_names)
            print 'Stored.\n'
Code example #3
0
def get_simulated_responses(model_dir, videos):
    """Load each of the 11 pickled MLPs from `model_dir` and apply it to
    `videos`, returning one nested response list per MLP.
    """
    all_responses = []
    for idx in range(11):
        param_path = os.path.join(model_dir, 'mlp_%d' % idx)
        with open(param_path, 'rb') as params_file:
            # Each file pickles the parameter tuple (Wh, bh, Wo, bo).
            Wh, bh, Wo, bo = pickle.load(params_file)
            net = create_mlp_from_params(Wh, bh, Wo, bo, rectify=True)
            per_class = []
            for class_videos in videos:
                per_class.append([transform(net, vid)
                                  for vid in class_videos])
            all_responses.append(per_class)
    return all_responses
Code example #4
0
def get_simulated_responses(model_dir, videos):
    """Compute simulated responses of the 11 saved MLPs over `videos`.

    Returns a list with one entry per MLP; each entry is a per-class list
    of transformed videos.
    """
    def _load_mlp(index):
        # Files are named 'mlp_<i>' and pickle the tuple (Wh, bh, Wo, bo).
        with open(os.path.join(model_dir, 'mlp_%d' % index), 'rb') as fh:
            Wh, bh, Wo, bo = pickle.load(fh)
        return create_mlp_from_params(Wh, bh, Wo, bo, rectify=True)

    responses = []
    for k in range(11):
        mlp = _load_mlp(k)
        responses.append(
            [[transform(mlp, video) for video in group]
             for group in videos])
    return responses
Code example #5
0
        # Read videos.
        # videos is a generator. So have to create it again and again.
        print 'Reading videos...'
        if stim_type == 'grating':
            videos = read_grating_videos(grating_dirs)
        else:
            videos = read_videos(video_dir, spatial_downsample_factor)

        print 'Applying the neural network (%d) transformation...' % i
        """
        net.print_parameters()
        net_features = [[transform(net, video, mean_rsps[i])
                         for video in class_videos]
                        for class_videos in videos]
        """
        net_features = [[transform(net, video)
                         for video in class_videos]
                        for class_videos in videos]

        print 'Dumping the neural features...'
        net_features_dir = os.path.join(features_dir, 'net%d' % i)
        dump_features(net_features, net_features_dir)

        features.append(net_features)

predictions = []
for i, net_features in enumerate(features):
    print 'Using features from network %d...' % i
    if stim_type == 'grating':
        tr_features, te_features = train_test_split_grating(net_features,
                                                            grating_train_idxs)