Exemplo n.º 1
0
def eval_state_data(
        data_dir,
        num_examples=128,
        batch_size=32,
        tfrecords_file_format="codraw_%s_combined_state_glove.tfrecords",
        split='train'):
    """Pull up to `num_examples` batches from the state model's input
    pipeline and regroup the per-step rows into per-example sequences.

    Args:
        data_dir: Directory holding the tfrecords and ``CoDraw_1_0.json``.
        num_examples: Maximum number of batches to consume from the dataset.
        batch_size: Batch size forwarded to the model's input_fn.
        tfrecords_file_format: Filename template with a ``%s`` slot for
            the split name.
        split: Dataset split name passed to the input_fn and to `load_gt`.

    Returns:
        A pair ``(seqs, gts)`` of parallel lists: ``seqs[i]`` is the list of
        per-step feature dicts (numpy values) belonging to one example id,
        and ``gts[i]`` is the ground truth for that id loaded via `load_gt`
        from the CoDraw json. Both are empty if the dataset yields nothing.
    """
    model = state_models.SconesGPT2StateModel(num_units=64)
    # os.path.join(data_dir) with one argument was a no-op; pass it directly.
    input_dataset = model.get_input_fn(data_dir,
                                       batch_size,
                                       tfrecords_file_format,
                                       split=split,
                                       shuffle=False)()
    dataset_iter = iter(input_dataset)

    with open(os.path.join(data_dir, 'CoDraw_1_0.json')) as f:
        data_json = json.load(f)

    cur_id = -1
    seqs = []
    gts = []
    cur_seq = []
    for _ in range(num_examples):
        try:
            examples = next(dataset_iter)[0]
        except StopIteration:
            break
        # All feature tensors share the batch dimension; iterate its rows.
        example_ids = examples['example_id']
        for i in range(len(example_ids)):
            if cur_id != int(example_ids[i]):
                # A new example id starts here: flush the sequence (and its
                # ground truth) accumulated for the previous id, if any.
                prev_id = cur_id
                cur_id = int(example_ids[i])
                if cur_seq:
                    gts.append(load_gt(prev_id, data_json, split))
                    seqs.append(cur_seq)
                    cur_seq = []
            cur_seq.append({k: v[i].numpy() for k, v in examples.items()})

    # Flush the final example. Guard against an empty dataset so we do not
    # emit a bogus entry for the sentinel id -1 with an empty sequence.
    if cur_seq:
        gts.append(load_gt(cur_id, data_json, split))
        seqs.append(cur_seq)
    return seqs, gts
Exemplo n.º 2
0
        f.close()
    finally:
        f_train.close()
        f_test.close()
        f_scores.close()
        # Save time elapsed
        f = open(model_path + model_name + "_time_elapsed.txt", 'w')
        f.write(str(time_elapsed))
        f.close()


if TEST:
    t0 = time.time()
    print 'Predicting...','\n'

    real_labels_mean = load_gt(test_gt_list)
    real_labels_frames = y_test

    results = np.zeros((X_test.shape[0], tags.shape[0]))
    predicted_labels_mean = np.zeros((num_frames_test.shape[0], 1))
    predicted_labels_frames = np.zeros((y_test.shape[0], 1))


    song_paths = open(test_songs_list, 'r').read().splitlines()

    previous_numFrames = 0
    n=0
    for i in range(0, num_frames_test.shape[0]):
        print song_paths[i]

        num_frames=num_frames_test[i]
Exemplo n.º 3
0
    files = [file for file in files if os.path.isfile(os.path.join(folder, file))]
    for file in files:
        shutil.copyfile(os.path.join(folder, file), os.path.join(volpy_folder, file))
        
#%%
# Convert each movie <name>.hdf5 into a .tif inside its 'sgpmd' subfolder.
for name in names:
    src_dir = os.path.join(ROOT_FOLDER, name)
    dst_dir = os.path.join(src_dir, 'sgpmd')
    movie = cm.load(os.path.join(src_dir, f'{name}.hdf5'))
    movie.save(os.path.join(dst_dir, name + '.tif'))
    
#%%
for name in names:
    folder = os.path.join(ROOT_FOLDER, name)
    spatial, temporal, spikes = load_gt(folder)  
    #ROIs = spatial.transpose([1,2,0])
    ROIs = spatial.copy()
    
    volpy_folder = os.path.join(folder, 'volpy')
    np.save(os.path.join(volpy_folder, 'ROIs_gt'), ROIs)
    
    
    
#%% volpy params
#for ridge_bg in [0.5, 0.1, 0.01, 0.001, 0]:
    context_size = 35                             # number of pixels surrounding the ROI to censor from the background PCA
    flip_signal = True                            # Important!! Flip signal or not, True for Voltron indicator, False for others
    hp_freq_pb = 1 / 3                            # parameter for high-pass filter to remove photobleaching
    threshold_method = 'simple'                   # 'simple' or 'adaptive_threshold'
    min_spikes= 30                                # minimal spikes to be found