Example #1
0
def stim_pro(feat_ptr, output, orig_size, fps, i):
    """Sugar function for parallel computing.

    Processes the feature time series at one spatial position of the
    stimulus: log-transform, convolve with a canonical HRF, truncate the
    convolution tail, and temporally down-sample to one volume per second.

    Parameters
    ----------
    feat_ptr : sequence of arrays indexable as [row, col, ...]; the slices
        are concatenated along axis 1 (time).
    output : array written in place at output[row, col, ...].
    orig_size : tuple; orig_size[1] is the number of columns, used to map
        the flat index `i` to (row, col).
    fps : movie frames per second (frames per TR, with TR = 1 s).
    i : flat spatial index of the position to process.
    """
    # progress indicator for the parallel workers
    print(i)
    # one frame per 1/fps seconds
    time_unit = 1.0 / fps

    # HRF sampled at the movie frame rate over a 35 s window
    hrf_times = np.arange(0, 35, time_unit)
    # float16 keeps the convolution kernel small (memory-saving trick)
    hrf_signal = hrf.biGammaHRF(hrf_times).astype(np.float16)

    # map flat index -> (row, col); divmod floors like py2 int division
    r_idx, c_idx = divmod(i, orig_size[1])
    # gather this position's time series from every feature buffer
    ts = np.concatenate([p[r_idx, c_idx, ...] for p in feat_ptr], axis=1)
    # log-transform; in-place += 1 avoids an extra temporary array
    ts += 1
    ts = np.log(ts)
    # convolve each row (feature channel) with the HRF
    convolved = np.apply_along_axis(np.convolve, 1, ts, hrf_signal)
    # 'full' convolution is len(ts)+len(hrf)-1 long; drop the tail that
    # extends past the end of the scanning run
    n_to_remove = len(hrf_times) - 1
    convolved = convolved[:, :-n_to_remove]
    # temporal down-sample: keep one frame per second (per TR)
    vol_times = np.arange(0, ts.shape[1], fps)
    output[r_idx, c_idx, ...] = convolved[:, vol_times]
Example #2
0
def get_stim_seq(stimulus, output_filename):
    """Get stimulus sequence by computing structural similarity between
    adjacent frames.

    The per-frame SSIM sequence is convolved with a canonical HRF,
    truncated to the scan length, down-sampled to one value per second
    (TR = 1 s at 15 fps), and saved to `output_filename` with np.save.

    Parameters
    ----------
    stimulus : 4D array of frames; axis 0 is time.
        NOTE(review): frames appear to be channel-first images that
        img_recon converts for rgb2gray -- confirm against caller.
    output_filename : target file for np.save.
    """
    fps = 15
    stim_len = stimulus.shape[0]
    stim_seq = np.zeros((stim_len, ))

    # SSIM between each pair of adjacent frames; stim_seq[0] stays 0
    for i in range(1, stim_len):
        prev_gray = rgb2gray(img_recon(stimulus[i - 1, ...]))
        curr_gray = rgb2gray(img_recon(stimulus[i, ...]))
        stim_seq[i] = compare_ssim(prev_gray, curr_gray, multichannel=False)

    # HRF sampled at the movie frame rate over a 35 s window
    time_unit = 1.0 / fps
    hrf_times = np.arange(0, 35, time_unit)
    hrf_signal = hrf.biGammaHRF(hrf_times)
    convolved_seq = np.convolve(stim_seq, hrf_signal)
    # drop the convolution tail past the end of the scanning run
    n_to_remove = len(hrf_times) - 1
    convolved_seq = convolved_seq[:-n_to_remove]
    # temporal down-sample: one value per second
    vol_times = np.arange(0, stim_len, fps)
    dseq = convolved_seq[vol_times]
    np.save(output_filename, dseq)
Example #3
0
def stim_pro(feat_ptr,
             output,
             orig_size,
             fps,
             fact,
             sal_ts,
             i,
             using_hrf=True):
    """Sugar function for parallel computing.

    Processes the i-th block of feature columns: optional saliency
    weighting, log-transform, then either HRF convolution plus temporal
    down-sampling or plain down-sampling with a fixed 4 s delay shift.
    The result is reshaped to 3D, optionally spatially down-sampled,
    and written into `output[channel_idx, ...]`.

    Parameters
    ----------
    feat_ptr : sequence of 2D arrays; columns i*bsize:(i+1)*bsize are
        taken from each and stacked along axis 0, then transposed.
    output : array written in place at output[channel_idx, ...].
    orig_size : tuple; orig_size[1] * orig_size[2] is the block size.
    fps : movie frames per second (frames per TR, with TR = 1 s).
    fact : spatial down-sampling factor; falsy disables it.
    sal_ts : optional saliency weighting multiplied into the series.
    i : index of the feature-column block to process.
    using_hrf : convolve with HRF (True) or shift-and-downsample (False).
    """
    # progress indicator for the parallel workers
    print(i)
    # one frame per 1/fps seconds
    time_unit = 1.0 / fps

    # HRF sampled at the movie frame rate over a 35 s window
    hrf_times = np.arange(0, 35, time_unit)
    hrf_signal = hrf.biGammaHRF(hrf_times)

    # gather this block's columns from every buffer in one concatenate
    # (avoids the quadratic cost of concatenating inside the loop)
    bsize = orig_size[1] * orig_size[2]
    chunks = [ptr[:, i * bsize:(i + 1) * bsize] for ptr in feat_ptr]
    ts = np.concatenate(chunks, axis=0).T
    # NOTE(review): truthiness of an ndarray with >1 element raises;
    # sal_ts is presumably None or a scalar/weight list -- confirm caller.
    if sal_ts:
        ts = ts * sal_ts
    # log-transform
    ts = np.log(ts + 1)
    if using_hrf:
        # convolve each row with the HRF
        convolved = np.apply_along_axis(np.convolve, 1, ts, hrf_signal)
        # drop the convolution tail past the end of the scanning run
        n_to_remove = len(hrf_times) - 1
        convolved = convolved[:, :-n_to_remove]
        # temporal down-sample: one frame per second
        vol_times = np.arange(0, ts.shape[1], fps)
        ndts = convolved[:, vol_times]
    else:
        # temporal down-sample without HRF modelling
        dts = down_sample(ts, (1, fps))
        # shift the series by a fixed hemodynamic delay of 4 s
        ndts = np.zeros_like(dts)
        delay_time = 4
        ndts[:, delay_time:] = dts[:, :(-1 * delay_time)]

    # reshape to 3D; // keeps integer semantics on both py2 and py3
    ndts = ndts.reshape(orig_size[1], orig_size[2], ts.shape[1] // fps)
    # locate this block's channel/row/col in the original feature layout
    idx = i * bsize
    channel_idx, row, col = vutil.node2feature(idx, orig_size)

    # spatial down-sample
    if fact:
        ndts = down_sample(ndts, (fact, fact, 1))
    output[channel_idx, ...] = ndts
Example #4
0
def feat_tr_pro(in_file, out_dir, out_dim=None, using_hrf=True):
    """Get TRs from input 3D dataset (the third dim is time).

    Loads the feature time courses, log-transforms them, and produces
    one sample per TR either by HRF convolution + temporal down-sampling
    or by plain down-sampling with a fixed 4 s delay shift. The result is
    optionally resized spatially and saved next to the input basename
    with a '_trs' suffix.

    Input
    -----
    in_file : absolute path of input file (a .npy loadable with np.load)
    out_dir : output directory
    out_dim : spatial resolution of output, a tuple of (row, col)
    using_hrf : convolve with HRF (True) or shift-and-downsample (False)
    """
    # load stimulus time courses lazily (memory-mapped)
    feat_ts = np.load(in_file, mmap_mode='r')
    ts_shape = feat_ts.shape
    # single-argument form keeps this valid on both py2 and py3
    print('Original data shape : %s' % (ts_shape,))

    # scanning parameter: repetition time in seconds
    TR = 1
    # movie fps
    fps = 15
    time_unit = 1.0 / fps

    # HRF sampled at the movie frame rate over a 35 s window
    hrf_times = np.arange(0, 35, time_unit)
    hrf_signal = hrf.biGammaHRF(hrf_times)

    # reshape to 2D (space x time) for convenience
    feat_ts = feat_ts.reshape(-1, ts_shape[2])
    # log-transform
    feat_ts = np.log(feat_ts + 1)
    if using_hrf:
        # convolve each row with the HRF
        convolved = np.apply_along_axis(np.convolve, 1, feat_ts, hrf_signal)
        # drop the convolution tail past the end of the scanning run
        n_to_remove = len(hrf_times) - 1
        convolved = convolved[:, :-n_to_remove]
        # temporal down-sample: one frame per TR
        vol_times = np.arange(0, feat_ts.shape[1], fps)
        dconvolved = convolved[:, vol_times]
    else:
        # temporal down-sample without HRF modelling
        dts = down_sample(feat_ts, (1, fps))
        # shift the series by a fixed hemodynamic delay of 4 s
        dconvolved = np.zeros_like(dts)
        delay_time = 4
        dconvolved[:, delay_time:] = dts[:, :(-1 * delay_time)]

    # reshape back to 3D (row x col x TRs)
    dconvolved3d = dconvolved.reshape(ts_shape[0], ts_shape[1], -1)

    # spatial down-sample
    if out_dim:
        ds_mark = '_%s_%s' % (out_dim[0], out_dim[1])
        dconvolved3d = img_resize(dconvolved3d, out_dim)
    else:
        ds_mark = ''
    print('Output data shape : %s' % (dconvolved3d.shape,))

    # save TRs; splitext keeps the full extension even for names with
    # several dots (fname.split('.') silently dropped trailing parts)
    fname = os.path.basename(in_file)
    base, ext = os.path.splitext(fname)
    out_file_name = '%s_trs%s%s' % (base, ds_mark, ext)
    out_file = os.path.join(out_dir, out_file_name)
    print('Save TR data into file %s' % out_file)
    np.save(out_file, dconvolved3d)