Example #1
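Plots an existing popeye Gaussian pRF fit for one voxel, then bundles three voxels with utils.multiprocess_bundle and fits them in parallel on a three-core sharedmem.Pool, timing the run.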
## plot the data and the model fit (the source snippet begins mid-call;
## the plot/scatter calls below are a plausible completion)
import matplotlib.pyplot as plt
plt.plot(fit.prediction, c='r', lw=3, label='model', zorder=1)
plt.scatter(range(len(fit.data)), fit.data, c='k', label='data',
            zorder=2)
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
plt.xlabel('Time', fontsize=18)
plt.ylabel('Amplitude', fontsize=18)
plt.xlim(0, len(fit.data))
plt.legend(loc=0)

## multiprocess 3 voxels
data = [data, data, data]
indices = ([1, 2, 3], [4, 6, 5], [7, 8, 9])
bundle = utils.multiprocess_bundle(og.GaussianFit,
                                   model,
                                   data,
                                   grids,
                                   bounds,
                                   indices,
                                   auto_fit=True,
                                   verbose=1,
                                   Ns=3)

## run
print("popeye will analyze %d voxels across %d cores" % (len(bundle), 3))
with sharedmem.Pool(np=3) as pool:
    t1 = datetime.datetime.now()
    output = pool.map(utils.parallel_fit, bundle)
    t2 = datetime.datetime.now()
    delta = t2 - t1
    print("popeye multiprocessing finished in %s.%s seconds" %
          (delta.seconds, delta.microseconds))
Example #2
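Splits a large catalog of random tracer positions into gnp chunks and computes a projected shape-correlation statistic for each chunk inside a sharedmem.Pool, writing each chunk's result to its own file.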
          len(mass_star3),
          len(idCent_star3)]

    Dlen = 5000000
    center_pivD = numpy.fromfile('/home/twopointdata/randdmpos' + dirno +
                                 '.rand' + str(Dlen) + 'alltracer',
                                 dtype=(numpy.float64, 3))

gnp = 16  # number of parallel chunks
Blen = Dlen // gnp  # positions per chunk (integer division, for slicing)
fDMcen_z = fDMcenter[:, 2]

# one unit slice per chunk; work() maps slice i to rows [i*Blen, (i+1)*Blen)
slices1 = [slice(i, i + 1) for i in range(0, gnp)]
if (qzero == 1):
    fq = numpy.zeros(len(fq))
with sharedmem.Pool() as pool:

    def work(slice):
        si1 = slice.start
        sj1 = slice.stop
        seCent = fDMcenter
        seeigvec = feigvec
        sq = fq
        sDcent = center_pivD[si1 * Blen:sj1 * Blen]
        if (si1 == (gnp - 1)):
            sDcent = center_pivD[si1 * Blen:Dlen]
        hij = wgp_shapcorr_allsg(seCent, seeigvec, sq, sDcent, rpbins, zpbins,
                                 si1, sj1, boxlen)
        hij.tofile('/home/wgpdata_alltracer/' + rdstr2 + '_' + str(Dlen) +
                   'Sgp' + clrstr1 + '.' + dirno + '.' + str(mmin) + '.' +
                   str(si1) + str(sj1) + typ1 + '.alltracer')

    # assumed completion; the original snippet omits the dispatch of chunks
    pool.map(work, slices1)
Example #3
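A popeye unit test: three copies of a synthetic voxel are bundled and fit in parallel using manually constructed slice grids, and each fit must recover the true pRF parameters.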
import ctypes

import numpy as np
import numpy.testing as npt
import sharedmem

import popeye.og as og
import popeye.utilities as utils
from popeye.visual_stimulus import VisualStimulus, simulate_bar_stimulus


def test_parallel_fit_manual_grids():

    # stimulus features
    viewing_distance = 38
    screen_width = 25
    thetas = np.arange(0, 360, 45)
    num_blank_steps = 0
    num_bar_steps = 30
    ecc = 10
    tr_length = 1.0
    frames_per_tr = 1.0
    scale_factor = 0.10
    pixels_down = 100
    pixels_across = 100
    dtype = ctypes.c_int16
    voxel_index = (1, 2, 3)
    auto_fit = True
    verbose = 1

    # create the sweeping bar stimulus in memory
    bar = simulate_bar_stimulus(pixels_across, pixels_down, viewing_distance,
                                screen_width, thetas, num_bar_steps,
                                num_blank_steps, ecc)

    # create an instance of the Stimulus class
    stimulus = VisualStimulus(bar, viewing_distance, screen_width,
                              scale_factor, tr_length, dtype)

    # initialize the gaussian model
    model = og.GaussianModel(stimulus, utils.double_gamma_hrf)
    model.hrf_delay = 0

    # generate a random pRF estimate
    x = -5.24
    y = 2.58
    sigma = 1.24
    beta = 2.5
    baseline = -0.25

    # create the "data"
    data = model.generate_prediction(x, y, sigma, beta, baseline)

    # set search grid
    x_grid = slice(-5, 4, 5)
    y_grid = slice(-5, 7, 5)
    s_grid = slice(1 / stimulus.ppd, 5.25, 5)
    b_grid = slice(0.1, 4.0, 5)

    # set search bounds
    x_bound = (-12.0, 12.0)
    y_bound = (-12.0, 12.0)
    s_bound = (1 / stimulus.ppd, 12.0)
    b_bound = (1e-8, 1e2)
    m_bound = (None, None)

    # pack the search grids and bounds
    grids = (x_grid, y_grid, s_grid)
    bounds = (x_bound, y_bound, s_bound, b_bound, m_bound)

    # make 3 voxels
    all_data = np.array([data, data, data])
    num_voxels = all_data.shape[0]
    indices = [(1, 2, 3)] * 3

    # bundle the voxels
    bundle = utils.multiprocess_bundle(og.GaussianFit, model, all_data, grids,
                                       bounds, indices)

    # run analysis
    with sharedmem.Pool(np=sharedmem.cpu_count() - 1) as pool:
        output = pool.map(utils.parallel_fit, bundle)

    # assert equivalence
    for fit in output:
        npt.assert_almost_equal(fit.x, x, 2)
        npt.assert_almost_equal(fit.y, y, 2)
        npt.assert_almost_equal(fit.sigma, sigma, 2)
        npt.assert_almost_equal(fit.beta, beta, 2)
        npt.assert_almost_equal(fit.baseline, baseline, 2)
Example #4
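A popeye unit test: three synthetic voxels are fit in parallel and the estimates are recast into a NIfTI volume, both in Cartesian form and in the overloaded polar form.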
import ctypes

import nibabel
import numpy as np
import numpy.testing as npt
import sharedmem

import popeye.og as og
import popeye.utilities as utils
from popeye.visual_stimulus import VisualStimulus, simulate_bar_stimulus


def test_recast_estimation_results():

    # stimulus features
    viewing_distance = 38
    screen_width = 25
    thetas = np.arange(0, 360, 45)
    num_blank_steps = 0
    num_bar_steps = 30
    ecc = 10
    tr_length = 1.0
    frames_per_tr = 1.0
    scale_factor = 0.10
    pixels_down = 100
    pixels_across = 100
    dtype = ctypes.c_int16
    voxel_index = (1, 2, 3)
    auto_fit = True
    verbose = 1

    # create the sweeping bar stimulus in memory
    bar = simulate_bar_stimulus(pixels_across, pixels_down, viewing_distance,
                                screen_width, thetas, num_bar_steps,
                                num_blank_steps, ecc)

    # create an instance of the Stimulus class
    stimulus = VisualStimulus(bar, viewing_distance, screen_width,
                              scale_factor, tr_length, dtype)

    # initialize the gaussian model
    model = og.GaussianModel(stimulus, utils.spm_hrf)
    model.hrf_delay = 0

    # generate a random pRF estimate
    x = -5.24
    y = 2.58
    sigma = 1.24
    beta = 2.5
    baseline = -0.25

    # create the "data"
    data = model.generate_prediction(x, y, sigma, beta, baseline)

    # set search grid
    x_grid = utils.grid_slice(-5, 4, 5)
    y_grid = utils.grid_slice(-5, 7, 5)
    s_grid = utils.grid_slice(1 / stimulus.ppd, 5.25, 5)
    b_grid = utils.grid_slice(0.1, 4.0, 5)

    # set search bounds
    x_bound = (-12.0, 12.0)
    y_bound = (-12.0, 12.0)
    s_bound = (1 / stimulus.ppd, 12.0)
    b_bound = (1e-8, 1e2)
    m_bound = (None, None)

    # pack the search grids and bounds
    grids = (x_grid, y_grid, s_grid)
    bounds = (x_bound, y_bound, s_bound, b_bound, m_bound)

    # create 3 voxels of data
    all_data = np.array([data, data, data])
    indices = [(0, 0, 0), (0, 0, 1), (0, 0, 2)]

    # bundle the voxels
    bundle = utils.multiprocess_bundle(og.GaussianFit, model, all_data, grids,
                                       bounds, indices)

    # run analysis
    with sharedmem.Pool(np=3) as pool:
        output = pool.map(utils.parallel_fit, bundle)

    # create grid parent
    arr = np.zeros((1, 1, 3))
    grid_parent = nibabel.Nifti1Image(arr, np.eye(4, 4))

    # recast the estimation results
    nif = utils.recast_estimation_results(output, grid_parent)
    dat = nif.get_data()

    # assert equivalence
    npt.assert_almost_equal(np.mean(dat[..., 0]), x)
    npt.assert_almost_equal(np.mean(dat[..., 1]), y)
    npt.assert_almost_equal(np.mean(dat[..., 2]), sigma)
    npt.assert_almost_equal(np.mean(dat[..., 3]), beta)
    npt.assert_almost_equal(np.mean(dat[..., 4]), baseline)

    # recast the estimation results - OVERLOADED
    nif = utils.recast_estimation_results(output, grid_parent, True)
    dat = nif.get_data()

    # assert equivalence
    npt.assert_almost_equal(np.mean(dat[..., 0]), np.arctan2(y, x), 2)
    npt.assert_almost_equal(np.mean(dat[..., 1]), np.sqrt(x**2 + y**2), 2)
    npt.assert_almost_equal(np.mean(dat[..., 2]), sigma)
    npt.assert_almost_equal(np.mean(dat[..., 3]), beta)
    npt.assert_almost_equal(np.mean(dat[..., 4]), baseline)
Example #5
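A snapshot-cutting command-line tool: one thread pool counts the particles that survive the spatial filter, output files are then sized to balance the totals, and a second pool copies the surviving particles into them.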
import argparse

import numpy
import sharedmem

# Snapshot and MeshIndex come from the author's snapshot-I/O package and
# are not defined in this snippet.


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("filename")
    parser.add_argument("-f", "--format", dest='format', required=True)
    parser.add_argument("-p",
                        "--ptype",
                        dest='ptypes',
                        action='append',
                        required=False,
                        type=int)
    parser.add_argument("-o", "--output", dest='output', required=True)
    parser.add_argument("--maxnpar",
                        dest='maxnpar',
                        default=20 * 1024 * 1024,
                        required=False,
                        type=int)
    parser.add_argument("-F",
                        "--filter",
                        dest='filter',
                        required=False,
                        type=lambda x: x.split(':'))
    group = parser.add_argument_group()
    group.add_argument("-m",
                       "--meshindex",
                       dest='meshindex',
                       required=False,
                       type=MeshIndex.fromfile)
    x = group.add_mutually_exclusive_group()
    x.add_argument("--origin",
                   nargs='+',
                   dest='origin',
                   type=float,
                   required=False)
    x.add_argument("--center",
                   nargs='+',
                   dest='center',
                   type=float,
                   required=False)
    group.add_argument("--boxsize",
                       nargs='+',
                       dest='boxsize',
                       type=float,
                       required=False)
    args = parser.parse_args()

    origin = None
    boxsize = None

    def open(template, fid, create=False):
        if '%d' in template:
            snap = Snapshot(template % fid, args.format, create=create)
        else:
            snap = Snapshot(template, args.format, create=create)
        return snap

    def filter(snap, ptype, origin, boxsize):
        if snap.C['N'][ptype] == 0: return None, 0
        if origin is None or boxsize is None:
            return None, snap.C['N'][ptype]
        tail = origin + boxsize
        pos = snap[ptype, 'pos']
        iter = numpy.nditer([pos[:, 0], pos[:, 1], pos[:, 2], None],
                            op_dtypes=[None, None, None, '?'],
                            op_flags=[['readonly']] * 3 +
                            [['writeonly', 'allocate']],
                            flags=['external_loop', 'buffered'])
        for x, y, z, m in iter:
            m[...] = \
              (x >= origin[0]) & (y >= origin[1]) & (z >= origin[2]) \
            & (x <= tail  [0]) & (y <= tail  [1]) & (z <= tail  [2])

        return iter.operands[3], iter.operands[3].sum()

    def select(snap, ptype, block, mask):
        if mask is None:
            result = snap[ptype, block]
        else:
            result = snap[ptype, block][mask]
        del snap[ptype, block]
        return result

    snap0 = open(args.filename, 0)
    nptypes = snap0.reader.schema.nptypes

    if args.ptypes is None or len(args.ptypes) == 0:
        args.ptypes = range(nptypes)
    print('doing', args.ptypes)
    if '%d' in args.filename:
        Nfile = snap0.C['Nfiles']
    else:
        Nfile = 1

    fids = range(Nfile)

    if args.boxsize is not None:
        boxsize = numpy.empty(3, dtype='f8')
        boxsize[:] = args.boxsize
        if args.origin is None and args.center is None:
            parser.print_help()
            parser.exit()
        else:
            origin = numpy.empty(3, dtype='f8')
            if args.origin is not None:
                origin[:] = args.origin
            else:
                origin[:] = args.center
                origin[:] = origin - boxsize * 0.5

            if args.meshindex is not None:
                fids = args.meshindex.cut(origin, boxsize)
            args.meshindex = None

    print(origin, boxsize)
    defaultheader = snap0.header
    Ntot = snap0.C['Ntot']
    Ntot_out = numpy.zeros(nptypes, dtype='i8')

    # first pass: count, per input file and particle type, how many
    # particles survive the spatial filter
    with sharedmem.Pool(use_threads=True) as pool:

        def work(fid):
            snap = open(args.filename, fid)
            N_out = numpy.zeros(nptypes, dtype='i8')
            for ptype in args.ptypes:
                mask, count = filter(snap, ptype, origin, boxsize)
                N_out[ptype] = count
            return N_out

        def reduce(N_out):
            Ntot_out[...] = N_out + Ntot_out

        pool.map(work, fids, callback=reduce)

    Nfile_out = (Ntot_out.sum() // args.maxnpar + 1)

    if Nfile_out > 1 and '%d' not in args.output:
        args.output += '.%d'

    outputs = [open(args.output, fid, create=True) for fid in range(Nfile_out)]
    written = numpy.zeros((Nfile_out, nptypes), dtype='i8')  # rows committed
    free = numpy.empty_like(written)  # rows still unassigned
    writing = numpy.zeros((Nfile_out, nptypes), dtype='i8')  # rows in flight

    cursor = numpy.zeros(nptypes, dtype='i8')

    for i, output in enumerate(outputs):
        output.header[...] = defaultheader
        output.C['Ntot'] = Ntot_out
        output.C['N'] = (Ntot_out * (i + 1) // Nfile_out) \
                     -  (Ntot_out * i // Nfile_out)
        free[i] = output.C['N']
        output.C['Nfiles'] = Nfile_out
        output.create_structure()

    # second pass: re-read each input file and scatter its surviving
    # particles into the preallocated output files
    with sharedmem.Pool(use_threads=True) as pool:

        def work(fid):
            snap = open(args.filename, fid)

            for ptype in args.ptypes:
                mask, count = filter(snap, ptype, origin, boxsize)
                if count == 0: continue
                with pool.lock:
                    cumfree = free[:, ptype].cumsum()
                    last_output = cumfree.searchsorted(count, side='left')
                    first_output = cumfree.searchsorted(0, side='right')
                    # plan the copy: one row per destination file, holding
                    # (output id, source start, length, destination start)
                    table = numpy.zeros(last_output - first_output + 1,
                                        ('i8', 4))
                    outputid, istart, nwrite, ostart = table.T
                    outputid[...] = range(first_output, last_output + 1)
                    ostart[...] = written[first_output:last_output + 1, ptype]
                    nwrite[:-1] = free[first_output:last_output, ptype]
                    nwrite[-1] = count - nwrite[:-1].sum()
                    istart[1:] = nwrite.cumsum()[:-1]

                    writing[first_output:last_output + 1, ptype] += nwrite
                    written[first_output:last_output + 1, ptype] += nwrite
                    free[first_output:last_output + 1, ptype] -= nwrite

                    for i in range(first_output, last_output + 1):
                        for block in snap.reader.schema:
                            outputs[i].alloc(block, ptype)

                for block in snap.reader.schema:
                    if (ptype, block) not in snap: continue
                    towrite = select(snap, ptype, block, mask)
                    for id, i, l, o in table:
                        outputs[id][ptype, block][o:o + l] = towrite[i:i + l]

                with pool.lock:
                    writing[first_output:last_output + 1, ptype] -= nwrite

                    for i, output in enumerate(outputs):
                        if (free[i] == 0).all() and (writing[i] == 0).all() \
                                and outputs[i] is not None:
                            print(output[0, 'mass'])
                            output.save_all()
                            outputs[i] = None
                            output = None
                            print('saving ', i)
            return

        pool.map(work, fids)
Example #6
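Matches star-forming gas particle ids against a set of group-ordered snapshot files, one file per worker process, collecting the matched star-formation rates.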
import numpy
import os.path
import sharedmem
from sys import argv

# Snapshot is the gadget snapshot reader used elsewhere in this project;
# it is not defined in this snippet.
idsfile = argv[1]
sfgasdir = argv[2]

first = Snapshot(idsfile % 0,
                 'cmugadget.GroupIDs',
                 idtype='u8',
                 floattype='f4')

sfgasid = numpy.fromfile(sfgasdir + '/sfgasid', dtype='u8')
sfgassfr = numpy.fromfile(sfgasdir + '/sfgassfr', dtype='f4')
matchedfile = open(sfgasdir + '/matchedsfr', 'wb')
matchedidfile = open(sfgasdir + '/matchedid', 'wb')
with sharedmem.Pool(use_threads=False) as pool:

    def work(i):
        snap = Snapshot(idsfile % i,
                        'cmugadget.GroupIDs',
                        idtype='u8',
                        floattype='f4')

        id = snap[0, 'id']
        result = numpy.zeros(len(id), dtype='f4')
        lookedid = numpy.zeros(len(id), dtype='u8')
        ind = sfgasid.searchsorted(id)
        ind.clip(0, len(sfgasid) - 1, out=ind)

        found = sfgasid[ind] == id
        result[found] = sfgassfr[ind[found]]
        lookedid[found] = id[found]
        # assumed completion; the source snippet is truncated here: return
        # the matched values so the parent process can write them in order
        return result, lookedid

    for result, lookedid in pool.map(work, range(first.C['Nfiles'])):
        result.tofile(matchedfile)
        lookedid.tofile(matchedidfile)
Example #7
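The per-voxel driver of a prfanalyze-style popeye solver: each voxel is fit with a GaussianFit (with or without an HRF-delay parameter), optionally across mps worker processes, and the estimates are recast to the x0/y0/sigma convention.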
# assumed reconstruction of the truncated function head; model, grids,
# bounds, Ns, fixed_hrf, and mps are defined earlier in the original script
def fit_voxel(tup):
    (ii, vx, js) = tup
    # auto_fit = True fits the model on assignment
    # verbose = 0 is silent
    # verbose = 1 is a single print
    # verbose = 2 is very verbose
    if fixed_hrf is not False:
        fit = og_nohrf.GaussianFit(model, vx, grids, bounds, Ns=Ns,
                                   voxel_index=(ii, 1, 1), auto_fit=True, verbose=2)
    else:
        fit = og.GaussianFit(model, vx, grids, bounds, Ns=Ns,
                             voxel_index=(ii, 1, 1), auto_fit=True, verbose=2)
    return (ii, vx) + tuple(fit.overloaded_estimate) + (fit.prediction,)
if mps == 1:
    voxs = [fit_voxel((ii, vx, js)) for (ii, vx, js) in zip(range(len(bold)), bold, stim_json)]
else:
    tups = list(zip(range(len(bold)), bold, stim_json))
    with sharedmem.Pool(np=mps) as pool:
        voxs = pool.map(fit_voxel, tups)
    voxs = sorted(voxs, key=lambda tup: tup[0])
# Update the results to match the x0/y0, sigma style used by prfanalyze
all_fields = ('index','voxel') + fields + ('pred',)
res = {k: np.asarray([u[ii] for u in voxs]) for (ii, k) in enumerate(all_fields)}
rr = {}
rr['centerx0'] = np.cos(res['theta'])  * res['rho']
rr['centery0'] = -np.sin(res['theta']) * res['rho']
rr['sigmamajor'] = res['sigma']
rr['sigmaminor'] = res['sigma']
rr['beta'] = res['beta']
rr['baseline'] = res['baseline']
if fixed_hrf is False: rr['hrfdelay'] = res['hrfdelay']
# Export the files
for (k, v) in six.iteritems(rr):
    ...  # the snippet ends here; each field is written to its own output file