Example No. 1
def MeshSmooth(NC, neighbList, Iter):
    print "Two-stage Taubin Smoothing"
    N1 = NC.shape[0]
    NPP1 = N1 / LIM
    for i in range(Iter):
        print "	Iteration ", i + 1
        print "		Umbrella-Operator Step"
        Umb = -np.zeros((NC.shape))
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(MeshUmbrella))
        for j in range(0, LIM):
            calc(np.array(range(0, NPP1)) + j * NPP1, NC, neighbList, Umb)
        for j in range(0, LIM):
            Umb[np.array(range(0, NPP1)) + j * NPP1, :] = results[j]
        Umb[range(LIM * NPP1, N1), :] = MeshUmbrella(range(LIM * NPP1, N1), NC,
                                                     neighbList, Umb)
        print "		Smoothing Step"
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(MeshTaubin))
        for j in range(0, LIM):
            calc(np.array(range(0, NPP1)) + j * NPP1, NC, neighbList, Umb)
        for j in range(0, LIM):
            NC[np.array(range(0, NPP1)) + j * NPP1, :] = results[j]
        NC[range(LIM * NPP1, N1), :] = MeshTaubin(range(LIM * NPP1, N1), NC,
                                                  neighbList, Umb)
    return NC
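Example No. 1 (and most of the snippets below) follows the same recipe: split the row indices into LIM equally sized chunks, dispatch each chunk to a pprocess worker, copy each worker's result back into the output array, and process the few leftover rows in the parent. A minimal stand-alone sketch of that pattern, assuming a hypothetical per-chunk worker work_on and a process limit LIM, could look like this:

import numpy as np
import pprocess

LIM = 4  # number of parallel processes (assumed for this sketch)

def work_on(idx, data):
    # Hypothetical worker: return the processed rows for the given indices.
    return data[idx] * 2.0

def parallel_rows(data):
    n = data.shape[0]
    npp = n // LIM                      # rows handled by each process
    out = np.zeros_like(data)
    results = pprocess.Map(limit=LIM)
    calc = results.manage(pprocess.MakeParallel(work_on))
    for j in range(LIM):                # dispatch LIM chunks
        calc(np.arange(npp) + j * npp, data)
    for j in range(LIM):                # collect results in dispatch order
        out[np.arange(npp) + j * npp] = results[j]
    # remainder rows, since n is rarely an exact multiple of LIM
    if n > LIM * npp:
        out[LIM * npp:n] = work_on(np.arange(LIM * npp, n), data)
    return out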
Example No. 2
def MeshSmooth(nodes, NC, neighbList, Iter, listPos=0):
    # If listPos == 0: use the neighbour-list position matching the nodal position in nodes, i.e. len(neighbList) == nodes.size
    # If listPos != 0: use neighbList[ndnr] where nodes[i] == ndnr
    print "Two-stage Taubin Smoothing"
    print "ListPos: ", listPos
    N1 = nodes.size
    NPP1 = N1 / LIM
    for i in range(Iter):
        print "	Iteration ", i + 1
        print "		Umbrella-Operator Step"
        Umb = -np.zeros((NC.shape))
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(MeshUmbrella))
        for j in range(0, LIM):
            calc(
                np.array(range(0, NPP1)) + j * NPP1, nodes, NC, neighbList,
                Umb, listPos)
        for j in range(0, LIM):
            Umb[nodes[np.array(range(0, NPP1)) + j * NPP1], :] = results[j]
        Umb[nodes[range(LIM * NPP1, N1)], :] = MeshUmbrella(
            range(LIM * NPP1, N1), nodes, NC, neighbList, Umb, listPos)
        print "		Smoothing Step"
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(MeshTaubin))
        for j in range(0, LIM):
            calc(
                np.array(range(0, NPP1)) + j * NPP1, nodes, NC, neighbList,
                Umb, listPos)
        for j in range(0, LIM):
            NC[nodes[np.array(range(0, NPP1)) + j * NPP1], :] = results[j]
        NC[nodes[range(LIM * NPP1, N1)], :] = MeshTaubin(
            range(LIM * NPP1, N1), nodes, NC, neighbList, Umb, listPos)
    return NC
Example No. 3
def elemQual_mu(elem, NC, Tet, disp=1, deltPresc=0):
    if disp == 1:
        print 'Determine element Quality [mu]'
    gamma = 0.0000001
    delta = 0
    T1 = elem.shape[0]
    TPP1 = T1 / LIM
    Sn2 = np.zeros((elem.shape[0], ))
    Sig = np.zeros((elem.shape[0], ))
    if elem.size > LIM:
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(Sn2Sig))
        for j in range(0, LIM):
            calc(elem[np.array(range(0, TPP1)) + j * TPP1], NC, Tet)
        for j in range(0, LIM):
            Sn2[j * TPP1:(1 + j) * TPP1], Sig[j * TPP1:(1 + j) *
                                              TPP1] = results[j]
    if np.array(range(LIM * TPP1, T1)).size > 0:
        Sn2[LIM * TPP1:T1], Sig[LIM * TPP1:T1] = Sn2Sig(
            elem[np.array(range(LIM * TPP1, T1))], NC, Tet)
    if np.min(Sig) < gamma:
        if disp == 1:
            print '	minimum Sig = ', np.min(Sig)
        delta = np.sqrt(gamma * (gamma - np.min(Sig)))
    if delta < deltPresc:
        delta = deltPresc
    if disp == 1:
        print '	delta = ', delta
    h_sig = (Sig + np.sqrt(Sig * Sig + 4 * delta * delta)) / 2
    return 3 * np.power(h_sig, 2. / 3) / Sn2, delta, Sn2, Sig
Example No. 4
def Neigenvalues(NC,Tri,layer,ls):
  KDTNC = KDTree(NC,ls)
  VNORM = vertexnormal(NC,Tri)
  Neig = np.zeros(NC.shape)
  N1 = Neig.size/3
  NPP1 = N1/LIM
  results = pprocess.Map(limit=LIM)
  calc = results.manage(pprocess.MakeParallel(NodeEigVal))
  for j in range(0,LIM):
      calc(np.array(range(0,NPP1))+j*NPP1,NC,Tri,KDTNC,ls,VNORM,layer,Neig)
  for j in range(0,LIM):
      Neig[np.array(range(0,NPP1))+j*NPP1,] = results[j]
  Neig[range(LIM*NPP1,N1),]=NodeEigVal(range(LIM*NPP1,N1),NC,Tri,KDTNC,ls,VNORM,layer,Neig)
  #queue=pprocess.Queue(limit=LIM)
  #results = []
  #Nneig = queue.manage(pprocess.MakeParallel(NodeEigVal))
  #for i in range(0,N):
    #Nneig(i,NC,Tri,KDTNC,ls,VNORM,2)
  #for i,NE in queue:
    #Neig[i,] = NE
  rows=np.where(Neig[:,0]<Neig[:,1])[0]
  Neig[rows,:]=np.c_[Neig[rows,1],Neig[rows,0],Neig[rows,2]]
  rows=np.where(Neig[:,0]<Neig[:,2])[0]
  Neig[rows,:]=np.c_[Neig[rows,2],Neig[rows,1],Neig[rows,0]]
  rows=np.where(Neig[:,1]<Neig[:,2])[0]
  Neig[rows,:]=np.c_[Neig[rows,0],Neig[rows,2],Neig[rows,1]]
  return Neig
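The three conditional row swaps at the end of Example No. 4 form a small sorting network that leaves each row of Neig with its eigenvalues in descending order. Assuming that ordering is all that is needed, the same effect can be had in a single call:

import numpy as np

Neig = np.random.rand(10, 3)   # stand-in for the per-node eigenvalue array above
# Sort each row's three eigenvalues in descending order;
# equivalent to the three pairwise swap passes in Example No. 4.
Neig = -np.sort(-Neig, axis=1)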
Example No. 5
def filter_regions(multi_level_mask, thresholds_list, parameter_dict, one_label):
    one_threshold = thresholds_list[one_label]
    mask = (multi_level_mask > one_label)
    min_size = parameter_dict['small_vol_threshold']
    label_image, bounding_box_slices = lyBWareaopen(mask, min_size)

    region_result = [False]
    '''single thread'''
    # for label_number in range(1, np.max(label_image) + 1):
    #     single_result = region_task(label_number, label_image, bounding_box_slices, one_threshold, parameter_dict)
    #     region_result.append(single_result)

    '''multi thread'''
    results = pprocess.Map(limit=pprocess.get_number_of_cores())
    calc = results.manage(pprocess.MakeParallel(region_task))

    for label_number in range(1, np.max(label_image) + 1):
        calc(label_number, label_image, bounding_box_slices, one_threshold, parameter_dict)

    for i, result in enumerate(results):
        region_result.append(result)

    region_result = np.array(region_result, np.bool)
    tuild_result = np.logical_not(region_result)
    label_image[tuild_result[label_image]] = 0

    return (label_image > 0)
Example No. 6
def main():
    lidc_dict = get_series_uid_path_dict(LIDCPath)
    list_of_args = range(ProcessorNum)
    group_num = len(csvLines) // ProcessorNum
    cutPoint = np.empty([ProcessorNum, 2], dtype=int)
    for row in range(ProcessorNum):
        # start point
        cutPoint[row, 0] = row * group_num
        if row == ProcessorNum - 1:
            # stop point
            cutPoint[row, 1] = len(csvLines)
        else:
            # stop point
            cutPoint[row, 1] = row * group_num + group_num - 1 + 1

    # starting parallel reading
    st = time.time()
    results = pprocess.Map()
    parallel_function = results.manage(pprocess.MakeParallel(write_csv))
    for args in list_of_args:
        parallel_function(args, cutPoint[args, 0], cutPoint[args, 1], lidc_dict)
    print('\nStarting Parallel time {:.2f} seconds...'.format(time.time() - st))

    st = time.time()
    results[:]
    # parallel_results = results[:]
    print('\nParallel costs {:.2f} seconds...'.format(time.time() - st))
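In the function above the first timed print only measures dispatch; the slice results[:] is what forces collection of the children's output. The cutPoint loop itself just partitions the csv rows into ProcessorNum contiguous [start, stop) blocks, with the last worker absorbing the remainder. A comparable partition (a sketch, not the original code; np.array_split spreads the remainder over the first chunks rather than the last one) is:

import numpy as np

ProcessorNum = 4
n_rows = 103                        # stand-in for len(csvLines)
chunks = np.array_split(np.arange(n_rows), ProcessorNum)
cut_points = [(int(c[0]), int(c[-1]) + 1) for c in chunks]
# e.g. [(0, 26), (26, 52), (52, 78), (78, 103)]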
Example No. 7
def ensemble_predictions(members, testX, nproc):
	if nproc>6:
		nproc=6

	def para_predict(modname):
		from keras.models import load_model
		model = load_model(modname)
		prediction = model.predict(testX)
		return prediction

	queue = pprocess.Queue(limit=nproc)
	calc = queue.manage(pprocess.MakeParallel(para_predict))

	yhats = []
	for i in members:
		calc(i)
		#yhats.append(para_predict(i,testX))

	for preds in queue:
		yhats.append(preds)

	# make predictions
	#yhats = [model.predict(testX) for model in members]
	yhats = numpy.array(yhats)
	# weighted sum across ensemble members
	#summed = numpy.tensordot(yhats, weights, axes=((0),(0)))
	summed = numpy.sum(yhats, axis=0)
	# argmax across classes
	result = numpy.argmax(summed, axis=1)
	return result
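With pprocess.Queue the results are read back in completion order, not submission order; that is harmless in Example No. 7 because the predictions are summed over the ensemble afterwards. When order does matter, a worker can return its input index alongside its result. A sketch under that assumption, reusing the names members, testX and nproc from the example above:

import numpy
import pprocess

def predict_one(index, modname):
    # Hypothetical worker: load one ensemble member and tag its prediction
    # with the index it was submitted under.
    from keras.models import load_model
    model = load_model(modname)
    return index, model.predict(testX)

queue = pprocess.Queue(limit=nproc)
calc = queue.manage(pprocess.MakeParallel(predict_one))
for i, name in enumerate(members):
    calc(i, name)

yhats = [None] * len(members)
for index, preds in queue:          # results arrive in completion order
    yhats[index] = preds
yhats = numpy.array(yhats)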
Example No. 8
def TetCellToVertex(NC, Connect, ScalarValue):
    N1 = NC.shape[0]
    NPP1 = N1 / LIM
    ConnectNC = np.c_[
        np.sum(
            np.c_[NC[Connect[:, 0], 0], NC[Connect[:, 1], 0],
                  NC[Connect[:, 2], 0], NC[Connect[:, 3], 0]], 1) / 4,
        np.sum(
            np.c_[NC[Connect[:, 0], 1], NC[Connect[:, 1], 1],
                  NC[Connect[:, 2], 1], NC[Connect[:, 3], 1]], 1) / 4,
        np.sum(
            np.c_[NC[Connect[:, 0], 2], NC[Connect[:, 1], 2],
                  NC[Connect[:, 2], 2], NC[Connect[:, 3], 2]], 1) / 4]
    Scalars = np.zeros(NC.shape[0])
    results = pprocess.Map(limit=LIM)
    calc = results.manage(pprocess.MakeParallel(weightSCVAL))
    for j in range(LIM):
        calc(
            np.array(range(0, NPP1)) + j * NPP1, NC, Connect, ConnectNC,
            ScalarValue)
    for j in range(LIM):
        Scalars[np.array(range(0, NPP1)) + j * NPP1] = results[j]
    #for j in range(LIM):
    if N1 > LIM * NPP1:
        Scalars[range(LIM * NPP1,
                      N1)] = weightSCVAL(np.array(range(LIM * NPP1, N1)), NC,
                                         Connect, ConnectNC, ScalarValue)
    return Scalars
Example No. 9
def cast_unsuitable_regions_by_label_MT(multi_level_mask, thresholds_list, parameter_dict):
    '''cast small region as noise and big region as vessel'''
    nodule_mask = np.zeros_like(multi_level_mask, np.int8)
    multi_image_labels = range(int(np.max(multi_level_mask)))
    multi_image_labels.reverse()
    loop_times = len(multi_image_labels)
    # TODO map style parallezision
    shared_array_s = mp.Array(ctypes.c_int8, loop_times * np.size(nodule_mask))
    shared_array = np.frombuffer(shared_array_s.get_obj(), dtype=np.int8).reshape((loop_times,) + nodule_mask.shape)

    num_of_proc = pprocess.get_number_of_cores()
    results = pprocess.Map(limit=num_of_proc / 2)
    para_func = results.manage(pprocess.MakeParallel(put_result_into_shared_memory))

    for i in range(loop_times):
        one_label = multi_image_labels[i]
        para_func(shared_array, multi_level_mask, thresholds_list, parameter_dict, one_label)
    results.finish()

    for num_of_loop in range(shared_array.shape[0]):
        nodule_mask = np.logical_or(nodule_mask, shared_array[num_of_loop, ...])

    datastate = shared_array_s.get_obj()._wrapper._state
    arenaobj = datastate[0][0]
    arenaobj.buffer.close()
    mp.heap.BufferWrapper._heap = mp.heap.Heap()

    return nodule_mask
Example No. 10
    def _sl_call(self, dataset, roi_ids, nproc):
        """Classical generic searchlight implementation
        """
        assert (self.results_backend in ('native', 'hdf5'))
        # compute
        if nproc is not None and nproc > 1:
            # split all target ROIs centers into `nproc` equally sized blocks
            nproc_needed = min(len(roi_ids), nproc)
            nblocks = nproc_needed \
                      if self.nblocks is None else self.nblocks
            roi_blocks = np.array_split(roi_ids, nblocks)

            # the next block sets up the infrastructure for parallel computing
            # this can easily be changed into a ParallelPython loop, if we
            # decide to have a PP job server in PyMVPA
            import pprocess
            p_results = pprocess.Map(limit=nproc_needed)
            if __debug__:
                debug(
                    'SLC', "Starting off %s child processes for nblocks=%i" %
                    (nproc_needed, nblocks))
            compute = p_results.manage(pprocess.MakeParallel(self._proc_block))
            for iblock, block in enumerate(roi_blocks):
                # should we maybe deepcopy the measure to have a unique and
                # independent one per process?
                seed = mvpa2.get_random_seed()
                compute(block,
                        dataset,
                        copy.copy(self.__datameasure),
                        seed=seed,
                        iblock=iblock)
        else:
            # otherwise collect the results in an 1-item list
            p_results = [
                self._proc_block(roi_ids, dataset, self.__datameasure)
            ]

        # Finally collect and possibly process results
        # p_results here is either a generator from pprocess.Map or a list.
        # In case of a generator it allows to process results as they become
        # available
        result_ds = self.results_fx(
            sl=self,
            dataset=dataset,
            roi_ids=roi_ids,
            results=self.__handle_all_results(p_results))

        # Assure having a dataset (for paranoid ones)
        if not is_datasetlike(result_ds):
            try:
                result_a = np.atleast_1d(result_ds)
            except ValueError, e:
                if 'setting an array element with a sequence' in str(e):
                    # try forcing object array.  Happens with
                    # test_custom_results_fx_logic on numpy 1.4.1 on Debian
                    # squeeze
                    result_a = np.array(result_ds, dtype=object)
                else:
                    raise
            result_ds = Dataset(result_a)
Example No. 11
def pp_wavelength_loop(cube,
                       head,
                       wavels,
                       out_cube,
                       AO,
                       psfvars,
                       adrvals,
                       newsize,
                       outspax,
                       adr_switch='ON',
                       usecpus=mp.cpu_count() - 1):
    '''Function to take input datacube and process it iteratively through
    each wavelength channel as follows:
    - Generate PSF array for given channel
    - Add effect of ADR to channel
    - Convolve cube channel with PSF
    - Frebin up to chosen output spaxel scale

    Inputs:

        cube: Datacube
        head: Datacube header
        wavels: Wavelength array
        out_cube: Empty output cube
        AO: AO mode [LTAO, SCAO, Gaussian]
        psfvars: list containing [psfparams, psfspax, psfsize,
                                  [D,eps], res_jitter, seeing]
        adrvals: array of ADR values
        newsize: tuple containing (x_newsize, y_newsize) array sizes
        outspax: tuple containing (x, y) output spaxel scales
        adr_switch: On or OFF. Turns ADR effect on or off
        usecpus: no. of CPUs to use

    Output:

        cube: Processed cube
        head: Updated header
        inspax: Input spaxel scale (mas, mas)
        outspax: Output spaxel scale (mas, mas)

    '''
    import pprocess as pp

    print 'Using ', usecpus, ' CPUs'
    queue = pp.Queue(limit=usecpus)
    waveloop = queue.manage(pp.MakeParallel(pp_wavelength_channel))

    print "Initialising..."
    for lam in xrange(len(wavels)):
        waveloop(cube[lam, :, :], head, wavels[lam], lam, AO, psfvars,
                 adrvals[lam], newsize, outspax, adr_switch)

    print "Processing..."
    for chan, it in queue:
        update_progress(n.round(it / float(len(wavels)), 2))
        out_cube[it, :, :] = chan

    return out_cube, head
Example No. 12
def pp_wavelength_loop(cube,
                       head,
                       wavels,
                       out_cube,
                       newsize,
                       outspax,
                       usecpus=mp.cpu_count() - 1):
    '''Function to take input datacube and process it iteratively through
    each wavelength channel as follows:
    - Generate PSF array for given channel
    - Add effect of ADR to channel
    - Convolve cube channel with PSF
    - Frebin up to chosen output spaxel scale

    Inputs:

        cube: Datacube
        head: Datacube header
        wavels: Wavelength array
        out_cube: Empty output cube
        newsize: tuple containing (x_newsize, y_newsize) array sizes
        outspax: tuple containing (x, y) output spaxel scales
        usecpus: no. of CPUs to use

    Output:

        cube: Processed cube
        head: Updated header
        inspax: Input spaxel scale (mas, mas)
        outspax: Output spaxel scale (mas, mas)

    '''
    import pprocess as pp

    print 'Using ', usecpus, ' CPUs'
    queue = pp.Queue(limit=usecpus)
    waveloop = queue.manage(pp.MakeParallel(pp_wavelength_channel))

    print "Initialising..."
    for lam in xrange(len(wavels)):
        waveloop(cube[lam, :, :], head, wavels[lam], lam, newsize, outspax)

    print "Processing..."
    for chan, it in queue:
        update_progress(n.round(it / float(len(wavels)), 2))
        out_cube[it, :, :] = chan

    return out_cube, head
Example No. 13
def TetListScalars(NodeTetList, ScalarVal, AoM=0):
    # if AoM == 0, use the average; if AoM != 0, use the minimum associated with that point
    N1 = len(NodeTetList)
    NPP1 = N1 / LIM
    NodeScal = np.zeros((N1, ))
    results = pprocess.Map(limit=LIM)
    calc = results.manage(pprocess.MakeParallel(TetListScalarsSec))
    for j in range(LIM):
        calc(np.array(range(0, NPP1)) + j * NPP1, NodeTetList, ScalarVal, AoM)
    for j in range(LIM):
        NodeScal[np.array(range(0, NPP1)) + j * NPP1] = results[j]
    #for j in range(LIM):
    if N1 > LIM * NPP1:
        NodeScal[range(LIM * NPP1, N1)] = TetListScalarsSec(
            np.array(range(LIM * NPP1, N1)), NodeTetList, ScalarVal, AoM)
    return NodeScal
Example No. 14
    def __call__(self, callable, sequence, *args, **kw):

        "Wrap and invoke 'callable' for each element in the 'sequence'."

        if not isinstance(callable, pprocess.MakeParallel):
            wrapped = pprocess.MakeParallel(callable)
        else:
            wrapped = callable

        self.init()

        # Start processes for each element in the sequence.

        for i in sequence:
            self.start(wrapped, i, *args, **kw)

        # Access to the results occurs through this object.

        self.finish()
        return self
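This __call__ method gives the exchange object a map-like interface: wrap the callable once with MakeParallel (unless it already is wrapped), start one child process per element of the sequence, then block in finish() so the results can be read back from the object itself. Assuming the method lives on a pprocess.Map-style class (called ParallelMap here, a hypothetical name), usage would look roughly like this; pprocess also ships a convenience function, pprocess.pmap, with the same calling style:

import pprocess

def square(x):
    return x * x

results = ParallelMap(limit=4)       # hypothetical class defining __call__ above
for value in results(square, range(10)):
    print(value)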
Example No. 15
def ShapeHist(NC, Tri, FeatPoints, radius, thetaB=12, phiB=12, rhoB=6):
    # Construct Shape Context histogram for Feature points
    N1 = FeatPoints.size
    NPP1 = N1 / LIM
    print 'Get Triangle and Vertex normals'
    VNORM = vertexnormal(NC, Tri)
    KDTnc = KDTree(NC, 5)
    print 'Set up Polar Histogram for given points'
    PolarHist = np.zeros((N1, thetaB, phiB, rhoB))
    results = pprocess.Map(limit=LIM)
    calc = results.manage(pprocess.MakeParallel(HistP))
    for j in range(0, LIM):
        calc(
            np.array(range(0, NPP1)) + j * NPP1, FeatPoints, NC, VNORM, KDTnc,
            radius, wd, thetaB, phiB, rhoB)
    for j in range(0, LIM):
        PolarHist[np.array(range(0, NPP1)) + j * NPP1, ] = results[j]
    PolarHist[np.array(range(LIM * NPP1, N1)), ] = HistP(
        np.array(range(LIM * NPP1, N1)), FeatPoints, NC, VNORM, KDTnc, radius,
        wd, thetaB, phiB, rhoB)
    return PolarHist
Example No. 16
def LaplacMesh(nodes, NC, neighbList, Iter, listPos=0):
    # If listPos == 0: use the neighbour-list position matching the nodal position in nodes, i.e. len(neighbList) == nodes.size
    # If listPos != 0: use neighbList[ndnr] where nodes[i] == ndnr
    print "Laplacian Smooth"
    N1 = nodes.size
    NPP1 = N1 / LIM
    for i in range(Iter):
        print "	Iteration ", i + 1
        Umb = np.zeros((NC.shape))
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(MeshUmbrella))
        for j in range(0, LIM):
            calc(
                np.array(range(0, NPP1)) + j * NPP1, nodes, NC, neighbList,
                Umb, listPos)
        for j in range(0, LIM):
            Umb[nodes[np.array(range(0, NPP1)) + j * NPP1], :] = results[j]
        Umb[nodes[range(LIM * NPP1, N1)], :] = MeshUmbrella(
            range(LIM * NPP1, N1), nodes, NC, neighbList, Umb, listPos)
        NC = NC + Umb
    return NC
Example No. 17
def Get3DCube_multiprocess(csvpath, augpath, datapath):
    PPNum = 6
    augLines = ReadCSV(augpath)[1:]
    csvLines = ReadCSV(csvpath)[1:]
    LIDCPath = '/home/yanghan/data/LIDC-IDRI'
    KAGGLEPath = '/home/yanghan/data/stage1'
    lidc_dict = GetAbsolutePath(LIDCPath, KAGGLEPath, csvLines)
    list_pp = range(PPNum)
    group_num = len(csvLines) // PPNum
    cutPoint = np.empty([PPNum, 2], dtype=int)
    for row in range(PPNum):
        # start point
        cutPoint[row, 0] = row * group_num
        if row == PPNum - 1:
            # stop point
            cutPoint[row, 1] = len(csvLines)
        else:
            # stop point
            cutPoint[row, 1] = row * group_num + group_num - 1 + 1

    # starting parallel reading
    st = time.time()
    results = pprocess.Map()
    # Pass the callable itself to MakeParallel; the per-process arguments are
    # supplied on each parallel_function(...) call below.
    parallel_function = results.manage(pprocess.MakeParallel(write_csv))
    for args in list_pp:
        parallel_function(args, cutPoint[args, 0], cutPoint[args, 1],
                          lidc_dict, csvLines, augLines, datapath, 1)
    print('\nStarting Parallel time {:.2f} seconds...'.format(time.time() -
                                                              st))

    st = time.time()
    results[:]
    # parallel_results = results[:]
    print('\nParallel costs {:.2f} seconds...'.format(time.time() - st))
Example No. 18
def FeatPoints(Kmax, Kmin, NC, Tri, radius, alpha=0.5, beta=0.5):
    # Get feature points from computed maximum and minimum feature curvature
    print 'Get Triangle and Vertex normals'
    VNORM = vertexnormal(NC, Tri)
    print 'Use Shape Index for Feature Point extraction'
    SI = 0.5 - np.arctan((Kmax + Kmin) / (Kmax - Kmin)) / np.pi
    KDTnc = KDTree(NC, 5)
    N1 = NC.shape[0]
    NPP1 = N1 / LIM
    FeatPoints = np.array([np.nan] * NC.shape[0])
    results = pprocess.Map(limit=LIM)
    calc = results.manage(pprocess.MakeParallel(IsFeat))
    for j in range(0, LIM):
        calc(
            np.array(range(0, NPP1)) + j * NPP1, NC, VNORM, KDTnc, radius, SI,
            alpha, beta, FeatPoints)
    for j in range(0, LIM):
        FeatPoints[np.array(range(0, NPP1)) + j * NPP1] = results[j]
    FeatPoints[np.array(range(LIM * NPP1,
                              N1))] = IsFeat(np.array(range(LIM * NPP1, N1)),
                                             NC, VNORM, KDTnc, radius, SI,
                                             alpha, beta, FeatPoints)
    FeatPoints = FeatPoints[FeatPoints >= 0]
    return np.array(FeatPoints, int)
Example No. 19
flux_list = []
err_list = []
for x in freqs:
    flux_list.append(data["int_flux_"+x][brightsrcs])
# Error propagation: error on log(x) = err_x/x
    fitting_error = data["err_int_flux_"+x][brightsrcs]/data["int_flux_"+x][brightsrcs]
    err_list.append(np.sqrt(fitting_error**2 + calibration_error**2))
    
flux_array = np.transpose(np.ma.vstack(flux_list)).astype("float32")
flux_array = np.ma.log(flux_array)
flux_errors = np.transpose(np.ma.vstack(err_list)).astype("float32")
#names = data["Name"][brightsrcs]

#weights = 1/(flux_errors*flux_errors)

results = pprocess.Map(limit=cores)
calc = results.manage(pprocess.MakeParallel(fit_spectrum))

for i in range(0,len(brightsrcs)):
    calc(freq_array,flux_array[i],flux_errors[i]) # ,options.plot)

# Unpack results
alpha, err_alpha, amp, err_amp, chi2red = map(list, zip(*results))

# Convert to numpy arrays
alpha = np.array(alpha, dtype="float32")
err_alpha = np.array(err_alpha, dtype="float32")
amp = np.array(amp, dtype="float32")
err_amp = np.array(err_amp, dtype="float32")
chi2red = np.array(chi2red, dtype="float32")

# Exclude any sources which came out with NaN alphas or amps
Example No. 20
def OptINT(inner, outer, NC, Tet, neighbListTet, iterations, boundPenalty=10):
    N1 = NC.shape[0]
    NPP1 = N1 / LIM
    NCcur = np.r_[NC]
    T1 = Tet.shape[0]
    TPP1 = T1 / LIM
    print 'Submesh Optimization on ', inner.size, ' of ', NC.shape[0], ' nodes'
    for i in range(iterations):
        print '	Iteration ', i + 1
        NCprev = np.r_[NCcur]
        EQ, delt, Sn2, Sig = elemQual_mu(np.array(range(Tet.shape[0])), NCprev,
                                         Tet)
        np.ma.dump(EQ, 'ElQual' + str(i + 1))
        #EQ = np.ma.load('ElQual1')
        print '			Max ', np.max(EQ), '; Min ', np.min(
            EQ), '; Average ', np.average(EQ)
        print '			number of degenerate elements : ', np.where(Sig < 0)[0].size
        print '		build optimization matrix G'
        global Gmat
        Gmat = ps.spmatrix.ll_mat(NC.shape[0] + outer.size, NC.shape[0])
        Gmat.put(1, np.array(range(NC.shape[0])), np.array(range(NC.shape[0])))
        rows = np.array(range(outer.size)) + NC.shape[0]
        Gmat.put(1, rows, outer)
        print '			number of initial non-zeros: ', Gmat.nnz
        EQinv = 1 / EQ
        GMparts = [[]] * LIM
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(US_Gmat))
        for j in range(LIM):
            calc(
                np.array(range(0, NPP1)) + j * NPP1, NCprev, Tet,
                neighbListTet, EQinv, Sig, j)
        for j in range(LIM):
            vals, rows, cols = results[j]
            #for j in range(LIM):
            Gmat.put(vals, rows, cols)
        if N1 > LIM * NPP1:
            vals, rows, cols = US_Gmat(np.array(range(LIM * NPP1, N1)), NCprev,
                                       Tet, neighbListTet, EQinv, Sig, 1)
            Gmat.put(vals, rows, cols)
        print '			final number of non-zeros: ', Gmat.nnz
        Gmat.export_mtx('Gmat' + str(i + 1))
        print '		do optimization using conjugate gradient'
        GTG = ps.spmatrix.dot(Gmat, Gmat)
        gVec = ps.spmatrix.ll_mat(NC.shape[0] + outer.size, 3)
        for j in outer:
            rows = np.array([j, j, j]) + NC.shape[0]
            cols = np.array([0, 1, 2])
            vals = NC[j, ]
            gVec.put(vals, rows, cols)
        GTg = ps.spmatrix.dot(Gmat, gVec)
        GTgNUMPY = np.zeros(NC.shape)
        GTgNUMPY[GTg.keys()] = GTg.values()
        mu = GTg.norm('inf')
        mu = mu * mu
        if mu < 1:
            mu = 1
        GTG.update_add_at(mu * np.ones((NC.shape[0], )),
                          np.array(range(NC.shape[0])),
                          np.array(range(NC.shape[0])))
        RHS = GTgNUMPY + mu * NCprev
        X, Y, Z = np.zeros((NC.shape[0], )), np.zeros(
            (NC.shape[0], )), np.zeros((NC.shape[0], ))
        print 'X'
        infx, itex, resx = ps.itsolvers.cgs(GTG, RHS[:, 0], X, 0.00000000001,
                                            100)
        print 'Y'
        infy, itey, resy = ps.itsolvers.cgs(GTG, RHS[:, 1], Y, 0.00000000001,
                                            100)
        print 'Z'
        infz, itez, resz = ps.itsolvers.cgs(GTG, RHS[:, 2], Z, 0.00000000001,
                                            100)
        NCcur = np.c_[X, Y, Z]
        np.ma.dump(NCcur, 'NCopt_temp' + str(i + 1))
    return NCcur
Example No. 21
def elasticsurf(NCB,
                ConnectB,
                LandmB,
                LandmB_NC,
                AllowableBI,
                NCT,
                ConnectT,
                AllowableT,
                UseN_B,
                UseN_T,
                k_max,
                USENORMALS,
                gamm=2,
                sigm0=10,
                f=1.0715):
    # Elastic surface registration:
    #inputs:
    # NCB,NCT: nodal coordinates of base and target surfaces
    # ConnectB;ConnectT: Base&target connectivity
    # LandmB,LandmB_NC: landmarks that must have a 1-to-1 correspondence (input 0 if no landmarks are present)
    # UseN_B & AllowableB: feature-dependent nodes on the Base mesh (indices in NCB) and allowable triangles to match.
    # UseN_T & AllowableT: selective feature-preserving nodes and triangles (indices in NCT and ConnectT) on the target mesh.
    # k_max: maximum number of iterations
    ######## ADDITIONAL SETTINGS REQUIRED ARE SET INTERNAL TO CODE #########
    print
    print "SELECTIVE MESH MORPHING ALGORITHM USING ELASTIC SURFACE REGISTRATION"
    print "	-G.J.J.v.Rensburg - 22/04/2010-"
    t_start = time.clock()
    ConnectB = np.array(ConnectB, int)
    ConnectT = np.array(ConnectT, int)
    #LandmB = np.array(LandmB[:,0],int)		# do -1 later to be consistent with python indexing, first need to do other "temporary landmarks"& check that they dont fall on actual landmark positions!

    # Settings for elastic surface registration:
    m = 20  # nearest neighbour parameter
    alph = 0.5  # normalization factor
    #gamm=2 # smoothing parameter1
    #sigm0=10 # smoothing parameter2
    #f=1.0715 # smoothing parameter3
    Tol = 0.0001  # stopping criteria

    # determine N1,N2,T1 and T2:
    N1 = NCB.shape[0]
    N2 = NCT.shape[0]
    T1 = ConnectB.shape[0]
    T2 = ConnectT.shape[0]
    NL = LandmB.shape[0]
    # For parallel programming divide the number of computations by the number of parallel processes (LIM)
    NPP1 = N1 / LIM
    NPP2 = N2 / LIM

    ################################     INITIALIZE & NODES OF CONCERN:    #################################
    ########################################################################################################
    print
    print
    print "Set up 1-ring neighbor list for all points on the generic mesh"
    #neighbList = [[0]]*N1
    #results = pprocess.Map(limit=LIM)
    #calc = results.manage(pprocess.MakeParallel(Get1neigh))
    #for j in range(0,LIM):
    #calc(np.array(range(0,NPP1))+j*NPP1,NCB,ConnectB)
    #for j in range(0,LIM):
    #neighbList[j*NPP1:(1+j)*NPP1] = results[j]
    #neighbList[LIM*NPP1:N1]=Get1neigh(np.array(range(LIM*NPP1,N1)),NCB,ConnectB)
    #np.ma.dump(neighbList,'SkullSurf_neighbList')
    neighbList = np.ma.load('SkullSurf_neighbList')

    print
    print "INITIALIZE SURFACE DEFORMATION"
    CONV = []
    print " 	enquire nodes where required displacement is checked"
    ###remove Landmarks from FDNB and SFPNT:
    #for i in range(0,NL):
    #if find_repeats(np.r_[UseN_B,LandmB[i,]])[0].size>0:
    #r=np.where(UseN_B==LandmB[i,])[0]
    #UseN_B = np.r_[UseN_B[0:r,],UseN_B[r+1:UseN_B.size,]]
    SamplingB = UseN_B.size
    SamplingT = UseN_T.size
    ## Full list of nodes used in Surface registration:
    LMB = np.r_[
        UseN_B]  #,LandmB]	# Last NL entries are reserved for Landmarks that HAVE TO FIT points on the target mesh
    LMT = np.r_[UseN_T]

    # For parallel programming divide the number of computations by the number of parallel processes (LIM)
    SBPP = SamplingB / LIM
    STPP = SamplingT / LIM
    FMorph = 0

    print
    print "COARSE SURFACE REGISTRATION"
    #print "	Compute known displacement for Base_Landmarks "
    #knownC = NCB[LandmB,]
    #knownD = LandmB_NC-knownC
    ####print "	using landmark displacements to deform using RBF"
    ####W_km1 = RBFmorph(NCB,knownC,knownD)
    ####tic = time.clock()
    ####W_km1 = MeshSmooth(W_km1,neighbList,10)
    ####print "		Smoothing done in ",time.clock()-tic," seconds"
    ####np.ma.dump(W_km1,'TempElasNodes_Iter'+str(k-1)+'_Time'+time.ctime())
    #print 'Smooth Gaussian Weight deformation to align Landmarks to target positions'
    #k=0
    #Err = 2
    #W_km1 = np.r_[NCB]
    #while (k<100)|(Err>Tol):
    #k=k+1
    #print 'Iteration : ',k
    #DS = np.zeros((N1,3))
    #knownC = W_km1[LandmB,]
    #knownD = LandmB_NC-knownC
    #knownD[np.isnan(knownD)]=0
    ## Deform mesh using Gaussian smoothing as suggested in paper by R.Bryan et al.
    #sigma_k2 = np.power(np.power(f,-k)*20,2)
    #results = pprocess.Map(limit=LIM)
    #calc = results.manage(pprocess.MakeParallel(GaussianSmooth))
    #for j in range(0,LIM):
    #calc(np.array(range(0,NPP1))+j*NPP1,W_km1,knownC,knownD,sigma_k2,gamm)
    #for j in range(0,LIM):
    #DS[np.array(range(0,NPP1))+j*NPP1,:] = results[j]
    #DS[range(LIM*NPP1,N1),:]=GaussianSmooth(np.array(range(LIM*NPP1,N1)),W_km1,knownC,knownD,sigma_k2,gamm)
    #DS[np.isnan(DS)]=0
    #W_km1 = W_km1+DS
    #Err = np.sum(np.sqrt(np.sum(DS*DS,1)),0)/N1
    #W_km1 = MeshSmooth(W_km1,neighbList,10)
    #np.ma.dump(W_km1,'TempElasNodes_Iter0_TimeWedMar14_2011_20')
    ###np.ma.dump(W_km1,'TempElasNodes_Iter0_Time'+time.ctime())
    W_km1 = NCB

    ################################    MAIN MESH DEFORMATION ALGORITHM:   #################################
    ########################################################################################################
    k = 1
    print
    print "ELASTIC SURFACE REGISTRATION"
    print "determine vertex normals of target surface"
    #Compute target-mesh triangle centroids:
    print "determining centroids of target surface triangles"
    S_2_centr = np.c_[np.sum(
        np.c_[NCT[ConnectT[:, 0], 0], NCT[ConnectT[:, 1], 0],
              NCT[ConnectT[:, 2], 0]], 1) / 3,
                      np.sum(
                          np.c_[NCT[ConnectT[:, 0], 1], NCT[ConnectT[:, 1], 1],
                                NCT[ConnectT[:, 2], 1]], 1) / 3,
                      np.sum(
                          np.c_[NCT[ConnectT[:, 0], 2], NCT[ConnectT[:, 1], 2],
                                NCT[ConnectT[:, 2], 2]], 1) / 3]
    print "determine triangle and vertex normals of target surface"
    TNORMT = np.cross(NCT[ConnectT[:, 1], :] - NCT[ConnectT[:, 0], :],
                      NCT[ConnectT[:, 2], :] - NCT[ConnectT[:, 0], :])
    TNORMT = (TNORMT.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([TNORMT * TNORMT]), 2)))).T
    VNORMT = vrtxnormal(NCT, ConnectT, S_2_centr, TNORMT)

    print "determining kd-trees of target surface centroids and nodal coordinates"
    KDT_TC = KDTree(S_2_centr, m)
    KDT_TN = KDTree(NCT, m)

    print 'initialize absolute Gaussian weight for final displacement to preserve element quality'
    GW = np.ones((SamplingB + SamplingT, 1))

    while k <= k_max:
        D1 = np.zeros((SamplingB, 3))
        D2 = np.zeros((SamplingT, 3))
        DS = np.zeros((N1, 3))
        AllowableB = np.r_[AllowableBI]
        print
        print "MESH DEFORMATION ITERATION", k
        print "	determining known displacement of landmarks"
        if NL > 0:
            knownD = LandmB_NC - W_km1[LandmB, ]
        print "	determining centroids of deforming mesh"
        W_km1_centr = np.c_[
            np.sum(
                np.c_[W_km1[ConnectB[:, 0], 0], W_km1[ConnectB[:, 1], 0],
                      W_km1[ConnectB[:, 2], 0]], 1) / 3,
            np.sum(
                np.c_[W_km1[ConnectB[:, 0], 1], W_km1[ConnectB[:, 1], 1],
                      W_km1[ConnectB[:, 2], 1]], 1) / 3,
            np.sum(
                np.c_[W_km1[ConnectB[:, 0], 2], W_km1[ConnectB[:, 1], 2],
                      W_km1[ConnectB[:, 2], 2]], 1) / 3]
        print "	determine triangle and vertex normals of deforming surface"
        TNORMB = np.cross(W_km1[ConnectB[:, 1], :] - W_km1[ConnectB[:, 0], :],
                          W_km1[ConnectB[:, 2], :] - W_km1[ConnectB[:, 0], :])
        TNORMB = (TNORMB.T / (np.ones(
            (3, 1)) * np.sqrt(np.sum(np.array([TNORMB * TNORMB]), 2)))).T
        VNORMB = vrtxnormal(W_km1, ConnectB, W_km1_centr, TNORMB)

        print "	determining kd-tree of current deforming surface centroids and nodal coordinates"
        KDT_KC = KDTree(W_km1_centr, m)
        KDT_KN = KDTree(W_km1, m)
        #if find_repeats(np.r_[USENORMALS,k])[0].size>0:
        #print " ###	Use triangle and vertex normals in setting up point correspondence"
        print "		setting up D1(i,d)"
        tic = time.clock()
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(DsetupNorm))
        for j in range(0, LIM):
            calc(
                np.array(range(0, SBPP)) + j * SBPP, W_km1, VNORMB, NCT,
                TNORMT, VNORMT, ConnectT, S_2_centr, AllowableT, LMB, D1)
        for j in range(0, LIM):
            D1[np.array(range(0, SBPP)) + j * SBPP, :] = results[j]
        D1[range(LIM * SBPP, SamplingB), :] = DsetupNorm(
            range(LIM * SBPP, SamplingB), W_km1, VNORMB, NCT, TNORMT, VNORMT,
            ConnectT, S_2_centr, AllowableT, LMB, D1)
        #D1=np.r_[D1,knownD]
        print "			", time.clock() - tic, " seconds"
        print "		update allowable triangles on generic mesh:"
        remP = D1[:, 0] + D1[:, 1] + D1[:, 2] == 0
        removeP = LMB[remP]
        print "			unregistered points on generic mesh: ", removeP.size
        print "			number of original generic triangles allowed: ", AllowableB.shape[
            0]
        for rp in removeP:
            rowsNo = np.where(AllowableB == rp)[0]
            rowsNo.sort()
            for rr in rowsNo[::-1]:
                AllowableB = AllowableB[np.where(
                    np.arange(AllowableB.shape[0]) != rr)[0], ]
        print "			number of generic triangles allowed for current iteration: ", AllowableB.shape[
            0]
        if find_repeats(np.r_[USENORMALS, k])[0].size > 0:
            print " ###	Use triangle and vertex normals in setting up point correspondence"
            print "		setting up D2(j,c)"
            tic = time.clock()
            results = pprocess.Map(limit=LIM)
            calc = results.manage(pprocess.MakeParallel(DsetupNorm))
            for j in range(0, LIM):
                calc(
                    np.array(range(0, STPP)) + j * STPP, NCT, VNORMT, W_km1,
                    TNORMB, VNORMB, ConnectB, W_km1_centr, AllowableB, LMT, D2)
            for j in range(0, LIM):
                D2[np.array(range(0, STPP)) + j * STPP, :] = results[j]
            D2[range(LIM * STPP, SamplingT), :] = DsetupNorm(
                range(LIM * STPP, SamplingT), NCT, VNORMT, W_km1, TNORMB,
                VNORMB, ConnectB, W_km1_centr, AllowableB, LMT, D2)
            print "			", time.clock() - tic, " seconds"
        else:
            print "	Simple closest point search iteration "
            #print "		setting up D1(i,d)"
            #tic = time.clock()
            #results = pprocess.Map(limit=LIM)
            #calc = results.manage(pprocess.MakeParallel(Dsetup))
            #for j in range(0,LIM):
            #calc(np.array(range(0,SBPP))+j*SBPP,W_km1,NCT,ConnectT,S_2_centr,AllowableT,LMB,D1,KDT_TC,KDT_TN)
            #for j in range(0,LIM):
            #D1[np.array(range(0,SBPP))+j*SBPP,:] = results[j]
            #D1[range(LIM*SBPP,SamplingB),:]=Dsetup(range(LIM*SBPP,SamplingB),W_km1,NCT,ConnectT,S_2_centr,AllowableT,LMB,D1,KDT_TC,KDT_TN)
            ##D1=np.r_[D1,knownD]
            #print "			",time.clock()-tic," seconds"
            #remP = D1[:,0]+D1[:,1]+D1[:,2]==0
            #removeP = LMB[remP]
            #print "			unregistered points on generic mesh: ",removeP.size
            #print "			number of original generic triangles allowed: ",AllowableB.shape[0]
            #for rp in removeP:
            #rowsNo = np.where(AllowableB==rp)[0]
            #rowsNo.sort
            #for rr in rowsNo[::-1]:
            #AllowableB = AllowableB[np.where(range(AllowableB.shape[0])<>rr)[0],]
            #print "			number of generic triangles allowed for current iteration: ",AllowableB.shape[0]
            print "		setting up D2(j,c)"
            tic = time.clock()
            results = pprocess.Map(limit=LIM)
            calc = results.manage(pprocess.MakeParallel(Dsetup))
            for j in range(0, LIM):
                calc(
                    np.array(range(0, STPP)) + j * STPP, NCT, W_km1, ConnectB,
                    W_km1_centr, AllowableB, LMT, D2, KDT_KC, KDT_KN)
            for j in range(0, LIM):
                D2[np.array(range(0, STPP)) + j * STPP, :] = results[j]
            D2[range(LIM * STPP, SamplingT), :] = Dsetup(
                range(LIM * STPP, SamplingT), NCT, W_km1, ConnectB,
                W_km1_centr, AllowableB, LMT, D2, KDT_KC, KDT_KN)
            print "			", time.clock() - tic, " seconds"

        # Compute displacement update for each node using suggested Gaussian radial basis function:
        print "	determining smoothed displacement field"

        tic = time.clock()
        NCp = np.r_[W_km1[LMB, :], NCT[LMT, :] + D2]
        DD = np.r_[D1, -D2]
        # Mask Nan and Inf values if any:
        DD[np.isnan(DD)] = 0
        DD[np.isinf(DD)] = 0
        #keepP = DD[:,0]+DD[:,1]+DD[:,2]<>0
        #print keepP
        #NCp,DD = NCp[keepP,:],DD[keepP,:]
        #KDTp = KDTree(NCp,5)
        # Deform mesh using Gaussian smoothing as suggested in paper by R.Bryan et al.
        sigma_k2 = np.power(np.power(f, -k) * sigm0, 2)
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(GaussianSmooth))
        for j in range(0, LIM):
            calc(
                np.array(range(0, NPP1)) + j * NPP1, W_km1, NCp, DD, sigma_k2,
                gamm)
        for j in range(0, LIM):
            DS[np.array(range(0, NPP1)) + j * NPP1, :] = results[j]
        DS[range(LIM * NPP1, N1), :] = GaussianSmooth(
            np.array(range(LIM * NPP1, N1)), W_km1, NCp, DD, sigma_k2, gamm)
        print "			", time.clock() - tic, " seconds"

        # Mask Nan and Inf if any:
        DS[np.isnan(DS)] = 0
        DS[np.isinf(DS)] = 0

        #print 'Check if current iteration reduces element quality to below allowable and stiffen mesh accordingly'
        print
        print
        print 'Convergence History'
        print CONV
        print
        print

        # Determine Jacobian of all elements and, if unsatisfied, apply stiffening (decrease GW < 1) until this doesn't happen

        # determine whether convergence is achieved
        #TotalMorph = np.sum(np.sqrt(np.sum(DS*DS,1)),0)/NCB.shape[0]
        TotalMorph = np.sum(np.sqrt(np.sum(DD * DD, 1))) / (DD.size / 3)
        CONV = CONV + [TotalMorph]
        FMorph = (k == 1) * TotalMorph + FMorph
        print "	average nodal displacement for current deformation iteration:"
        print TotalMorph
        if (TotalMorph < Tol):
            print
            print "CONVERGED SOLUTION OBTAINED"
            #CONV = CONV + [TotalMorph]
            k = k_max * 10 + 1
            W_km1 = W_km1 + DS
        elif (k < 10) | (TotalMorph < 10 * FMorph):
            print "problem not yet converged at iteration", k
            #CONV = CONV + [TotalMorph]
            k = k + 1
            # Deform mesh:
            print "	deforming mesh (update of W_{k-1})"
            W_km1 = W_km1 + DS
            #np.ma.dump(W_km1,'Femur2NC_'+str(k))
        else:
            print "PROBLEM DIVERGING"
            k = k_max * 10 - 1

        #np.ma.dump(W_km1,'TempElasNodes_Iter'+str(k-1)+'_Time'+time.ctime())
        if (k > 2) & (np.mod(k - 1, 5) == 0):
            print
            #np.ma.dump(W_km1,'TempElasNodes_Iter'+str(k-1)+'_Time'+time.ctime())
            #W_km1 = RBFmorph(W_km1,W_km1[LandmB,],LandmB_NC-W_km1[LandmB,])
            tic = time.clock()
            W_km1 = MeshSmooth(W_km1, neighbList, 10)
            np.ma.dump(
                W_km1, 'SkullUnique2_gamm' + str(gamm) + '_sigN' + str(sigm0) +
                '_iter' + str(k - 1))
            print "		Smoothing done in ", time.clock() - tic, " seconds"
        #print "COARSE SURFACE REGISTRATION"
        #print "	using landmark displacements to deform using RBF"
        #W_km1 = RBFmorph(W_km1,W_km1[LandmB,],LandmB_NC-W_km1[LandmB,])
    print

    if k == k_max + 1:
        print
        print "SOLUTION TERMINATED: maximum iterations,(", k_max, ") reached"
    print
    print "TOTAL TIME FOR ELASTIC SURFACE REGISTRATION : ", time.clock(
    ) - t_start, "seconds"
    CONV = np.array(CONV)
    return W_km1, CONV
Example No. 22
def voxel_selection(vol_surf_mapping,
                    radius,
                    source_surf=None,
                    source_surf_nodes=None,
                    distance_metric='dijkstra',
                    eta_step=10,
                    nproc=None,
                    outside_node_margin=None,
                    results_backend=None,
                    tmp_prefix='tmpvoxsel'):
    """
    Voxel selection for multiple center nodes on the surface

    Parameters
    ----------
    vol_surf_mapping: volsurf.VolSurfMapping
        Contains gray and white matter surface, and volume geometry
    radius: int or float
        Size of searchlight. If an integer, then it indicates the number of
        voxels. If a float, then it indicates the radius of the disc
    source_surf: surf.Surface or None
        Surface used to compute distance between nodes. If omitted, it is
        the average of the gray and white surfaces.
    source_surf_nodes: list of int or numpy array or None
        Indices of nodes in source_surf that serve as searchlight center.
        By default every node serves as a searchlight center.
    distance_metric: str
        Distance metric between nodes. 'euclidean' or 'dijkstra' (default)
    eta_step: int
        Report progress every eta_step (default: 10).
    nproc: int or None
        Number of parallel threads. None means as many threads as the
        system supports. The pprocess module is required for parallel threads; if
        it cannot be used, then a single thread is used.
    outside_node_margin: float or True or None (default)
        By default nodes outside the volume are skipped; using this
        parameter allows for a margin. If this value is a float (possibly
        np.inf), then all nodes within outside_node_margin Dijkstra
        distance from any node within the volume are still assigned
        associated voxels. If outside_node_margin is True, then a node is
        always assigned voxels regardless of its position in the volume.
    results_backend : 'native' or 'hdf5' or None (default).
        Specifies the way results are provided back from a processing block
        in case of nproc > 1. 'native' is pickling/unpickling of results by
        pprocess, while 'hdf5' would use h5save/h5load functionality.
        'hdf5' might be more time and memory efficient in some cases.
        If None, then 'hdf5' is used if available, else 'native'.
    tmp_prefix : str, optional
        If specified -- serves as a prefix for temporary files storage
        if results_backend == 'hdf5'.  Thus can specify the directory to use
        (trailing file path separator is not added automagically).

    Returns
    -------
    sel: volume_mask_dict.VolumeMaskDictionary
        Voxel selection results that associate, with each node, the indices
        of the surrounding voxels.
    """

    # construct the intermediate surface, which is used
    # to measure distances
    intermediate_surf = (vol_surf_mapping.pial_surface * .5) + \
                        (vol_surf_mapping.white_surface * .5)

    if source_surf is None:
        source_surf = intermediate_surf
    else:
        source_surf = surf.from_any(source_surf)

    if _debug():
        debug(
            'SVS', "Generated high-res intermediate surface: "
            "%d nodes, %d faces" %
            (intermediate_surf.nvertices, intermediate_surf.nfaces))
        debug(
            'SVS', "Mapping source to high-res surface:"
            " %d nodes, %d faces" %
            (source_surf.nvertices, source_surf.nfaces))

    if distance_metric[0].lower() == 'e' and outside_node_margin:
        # euclidean distance: identity mapping
        # this is *slow*
        n = source_surf.nvertices
        xyz = source_surf.vertices
        src2intermediate = dict((i, tuple(xyz[i])) for i in xrange(n))
    else:
        # find a mapping from nodes in source_surf to those in
        # intermediate surface
        src2intermediate = source_surf.map_to_high_resolution_surf(\
                                                        intermediate_surf)

    # if no sources are given, then visit all nodes
    if source_surf_nodes is None:
        source_surf_nodes = np.arange(source_surf.nvertices)

    n = len(source_surf_nodes)

    if _debug():
        debug('SVS', "Performing surface-based voxel selection"
              " for %d centers" % n)

    # visit in random order, for a better ETA estimate
    visitorder = list(np.random.permutation(len(source_surf_nodes)))

    # construct mapping from nodes to enclosing voxels
    n2v = vol_surf_mapping.get_node2voxels_mapping()

    if __debug__:
        debug('SVS', "Generated mapping from nodes" " to intersecting voxels")

    # build voxel selector
    voxel_selector = VoxelSelector(radius,
                                   intermediate_surf,
                                   n2v,
                                   distance_metric,
                                   outside_node_margin=outside_node_margin)

    if _debug():
        debug('SVS', "Instantiated voxel selector (radius %r)" % radius)

    # structure to keep output data. Initialize with None, then
    # make a sparse_attributes instance when we know what the attributes are
    node2volume_attributes = None

    attribute_mapper = voxel_selector.disc_voxel_indices_and_attributes

    srcs_order = [source_surf_nodes[node] for node in visitorder]
    src_trg_nodes = [(src, src2intermediate[src]) for src in srcs_order]

    if nproc is not None and nproc > 1 and not externals.exists('pprocess'):
        raise RuntimeError("The 'pprocess' module is required for "
                           "multiprocess searchlights. Please either "
                           "install python-pprocess, or reduce `nproc` "
                           "to 1 (got nproc=%i) or set to default None" %
                           nproc)

    if nproc is None:
        if externals.exists('pprocess'):
            try:
                import pprocess
                nproc = pprocess.get_number_of_cores() or 1
                if _debug():
                    debug("SVS", 'Using pprocess with %d cores' % nproc)
            except:
                if _debug():
                    debug("SVS", 'pprocess not available')

        if nproc is None:
            # importing pprocess failed - so use a single core
            nproc = 1
            debug("SVS", 'Using %d cores - pprocess not available' % nproc)

    # get the voxel selection parameters
    parameter_dict = vol_surf_mapping.get_parameter_dict()
    parameter_dict.update(dict(radius=radius,
                               outside_node_margin=outside_node_margin,
                               distance_metric=distance_metric),
                          source_nvertices=source_surf.nvertices)

    init_output = lambda: volume_mask_dict.VolumeMaskDictionary(
        vol_surf_mapping.volgeom, intermediate_surf, meta=parameter_dict)

    if nproc > 1:
        if results_backend == 'hdf5':
            externals.exists('h5py', raise_=True)
        elif results_backend is None:
            if externals.exists(
                    'h5py') and externals.versions['hdf5'] >= '1.8.7':
                results_backend = 'hdf5'
            else:
                results_backend = 'native'
        if _debug():
            debug('SVS', "Using '%s' backend" % (results_backend, ))

        if not results_backend in ('native', 'hdf5'):
            raise ValueError('Illegal results backend %r' % results_backend)

        import pprocess
        n_srcs = len(src_trg_nodes)
        blocks = np.array_split(np.arange(n_srcs), nproc)

        results = pprocess.Map(limit=nproc)
        reducer = results.manage(pprocess.MakeParallel(_reduce_mapper))

        if __debug__:
            debug('SVS', "Starting %d child processes", (len(blocks), ))

        for i, block in enumerate(blocks):
            empty_dict = init_output()

            src_trg = []
            for idx in block:
                src_trg.append(src_trg_nodes[idx])

            if _debug():
                debug('SVS',
                      "  starting block %d/%d: %d centers" %
                      (i + 1, nproc, len(src_trg)),
                      cr=True)

            reducer(empty_dict,
                    attribute_mapper,
                    src_trg,
                    eta_step=eta_step,
                    proc_id='%d' % (i + 1, ),
                    results_backend=results_backend,
                    tmp_prefix=tmp_prefix)
        if _debug():
            debug('SVS', '')
            debug('SVS', 'Started all %d child processes' % (len(blocks)))
            tstart = time.time()

        node2volume_attributes = None
        for i, result in enumerate(results):
            if result is None:
                continue

            if results_backend == 'hdf5':
                result_fn = result
                result = h5load(result_fn)
                os.remove(result_fn)

            if node2volume_attributes is None:
                # first time we have actual results.
                # Use as a starting point
                node2volume_attributes = result
                if _debug():
                    debug('SVS', '')
                    debug(
                        'SVS', "Merging results from %d child "
                        "processes using '%s' backend" %
                        (len(blocks), results_backend))
            else:
                # merge new with current data
                node2volume_attributes.merge(result)
            if _debug():
                debug('SVS',
                      "  merged result block %d/%d" % (i + 1, nproc),
                      cr=True)

        if _debug():
            telapsed = time.time() - tstart
            debug('SVS', "")
            debug(
                'SVS', 'Merged results from %d child processes - '
                'took %s' % (len(blocks), seconds2prettystring(telapsed)))

    else:
        empty_dict = init_output()
        node2volume_attributes = _reduce_mapper(empty_dict,
                                                attribute_mapper,
                                                src_trg_nodes,
                                                eta_step=eta_step)
        debug('SVS', "")

    if _debug():
        if node2volume_attributes is None:
            msgs = [
                "Voxel selection completed: none of %d nodes have "
                "voxels associated" % len(visitorder)
            ]
        else:
            nvox_selected = np.sum(node2volume_attributes.get_mask() != 0)
            vg = vol_surf_mapping.volgeom

            msgs = [
                "Voxel selection completed: %d / %d nodes have "
                "voxels associated" %
                (len(node2volume_attributes.keys()), len(visitorder)),
                "Selected %d / %d  voxels (%.0f%%) in the mask at least once" %
                (nvox_selected, vg.nvoxels_mask,
                 100. * nvox_selected / vg.nvoxels_mask)
            ]

        for msg in msgs:
            debug("SVS", msg)

    if node2volume_attributes is None:
        warning('No voxels associated with any of %d nodes' % len(visitorder))
    return node2volume_attributes
Example No. 23
def lineRST(NCB,
            RlinesB,
            VlinesB,
            NCT,
            RlinesT,
            VlinesT,
            UseFeat=0,
            UseScale=0,
            Use1Scale=1):
    # Use lines of curvature on two models to determine rigid body transformation for best fit
    # Takes as input the nodal coordinates of the two surface meshes as well as the ridge and valley lines of the two.
    # Target mesh is then rotated, scaled and translated to best fit the corresponding lines of curvature on the Base mesh
    #global RsegmB,VsegmB,RsegmT,VsegmT
    #global RnodesB,VnodesB,RnodesT,VnodesT
    if UseFeat == 1:
        [RsegmB, VsegmB, RsegmT, VsegmT] = [
            np.array([[], [], []]).reshape((0, 3)),
            np.array([[], [], []]).reshape((0, 3)),
            np.array([[], [], []]).reshape((0, 3)),
            np.array([[], [], []]).reshape((0, 3))
        ]
        [RnodesB, VnodesB, RnodesT, VnodesT] = [
            np.array([[], []]).reshape((0, 2)),
            np.array([[], []]).reshape((0, 2)),
            np.array([[], []]).reshape((0, 2)),
            np.array([[], []]).reshape((0, 2))
        ]
        # Transform Lines into linesegments with i'th segment allocated as [NC_point1,NC_point2,Line_nr]
        #	and list of nodes allocated as [Nd_nr, Line_nr]
        for i in range(1, RlinesB[0] + 1):
            Lsize = RlinesB[i].size
            RnodesB = np.array(
                np.r_[RnodesB, np.c_[RlinesB[i],
                                     np.ones((Lsize, 1)) * i]], int)
            RsegmB = np.array(
                np.r_[RsegmB, np.c_[RlinesB[i][0:Lsize - 1],
                                    RlinesB[i][1:Lsize],
                                    np.ones((Lsize - 1, 1)) * i]], int)
        for i in range(1, VlinesB[0] + 1):
            Lsize = VlinesB[i].size
            VnodesB = np.array(
                np.r_[VnodesB, np.c_[VlinesB[i],
                                     np.ones((Lsize, 1)) * i]], int)
            VsegmB = np.array(
                np.r_[VsegmB, np.c_[VlinesB[i][0:Lsize - 1],
                                    VlinesB[i][1:Lsize],
                                    np.ones((Lsize - 1, 1)) * i]], int)
        for i in range(1, RlinesT[0] + 1):
            Lsize = RlinesT[i].size
            RnodesT = np.array(
                np.r_[RnodesT, np.c_[RlinesT[i],
                                     np.ones((Lsize, 1)) * i]], int)
            RsegmT = np.array(
                np.r_[RsegmT, np.c_[RlinesT[i][0:Lsize - 1],
                                    RlinesT[i][1:Lsize],
                                    np.ones((Lsize - 1, 1)) * i]], int)
        for i in range(1, VlinesT[0] + 1):
            Lsize = VlinesT[i].size
            VnodesT = np.array(
                np.r_[VnodesT, np.c_[VlinesT[i],
                                     np.ones((Lsize, 1)) * i]], int)
            VsegmT = np.array(
                np.r_[VsegmT, np.c_[VlinesT[i][0:Lsize - 1],
                                    VlinesT[i][1:Lsize],
                                    np.ones((Lsize - 1, 1)) * i]], int)
        # find average nodal coordinate of base linesegments
        RsegBNC, VsegBNC = (NCB[RsegmB[:, 0], ] + NCB[RsegmB[:, 1], ]) / 2, (
            NCB[VsegmB[:, 0], ] + NCB[VsegmB[:, 1], ]) / 2
        # set up k-d tree of nodes and segments in Base model lines
        kdt_RBS, kdt_VBS = KDTree(RsegBNC, 5), KDTree(VsegBNC, 5)
    else:
        N1, N2 = NCB.shape[0], NCT.shape[0]
        NPP1 = N1 / LIM
        NPP2 = N2 / LIM
        kdt_Base = KDTree(NCB, 20)
    diff = 2
    k = 0
    Conv = np.zeros((100, ))
    while (diff > 0.000001) & (k < 100):
        k = k + 1
        print '		ITERATION ', k
        if UseFeat == 1:
            RsegTNC, VsegTNC = (NCT[RsegmT[:, 0], ] + NCT[RsegmT[:, 1], ]
                                ) / 2, (NCT[VsegmT[:, 0], ] +
                                        NCT[VsegmT[:, 1], ]) / 2
            kdt_RTS, kdt_VTS = KDTree(RsegTNC, 5), KDTree(VsegTNC, 5)
            R12R = LineICP(RnodesB, NCB, RnodesT, NCT, kdt_RTS,
                           RsegmT)  #find registered mesh 1 to 2 Ridges
            R12V = LineICP(VnodesB, NCB, VnodesT, NCT, kdt_VTS,
                           VsegmT)  #find registered mesh 1 to 2 Valleys
            R21R = LineICP(RnodesT, NCT, RnodesB, NCB, kdt_RBS,
                           RsegmB)  #find registered mesh 2 to 1 Ridges
            R21V = LineICP(VnodesT, NCT, VnodesB, NCB, kdt_VBS,
                           VsegmB)  #find registered mesh 2 to 1 Valleys

            # Determine translation required for best fit. Known that the minimum is at this position [D. Du et al]
            B12R, T12R = NCB[np.array(R12R[:, 0], int), ], R12R[:, 1:4]
            B12V, T12V = NCB[np.array(R12V[:, 0], int), ], R12V[:, 1:4]
            B21R, T21R = R21R[:, 1:4], NCT[np.array(R21R[:, 0], int), ]
            B21V, T21V = R21V[:, 1:4], NCT[np.array(R21V[:, 0], int), ]
            Base_i = np.r_[B12R, B12V, B21R, B21V]
            Target_i = np.r_[T12R, T12V, T21R, T21V]
        else:
            kdt_Targ = KDTree(NCT, 20)
            print 'k-d trees and closest point search'
            B2T, T2B = np.zeros((NCB.shape[0], 1)), np.zeros((NCT.shape[0], 1))
            print '	base'
            results = pprocess.Map(limit=LIM)
            calc = results.manage(pprocess.MakeParallel(FullICP))
            for j in range(0, LIM):
                calc(np.array(range(0, NPP1)) + j * NPP1, NCB, kdt_Targ, B2T)
            for j in range(0, LIM):
                B2T[np.array(range(0, NPP1)) + j * NPP1, :] = results[j]
            B2T[range(LIM * NPP1, N1), :] = FullICP(range(LIM * NPP1, N1), NCB,
                                                    kdt_Targ, B2T)
            #for i in range(0,NCB.shape[0]):
            #B2T = B2T + [kdt_Targ.query(NCB[i,])[1]]
            print '	target'
            results = pprocess.Map(limit=LIM)
            calc = results.manage(pprocess.MakeParallel(FullICP))
            for j in range(0, LIM):
                calc(np.array(range(0, NPP2)) + j * NPP2, NCT, kdt_Base, T2B)
            for j in range(0, LIM):
                T2B[np.array(range(0, NPP2)) + j * NPP2, :] = results[j]
            T2B[range(LIM * NPP2, N2), :] = FullICP(range(LIM * NPP2, N2), NCT,
                                                    kdt_Base, T2B)
            #for i in range(0,NCT.shape[0]):
            #T2B = T2B + [kdt_Base.query(NCT[i,])[1]]
            B2T, T2B = np.array(B2T, int).reshape(
                (NCB.shape[0], )), np.array(T2B, int).reshape((NCT.shape[0], ))
            Base_i = np.r_[NCB, NCB[T2B, ]]
            Target_i = np.r_[NCT[B2T, ], NCT]
        print 'translate'
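        # The best-fit translation is the mean of the paired-point differences,
        # i.e. the offset between the centroids of the two registered point sets.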
        Distance = Base_i - Target_i
        Translate = np.sum(Distance, 0) / Distance.shape[0]
        # Apply translation to Target nodal coordinates and Base to Target registered pairs
        NCTT = np.c_[NCT[:, 0] + Translate[0], NCT[:, 1] + Translate[1],
                     NCT[:, 2] + Translate[2]]
        Target_i = np.c_[Target_i[:, 0] + Translate[0],
                         Target_i[:, 1] + Translate[1],
                         Target_i[:, 2] + Translate[2]]
        print '	translation = ', Translate
        # Determine Current Quadratic approximation:
        E1 = np.matrix([[0, -1, 0], [1, 0, 0], [0, 0, 0]])
        E2 = np.matrix([[0, 0, 0], [0, 0, -1], [0, 1, 0]])
        E3 = np.matrix([[0, 0, 1], [0, 0, 0], [-1, 0, 0]])
        D1 = np.matrix([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
        D2 = np.matrix([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
        D3 = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 1]])
        IMat = np.matrix(np.eye(3))
        #Ukm1,Skm1,Rkm1 = np.linalg.svd(AO)
        Ukm1, Skm1, Rkm1 = IMat, IMat, IMat  #*1.3
        #k2,diff2=0,2
        #while (diff2>0.1)&(k2<100):
        #k2=k2+1
        #Bi1,Bi2,Bi3 = np.array(Ukm1*E1*Skm1*Rkm1*Target_i.T).T,np.array(Ukm1*E2*Skm1*Rkm1*Target_i.T).T,np.array(Ukm1*E3*Skm1*Rkm1*Target_i.T).T
        #Bi4,Bi5,Bi6 = np.array(Ukm1*Skm1*D1*Rkm1*Target_i.T).T,np.array(Ukm1*Skm1*D2*Rkm1*Target_i.T).T,np.array(Ukm1*Skm1*D3*Rkm1*Target_i.T).T
        #Bi7,Bi8,Bi9 = np.array(Ukm1*Skm1*Rkm1*E1*Target_i.T).T,np.array(Ukm1*Skm1*Rkm1*E2*Target_i.T).T,np.array(Ukm1*Skm1*Rkm1*E3*Target_i.T).T

        #Distance = np.array(Ukm1*Skm1*Rkm1*Target_i.T-Base_i.T).T
        ## Set up Hessian matrix:
        #Hes = np.matrix([[np.sum(Bi1*Bi1),np.sum(Bi1*Bi2),np.sum(Bi1*Bi3),np.sum(Bi1*Bi4),np.sum(Bi1*Bi5),np.sum(Bi1*Bi6),np.sum(Bi1*Bi7),np.sum(Bi1*Bi8),np.sum(Bi1*Bi9)],
        #[np.sum(Bi2*Bi1),np.sum(Bi2*Bi2),np.sum(Bi2*Bi3),np.sum(Bi2*Bi4),np.sum(Bi2*Bi5),np.sum(Bi2*Bi6),np.sum(Bi2*Bi7),np.sum(Bi2*Bi8),np.sum(Bi2*Bi9)],
        #[np.sum(Bi3*Bi1),np.sum(Bi3*Bi2),np.sum(Bi3*Bi3),np.sum(Bi3*Bi4),np.sum(Bi3*Bi5),np.sum(Bi3*Bi6),np.sum(Bi3*Bi7),np.sum(Bi3*Bi8),np.sum(Bi3*Bi9)],
        #[np.sum(Bi4*Bi1),np.sum(Bi4*Bi2),np.sum(Bi4*Bi3),np.sum(Bi4*Bi4),np.sum(Bi4*Bi5),np.sum(Bi4*Bi6),np.sum(Bi4*Bi7),np.sum(Bi4*Bi8),np.sum(Bi4*Bi9)],
        #[np.sum(Bi5*Bi1),np.sum(Bi5*Bi2),np.sum(Bi5*Bi3),np.sum(Bi5*Bi4),np.sum(Bi5*Bi5),np.sum(Bi5*Bi6),np.sum(Bi5*Bi7),np.sum(Bi5*Bi8),np.sum(Bi5*Bi9)],
        #[np.sum(Bi6*Bi1),np.sum(Bi6*Bi2),np.sum(Bi6*Bi3),np.sum(Bi6*Bi4),np.sum(Bi6*Bi5),np.sum(Bi6*Bi6),np.sum(Bi6*Bi7),np.sum(Bi6*Bi8),np.sum(Bi6*Bi9)],
        #[np.sum(Bi7*Bi1),np.sum(Bi7*Bi2),np.sum(Bi7*Bi3),np.sum(Bi7*Bi4),np.sum(Bi7*Bi5),np.sum(Bi7*Bi6),np.sum(Bi7*Bi7),np.sum(Bi7*Bi8),np.sum(Bi7*Bi9)],
        #[np.sum(Bi8*Bi1),np.sum(Bi8*Bi2),np.sum(Bi8*Bi3),np.sum(Bi8*Bi4),np.sum(Bi8*Bi5),np.sum(Bi8*Bi6),np.sum(Bi8*Bi7),np.sum(Bi8*Bi8),np.sum(Bi8*Bi9)],
        #[np.sum(Bi9*Bi1),np.sum(Bi9*Bi2),np.sum(Bi9*Bi3),np.sum(Bi9*Bi4),np.sum(Bi9*Bi5),np.sum(Bi9*Bi6),np.sum(Bi9*Bi7),np.sum(Bi9*Bi8),np.sum(Bi9*Bi9)]])

        ## set up fj
        #Fj = np.matrix([[np.sum(Bi1*Distance),np.sum(Bi2*Distance),np.sum(Bi3*Distance),np.sum(Bi4*Distance),np.sum(Bi5*Distance),
        #np.sum(Bi6*Distance),np.sum(Bi7*Distance),np.sum(Bi8*Distance),np.sum(Bi9*Distance)]]).T
        print 'find rotation and scale'
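        # Powell search over nine increments: Xf[0:3] update the left rotation
        # U via the skew generators E1..E3, Xf[3:6] update the scale S via
        # D1..D3 (a single value is reused when Use1Scale == 1), and Xf[6:9]
        # update the right rotation R.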
        Xf = fmin_powell(costF,
                         np.array([0, 0, 0, 1, 1, 1, 0, 0, 0]),
                         args=(Base_i, Target_i, Ukm1, Skm1, Rkm1, UseScale,
                               Use1Scale))
        Xf = np.array(Xf).reshape((9, ))
        print 'u1..3,s1..3,r1..3 = ', Xf
        FvP = np.array(Ukm1 * Skm1 * Rkm1 * Target_i.T - Base_i.T).T
        FvP = np.sum(FvP * FvP)
        Conv[k] = FvP
        Ukm1 = Ukm1 + Ukm1 * np.matrix(E1 * Xf[0] + E2 * Xf[1] + E3 * Xf[2])
        if UseScale == 1:
            Skm1 = Skm1 + Skm1 * np.matrix(D1 * Xf[3] + D2 * Xf[4] +
                                           D3 * Xf[5])
        if Use1Scale == 1:
            Skm1 = Skm1 + Skm1 * np.matrix(D1 * Xf[3] + D2 * Xf[3] +
                                           D3 * Xf[3])
        print
        print Skm1
        print
        Rkm1 = Rkm1 + Rkm1 * np.matrix(E1 * Xf[6] + E2 * Xf[7] + E3 * Xf[8])
        NCTT = np.array(Ukm1 * Skm1 * Rkm1 * NCTT.T).T
        diff = np.sum((NCT - NCTT) * (NCT - NCTT)) / NCT.shape[0]
        print '	Average difference between current and previous nodal coordinates:  ', diff
        np.ma.dump(NCTT, 'Femur1NC_' + str(k))
        NCT = NCTT
    return NCT, Conv
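# A minimal usage sketch (hedged; assumes the nodal coordinates and the
# ridge/valley line lists of both meshes were prepared beforehand):
#   NCT_fitted, Conv = lineRST(NCB, RlinesB, VlinesB, NCT, RlinesT, VlinesT,
#                              UseFeat=1, UseScale=0, Use1Scale=1)
#   # Conv holds the summed squared point-pair distance per iteration.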
Example #24
0
    def __call__(self, datasets):
        """Estimate mappers for each dataset using searchlight-based
        hyperalignment.

        Parameters
        ----------
          datasets : list or tuple of datasets

        Returns
        -------
        A list of trained StaticProjectionMappers of the same length as datasets
        """

        # Perform some checks first before modifying internal state
        params = self.params
        ndatasets = len(datasets)

        if len(datasets) <= 1:
            raise ValueError("SearchlightHyperalignment needs > 1 dataset to "
                             "operate on. Got: %d" % self.ndatasets)

        if params.ref_ds in params.exclude_from_model:
            raise ValueError("Requested reference dataset %i is also "
                             "in the exclude list." % params.ref_ds)

        if params.ref_ds >= ndatasets:
            raise ValueError("Requested reference dataset %i is out of "
                             "bounds. We have only %i datasets provided" %
                             (params.ref_ds, ndatasets))

        # The rest of the checks are just warnings
        self.ndatasets = ndatasets

        _shpaldebug("SearchlightHyperalignment %s for %i datasets" %
                    (self, self.ndatasets))

        selected = [
            _ for _ in range(ndatasets) if _ not in params.exclude_from_model
        ]
        ref_ds_train = selected.index(params.ref_ds)
        params.hyperalignment.params.ref_ds = ref_ds_train
        warning('Using %dth dataset as the reference dataset (%dth after '
                'excluding datasets)' % (params.ref_ds, ref_ds_train))
        if len(params.exclude_from_model) > 0:
            warning("These datasets will not participate in building common "
                    "model: %s" % params.exclude_from_model)

        if __debug__:
            # verify that datasets were zscored prior the alignment since it is
            # assumed/required preprocessing step
            for ids, ds in enumerate(datasets):
                for f, fname, tval in ((np.mean, 'means', 0),
                                       (np.std, 'stds', 1)):
                    vals = f(ds, axis=0)
                    vals_comp = np.abs(vals - tval) > 1e-5
                    if np.any(vals_comp):
                        warning(
                            '%d %s are too different (max diff=%g) from %d in '
                            'dataset %d to come from a zscored dataset. '
                            'Please zscore datasets first for correct operation '
                            '(unless it was intentional)' %
                            (np.sum(vals_comp), fname,
                             np.max(np.abs(vals)), tval, ids))

        # Setting up SearchlightHyperalignment
        # we need to know which original features where comprising the
        # individual SL ROIs
        _shpaldebug('Initializing FeatureSelectionHyperalignment.')
        hmeasure = FeatureSelectionHyperalignment(
            ref_ds=params.ref_ds,
            featsel=params.featsel,
            hyperalignment=params.hyperalignment,
            full_matrix=params.combine_neighbormappers,
            use_same_features=params.use_same_features,
            exclude_from_model=params.exclude_from_model,
            dtype=params.dtype)

        # Performing SL processing manually
        _shpaldebug("Setting up for searchlights")
        if params.nproc is None and externals.exists('pprocess'):
            import pprocess
            try:
                params.nproc = pprocess.get_number_of_cores() or 1
            except AttributeError:
                warning("pprocess version %s has no API to figure out maximal "
                        "number of cores. Using 1" %
                        externals.versions['pprocess'])
                params.nproc = 1

        # XXX I think this class should already accept a single dataset only.
        # It should have a ``space`` setting that names a sample attribute that
        # can be used to identify individual/original datasets.
        # Taking a single dataset as argument would be cleaner, because the
        # algorithm relies on the assumption that there is a coarse feature
        # alignment, i.e. the SL ROIs cover roughly the same area
        queryengines = self._get_trained_queryengines(datasets,
                                                      params.queryengine,
                                                      params.radius,
                                                      params.ref_ds)
        # For surface nodes to voxels queryengines, roi_seed hardly makes sense
        qe = queryengines[(0 if len(queryengines) == 1 else params.ref_ds)]
        if isinstance(qe, SurfaceVerticesQueryEngine):
            self.force_roi_seed = False
            if not self.params.combine_neighbormappers:
                raise NotImplementedError(
                    "Mapping from voxels to surface nodes is not "
                    "implemented yet. Try setting combine_neighbormappers to True."
                )
        self.nfeatures = datasets[params.ref_ds].nfeatures
        _shpaldebug("Performing Hyperalignment in searchlights")
        # Setting up centers for running SL Hyperalignment
        if params.sparse_radius is None:
            roi_ids = self._get_verified_ids(queryengines) \
                if params.mask_node_ids is None \
                else params.mask_node_ids
        else:
            if params.queryengine is not None:
                raise NotImplementedError(
                    "using sparse_radius whenever custom queryengine is "
                    "provided is not yet supported.")
            _shpaldebug("Setting up sparse neighborhood")
            from mvpa2.misc.neighborhood import scatter_neighborhoods
            if params.mask_node_ids is None:
                scoords, sidx = scatter_neighborhoods(
                    Sphere(params.sparse_radius),
                    datasets[params.ref_ds].fa.voxel_indices,
                    deterministic=True)
                roi_ids = sidx
            else:
                scoords, sidx = scatter_neighborhoods(
                    Sphere(params.sparse_radius),
                    datasets[params.ref_ds].fa.voxel_indices[
                        params.mask_node_ids],
                    deterministic=True)
                roi_ids = [params.mask_node_ids[sid] for sid in sidx]

        # Initialize projections
        _shpaldebug('Initializing projection matrices')
        self.projections = [
            csc_matrix((self.nfeatures, self.nfeatures), dtype=params.dtype)
            for isub in range(self.ndatasets)
        ]

        # compute
        if params.nproc is not None and params.nproc > 1:
            # split all target ROIs centers into `nproc` equally sized blocks
            nproc_needed = min(len(roi_ids), params.nproc)
            params.nblocks = nproc_needed \
                if params.nblocks is None else params.nblocks
            params.nblocks = min(len(roi_ids), params.nblocks)
            node_blocks = np.array_split(roi_ids, params.nblocks)
            # the next block sets up the infrastructure for parallel computing
            # this can easily be changed into a ParallelPython loop, if we
            # decide to have a PP job server in PyMVPA
            import pprocess
            p_results = pprocess.Map(limit=nproc_needed)
            if __debug__:
                debug(
                    'SLC', "Starting off %s child processes for nblocks=%i" %
                    (nproc_needed, params.nblocks))
            compute = p_results.manage(pprocess.MakeParallel(self._proc_block))
            seed = mvpa2.get_random_seed()
            for iblock, block in enumerate(node_blocks):
                # should we maybe deepcopy the measure to have a unique and
                # independent one per process?
                compute(block,
                        datasets,
                        copy.copy(hmeasure),
                        queryengines,
                        seed=seed,
                        iblock=iblock)
        else:
            # otherwise collect the results in an 1-item list
            _shpaldebug('Using 1 process to compute mappers.')
            if params.nblocks is None:
                params.nblocks = 1
            params.nblocks = min(len(roi_ids), params.nblocks)
            node_blocks = np.array_split(roi_ids, params.nblocks)
            p_results = [
                self._proc_block(block, datasets, hmeasure, queryengines)
                for block in node_blocks
            ]
        results_ds = self.__handle_all_results(p_results)
        # Dummy iterator for, you know, iteration
        list(results_ds)

        _shpaldebug(
            'Wrapping projection matrices into StaticProjectionMappers')
        self.projections = [
            StaticProjectionMapper(proj=proj, recon=proj.T)
            if params.compute_recon else StaticProjectionMapper(proj=proj)
            for proj in self.projections
        ]
        return self.projections
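# A minimal usage sketch (hedged; assumes `datasets` is a list of z-scored
# PyMVPA datasets sharing a common feature space):
#   slhyper = SearchlightHyperalignment(radius=3, nproc=4)
#   mappers = slhyper(datasets)   # one trained StaticProjectionMapper per dataset
#   aligned = [m.forward(ds) for m, ds in zip(mappers, datasets)]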
Example #25
0
#np.ma.dump(NCS,fname[0:6]+'NC_Ainit')
#NCS = ptet.LaplacMesh(inner,NCS,neighbListTet,100,1)
#np.ma.dump(NCS,fname[0:6]+'NC_AinitLapl')

NCSprev = np.r_[NCS]
DispOuter = (NCouter - NCTnc[outer, ]) / Steps
for inc in range(Steps):
    #NCS[outer,] = NCS[outer,]+DispOuter	#update boundary displacement
    # Deform mesh using Gaussian smoothing as suggested in paper by R.Bryan et al.
    NCp = NCS[outer, ]
    DD = DispOuter
    DS = np.zeros(NCS.shape)
    print 'Do Gaussian Smooth on internal nodes'
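    # The Gaussian support shrinks geometrically with each increment:
    # sigma = 10 * 1.0715**-(inc + 1), so sigma_k2 is roughly 87.1 at inc = 0
    # and 75.9 at inc = 1.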
    sigma_k2 = np.power(np.power(1.0715, -(inc + 1)) * 10, 2)
    results = pprocess.Map(limit=LIM)
    calc = results.manage(pprocess.MakeParallel(ptet.GaussianSmooth))
    for j in range(0, LIM):
        calc(
            np.array(range(0, NPP1)) + j * NPP1, np.r_[NCS], NCp, DD, sigma_k2,
            2)
    for j in range(0, LIM):
        DS[np.array(range(0, NPP1)) + j * NPP1, :] = results[j]
    DS[range(LIM * NPP1, N1), :] = ptet.GaussianSmooth(
        np.array(range(LIM * NPP1, N1)), np.r_[NCS], NCp, DD, sigma_k2, 2)
    NCS[outer, ] = NCS[outer, ] + DispOuter
    NCS[inner, ] = NCS[inner, ] + DS[inner, ]

    EQ, delt, Sn2, Sig = qu.elemQual_mu(np.array(range(TetT.shape[0])), NCS,
                                        TetT)
    print '					Average Element Quality: 	', np.average(EQ)
    print '					Degenerate (q<0.15): 		', np.where(EQ < 0.15)[0].size
Example #26
0
def LineReg(NCB, TriB, RlinesB, VlinesB, NCT, TriT, RlinesT, VlinesT,
            percentReg, DistMax):
    N1 = NCB.shape[0]
    NPP1 = N1 / LIM
    [RsegmB, VsegmB, RsegmT, VsegmT] = [
        np.array([[], [], []]).reshape((0, 3)),
        np.array([[], [], []]).reshape((0, 3)),
        np.array([[], [], []]).reshape((0, 3)),
        np.array([[], [], []]).reshape((0, 3))
    ]
    [RnodesB, VnodesB, RnodesT, VnodesT] = [
        np.array([[], []]).reshape((0, 2)),
        np.array([[], []]).reshape((0, 2)),
        np.array([[], []]).reshape((0, 2)),
        np.array([[], []]).reshape((0, 2))
    ]
    # Transform Lines into linesegments with i'th segment allocated as [NC_point1,NC_point2,Line_nr]
    #	and list of nodes allocated as [Nd_nr, Line_nr]
    for i in range(1, RlinesB[0] + 1):
        Lsize = RlinesB[i].size
        RnodesB = np.array(
            np.r_[RnodesB, np.c_[RlinesB[i],
                                 np.ones((Lsize, 1)) * i]], int)
        RsegmB = np.array(
            np.r_[RsegmB, np.c_[RlinesB[i][0:Lsize - 1], RlinesB[i][1:Lsize],
                                np.ones((Lsize - 1, 1)) * i]], int)
    for i in range(1, VlinesB[0] + 1):
        Lsize = VlinesB[i].size
        VnodesB = np.array(
            np.r_[VnodesB, np.c_[VlinesB[i],
                                 np.ones((Lsize, 1)) * i]], int)
        VsegmB = np.array(
            np.r_[VsegmB, np.c_[VlinesB[i][0:Lsize - 1], VlinesB[i][1:Lsize],
                                np.ones((Lsize - 1, 1)) * i]], int)
    for i in range(1, RlinesT[0] + 1):
        Lsize = RlinesT[i].size
        RnodesT = np.array(
            np.r_[RnodesT, np.c_[RlinesT[i],
                                 np.ones((Lsize, 1)) * i]], int)
        RsegmT = np.array(
            np.r_[RsegmT, np.c_[RlinesT[i][0:Lsize - 1], RlinesT[i][1:Lsize],
                                np.ones((Lsize - 1, 1)) * i]], int)
    for i in range(1, VlinesT[0] + 1):
        Lsize = VlinesT[i].size
        VnodesT = np.array(
            np.r_[VnodesT, np.c_[VlinesT[i],
                                 np.ones((Lsize, 1)) * i]], int)
        VsegmT = np.array(
            np.r_[VsegmT, np.c_[VlinesT[i][0:Lsize - 1], VlinesT[i][1:Lsize],
                                np.ones((Lsize - 1, 1)) * i]], int)
    # find average nodal coordinate of target linesegments
    RsegTNC, VsegTNC = (NCT[RsegmT[:, 0], ] + NCT[RsegmT[:, 1], ]) / 2, (
        NCT[VsegmT[:, 0], ] + NCT[VsegmT[:, 1], ]) / 2
    # set up k-d tree of nodes and segments in Base model lines
    print 'TARGET: Determine triangle centroids, triangle normals and weighted vertex normals'
    TBCT = np.c_[np.sum(np.c_[NCT[TriT[:, 0], 0], NCT[TriT[:, 1], 0],
                              NCT[TriT[:, 2], 0]], 1) / 3,
                 np.sum(np.c_[NCT[TriT[:, 0], 1], NCT[TriT[:, 1], 1],
                              NCT[TriT[:, 2], 1]], 1) / 3,
                 np.sum(np.c_[NCT[TriT[:, 0], 2], NCT[TriT[:, 1], 2],
                              NCT[TriT[:, 2], 2]], 1) / 3]
    TNORMT = np.cross(NCT[TriT[:, 1], :] - NCT[TriT[:, 0], :],
                      NCT[TriT[:, 2], :] - NCT[TriT[:, 0], :])
    TNORMT = (TNORMT.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([TNORMT * TNORMT]), 2)))).T
    VNORMT = vrtxnormal(NCT, TriT, TBCT, TNORMT)
    print 'TARGET: Determine segment normal directions'
    SegNormRT, SegNormVT = (VNORMT[RsegmT[:, 0], ] +
                            VNORMT[RsegmT[:, 1], ]), (VNORMT[VsegmT[:, 0], ] +
                                                      VNORMT[VsegmT[:, 1], ])
    SegNormRT = (SegNormRT.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([SegNormRT * SegNormRT]), 2)))).T
    SegNormVT = (SegNormVT.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([SegNormVT * SegNormVT]), 2)))).T

    # find average nodal coordinate of base linesegments
    RsegBNC, VsegBNC = (NCB[RsegmB[:, 0], ] + NCB[RsegmB[:, 1], ]) / 2, (
        NCB[VsegmB[:, 0], ] + NCB[VsegmB[:, 1], ]) / 2
    # set up k-d tree of nodes and segments in Base model lines
    print 'BASE: Determine triangle centroids, triangle normals and weighted vertex normals'
    TBCB = np.c_[np.sum(np.c_[NCB[TriB[:, 0], 0], NCB[TriB[:, 1], 0],
                              NCB[TriB[:, 2], 0]], 1) / 3,
                 np.sum(np.c_[NCB[TriB[:, 0], 1], NCB[TriB[:, 1], 1],
                              NCB[TriB[:, 2], 1]], 1) / 3,
                 np.sum(np.c_[NCB[TriB[:, 0], 2], NCB[TriB[:, 1], 2],
                              NCB[TriB[:, 2], 2]], 1) / 3]
    TNORMB = np.cross(NCB[TriB[:, 1], :] - NCB[TriB[:, 0], :],
                      NCB[TriB[:, 2], :] - NCB[TriB[:, 0], :])
    TNORMB = (TNORMB.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([TNORMB * TNORMB]), 2)))).T
    VNORMB = vrtxnormal(NCB, TriB, TBCB, TNORMB)
    print 'BASE: Determine segment normal directions'
    SegNormRB, SegNormVB = (VNORMB[RsegmB[:, 0], ] +
                            VNORMB[RsegmB[:, 1], ]), (VNORMB[VsegmB[:, 0], ] +
                                                      VNORMB[VsegmB[:, 1], ])
    SegNormRB = (SegNormRB.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([SegNormRB * SegNormRB]), 2)))).T
    SegNormVB = (SegNormVB.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([SegNormVB * SegNormVB]), 2)))).T

    deform, k = 2, 0
    while (deform > 0.0001) & (k < 100):
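        # The Gaussian support radius starts at 50 and decays with the
        # iteration count k, clamped below at a floor of 10.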
        Supp = 50. / (1 + k / 5)
        if Supp < 10:
            Supp = 10
        k = k + 1
        print '		ITERATION ', k
        ## find average nodal coordinate of base linesegments
        #RsegBNC,VsegBNC = (NCB[RsegmB[:,0],]+NCB[RsegmB[:,1],])/2,(NCB[VsegmB[:,0],]+NCB[VsegmB[:,1],])/2
        ## set up k-d tree of nodes and segments in Base model lines
        #print 'BASE: Determine triangle centroids, triangle normals and weighted vertex normals'
        #TBCB = np.c_[np.sum(np.c_[NCB[TriB[:,0],0],NCB[TriB[:,1],0],NCB[TriB[:,2],0]],1)/3,
        #np.sum(np.c_[NCB[TriB[:,0],1],NCB[TriB[:,1],1],NCB[TriB[:,2],1]],1)/3,np.sum(np.c_[NCB[TriB[:,0],2],NCB[TriB[:,1],2],NCB[TriB[:,2],2]],1)/3]
        #TNORMB = np.cross(NCB[TriB[:,1],:]-NCB[TriB[:,0],:],NCB[TriB[:,2],:]-NCB[TriB[:,0],:])
        #TNORMB = (TNORMB.T/(np.ones((3,1))*np.sqrt(np.sum(np.array([TNORMB*TNORMB]),2)))).T
        #VNORMB = vrtxnormal(NCB,TriB,TBCB,TNORMB)
        #print 'BASE: Determine segment normal directions'
        #SegNormRB,SegNormVB = (VNORMB[RsegmB[:,0],]+VNORMB[RsegmB[:,1],]),(VNORMB[VsegmB[:,0],]+VNORMB[VsegmB[:,1],])
        #SegNormRB = (SegNormRB.T/(np.ones((3,1))*np.sqrt(np.sum(np.array([SegNormRB*SegNormRB]),2)))).T
        #SegNormVB = (SegNormVB.T/(np.ones((3,1))*np.sqrt(np.sum(np.array([SegNormVB*SegNormVB]),2)))).T

        ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~REGISTRATION~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
        print 'Register feature lines'
        R12R = FeatReg(RnodesB, NCB, VNORMB, RsegmT, RsegTNC, SegNormRT,
                       RnodesT, NCT,
                       DistMax)  #find registered mesh 1 to 2 Ridges
        R12V = FeatReg(VnodesB, NCB, VNORMB, VsegmT, VsegTNC, SegNormVT,
                       VnodesT, NCT,
                       DistMax)  #find registered mesh 1 to 2 Valleys
        R21R = FeatReg(RnodesT, NCT, VNORMT, RsegmB, RsegBNC, SegNormRB,
                       RnodesB, NCB,
                       DistMax)  #find registered mesh 2 to 1 Ridges
        R21V = FeatReg(VnodesT, NCT, VNORMT, VsegmB, VsegBNC, SegNormVB,
                       VnodesB, NCB,
                       DistMax)  #find registered mesh 2 to 1 Valleys
        print 'Build topological map to disregard unmatched features'
        R12R, R21R = BuildMap(NCB, NCT, RnodesB, RnodesT, RsegmB, RsegmT, R12R,
                              R21R, percentReg)
        R12V, R21V = BuildMap(NCB, NCT, VnodesB, VnodesT, VsegmB, VsegmT, R12V,
                              R21V, percentReg)
        print 'Determine Base deformation required'
        B12R, T12R = NCB[np.array(R12R[:, 0], int), ], R12R[:, 1:4]
        B12V, T12V = NCB[np.array(R12V[:, 0], int), ], R12V[:, 1:4]
        B21R, T21R = R21R[:, 1:4], NCT[np.array(R21R[:, 0], int), ]
        B21V, T21V = R21V[:, 1:4], NCT[np.array(R21V[:, 0], int), ]
        Base_i = np.r_[B12R, B12V, B21R, B21V]
        Target_i = np.r_[T12R, T12V, T21R, T21V]
        DISP = Target_i - Base_i
        DISP_Full = np.zeros(NCB.shape)
        print 'Determine smooth deformation'
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(GaussMove))
        for j in range(0, LIM):
            calc(
                np.array(range(0, NPP1)) + j * NPP1, NCB, Base_i, DISP,
                DISP_Full, Supp)
        for j in range(0, LIM):
            DISP_Full[np.array(range(0, NPP1)) + j * NPP1, :] = results[j]
        DISP_Full[range(LIM * NPP1, N1), :] = GaussMove(
            range(LIM * NPP1, N1), NCB, Base_i, DISP, DISP_Full, Supp)
        NCB = NCB + DISP_Full
        deform = np.sum(np.sqrt(np.sum(DISP_Full * DISP_Full, 1))) / N1
        print '	TOTAL CURRENT DEFORMATION: ', deform
    # return which NODES within surface features are registered:
    keepB = np.r_[R12R[:, 0], R12V[:, 0]]
    keepT = np.r_[R21R[:, 0], R21V[:, 0]]
    keepB, keepT = np.array(keepB, int), np.array(keepT, int)
    return NCB, keepB, keepT
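# A minimal usage sketch (hedged; the percentReg and DistMax values below are
# only illustrative):
#   NCB_def, keepB, keepT = LineReg(NCB, TriB, RlinesB, VlinesB,
#                                   NCT, TriT, RlinesT, VlinesT,
#                                   percentReg=0.5, DistMax=10.0)
#   # keepB / keepT list the feature-line nodes that found a registered match.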
Example #27
0
def gauss_fitter(region='Cepheus_L1251',
                 snr_min=3.0,
                 mol='C2S',
                 vmin=5.0,
                 vmax=10.0,
                 convolve=False,
                 use_old_conv=False,
                 multicore=1,
                 file_extension=None):
    """
    	Fit a Gaussian to non-NH3 emission lines from GAS.
    	It creates a cube for the best-fit Gaussian, a cube 
    	for the best-fit Gaussian with noise added back into 
    	the spectrum, and a parameter map of Tpeak, Vlsr, and FWHM
    
    	Parameters
    	----------
    	region : str
        	Name of region to reduce
    	snr_min : float
        	Lowest signal-to-noise pixels to include in the line-fitting
    	mol : str
        	name of molecule to fit
   	vmin : numpy.float
        	Minimum centroid velocity, in km/s.
    	vmax : numpy.float
        	Maximum centroid velocity, in km/s.
    	convolve : bool or float
        	If not False, specifies the beam-size to convolve the original map with
		Beam-size must be given in arcseconds
    	use_old_conv : bool
        	If True, use an already convolved map with name:
		region + '_' + mol + file_extension + '_conv.fits'
		This convolved map must be in units of km/s
    	multicore : int
		Maximum number of simultaneous processes desired
	file_extension: str
		filename extension 
    	"""
    if file_extension:
        root = file_extension
    else:
        # root = 'base{0}'.format(blorder)
        root = 'all'

    molecules = ['C2S', 'HC7N_22_21', 'HC7N_21_20', 'HC5N']

    MolFile = '{0}/{0}_{2}_{1}.fits'.format(region, root, mol)
    ConvFile = '{0}/{0}_{2}_{1}_conv.fits'.format(region, root, mol)
    GaussOut = '{0}/{0}_{2}_{1}_gauss_cube.fits'.format(region, root, mol)
    GaussNoiseOut = '{0}/{0}_{2}_{1}_gauss_cube_noise.fits'.format(
        region, root, mol)
    ParamOut = '{0}/{0}_{2}_{1}_param_cube.fits'.format(region, root, mol)

    # Load the spectral cube and convert to velocity units
    cube = SpectralCube.read(MolFile)
    cube_km = cube.with_spectral_unit(u.km / u.s, velocity_convention='radio')

    # If desired, convolve map with larger beam
    # or load previously created convolved cube
    if convolve:
        cube = SpectralCube.read(MolFile)
        cube_km_1 = cube.with_spectral_unit(u.km / u.s,
                                            velocity_convention='radio')
        beam = radio_beam.Beam(major=convolve * u.arcsec,
                               minor=convolve * u.arcsec,
                               pa=0 * u.deg)
        cube_km = cube_km_1.convolve_to(beam)
        cube_km.write(ConvFile, format='fits', overwrite=True)
    if use_old_conv:
        cube_km = SpectralCube.read(ConvFile)

    # Define the spectral axis in km/s
    spectra_x_axis_kms = np.array(cube_km.spectral_axis)

    # Find the channel range corresponding to vmin and vmax
    # -- This is a hold-over from when I originally set up the code to
    #    use a channel range rather than velocity range.
    #    Can change later, but this should work for now.
    low_channel = np.where(spectra_x_axis_kms <= vmax
                           )[0][0] + 1  # Add ones to change index to channel
    high_channel = np.where(spectra_x_axis_kms >= vmin
                            )[0][-1] + 1  # Again, hold-over from older setup
    peak_channels = [low_channel, high_channel]

    # Create cubes for storing the fitted Gaussian profiles
    # and the Gaussians with noise added back into the spectrum
    header = cube_km.header
    cube_gauss = np.array(cube_km.unmasked_data[:, :, :])
    cube_gauss_noise = np.array(cube_km.unmasked_data[:, :, :])
    shape = np.shape(cube_gauss)

    # Set up a cube for storing fitted parameters
    param_cube = np.zeros((6, shape[1], shape[2]))
    param_header = cube_km.header

    # Define the Gaussian profile
    def p_eval(x, a, x0, sigma):
        return a * np.exp(-(x - x0)**2 / (2 * sigma**2))
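    # Note: the fitted parameter is the Gaussian sigma; convert to a linewidth
    # via FWHM = 2 * sqrt(2 * ln 2) * sigma (about 2.355 * sigma) if needed.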

    # Create some arrays full of NANs
    # To be used in output cubes if fits fail
    nan_array = np.empty(shape[0])  # For gauss cubes
    nan_array[:] = np.NAN
    nan_array2 = np.empty(param_cube.shape[0])  # For param cubes
    nan_array2[:] = np.NAN

    # Loop through each pixel and find those
    # with SNR above snr_min
    x = []
    y = []
    pixels = 0
    for (i, j), value in np.ndenumerate(cube_gauss[0]):
        spectra = np.array(cube_km.unmasked_data[:, i, j])
        if (False in np.isnan(spectra)):
            rms = np.nanstd(
                np.append(spectra[0:(peak_channels[0] - 1)],
                          spectra[(peak_channels[1] + 1):len(spectra)]))
            if (max(spectra[peak_channels[0]:peak_channels[1]]) /
                    rms) > snr_min:
                pixels += 1
                x.append(i)
                y.append(j)
        else:
            cube_gauss[:, i, j] = nan_array
            param_cube[:, i, j] = nan_array2
            cube_gauss_noise[:, i, j] = nan_array
    print str(pixels) + ' Pixels above SNR=' + str(snr_min)

    # Define a Gaussian fitting function for each pixel
    # i, j are the x,y coordinates of the pixel being fit
    def pix_fit(i, j):
        spectra = np.array(cube_km.unmasked_data[:, i, j])
        # Use the peak brightness Temp within specified channel
        # range as the initial guess for Gaussian height
        max_ch = np.argmax(spectra[peak_channels[0]:peak_channels[1]])
        Tpeak = spectra[peak_channels[0]:peak_channels[1]][max_ch]
        # Use the velocity of the brightness Temp peak as
        # initial guess for Gaussian mean
        vpeak = spectra_x_axis_kms[peak_channels[0]:peak_channels[1]][max_ch]
        rms = np.std(
            np.append(spectra[0:(peak_channels[0] - 1)],
                      spectra[(peak_channels[1] + 1):len(spectra)]))
        err1 = np.zeros(shape[0]) + rms
        # Create a noise spectrum based on rms of off-line channels
        # This will be added to best-fit Gaussian to obtain a noisy Gaussian
        noise = np.random.normal(0., rms, len(spectra_x_axis_kms))
        # Define initial guesses for Gaussian fit
        guess = [Tpeak, vpeak, 0.3]  # [height, mean, sigma]
        try:
            coeffs, covar_mat = curve_fit(p_eval,
                                          xdata=spectra_x_axis_kms,
                                          ydata=spectra,
                                          p0=guess,
                                          sigma=err1,
                                          maxfev=500)
            gauss = np.array(
                p_eval(spectra_x_axis_kms, coeffs[0], coeffs[1], coeffs[2]))
            noisy_gauss = np.array(
                p_eval(spectra_x_axis_kms, coeffs[0], coeffs[1],
                       coeffs[2])) + noise
            params = np.append(coeffs, (covar_mat[0][0]**0.5, covar_mat[1][1]**
                                        0.5, covar_mat[2][2]**0.5))
            # params = ['Tpeak', 'VLSR','sigma','Tpeak_err','VLSR_err','sigma_err']

            # Don't accept fit if fitted parameters are non-physical or too uncertain
            if (params[0] < 0.01) or (params[3] > 1.0) or (
                    params[2] < 0.05) or (params[5] > 0.5) or (params[4] >
                                                               0.75):
                noisy_gauss = nan_array
                gauss = nan_array
                params = nan_array2

            # Don't accept fit if the SNR for fitted spectrum is less than SNR threshold
            #if max(gauss)/rms < snr_min:
            #	noisy_gauss = nan_array
            #	gauss = nan_array
            #	params = nan_array2

        except RuntimeError:
            noisy_gauss = nan_array
            gauss = nan_array
            params = nan_array2

        return i, j, gauss, params, noisy_gauss

    # Parallel computation:
    nproc = multicore  # maximum number of simultaneous processes desired
    queue = pprocess.Queue(limit=nproc)
    calc = queue.manage(pprocess.MakeParallel(pix_fit))
    tic = time.time()
    counter = 0

    # Uncomment to see some plots of the fitted spectra
    #for i,j in zip(x,y):
    #pix_fit(i,j)
    #plt.plot(spectra_x_axis_kms, spectra, color='blue', drawstyle='steps')
    #plt.plot(spectra_x_axis_kms, gauss, color='red')
    #plt.show()
    #plt.close()

    # Begin parallel computations
    # Store the best-fit Gaussians and parameters
    # in their correct positions in the previously created cubes
    for i, j in zip(x, y):
        calc(i, j)
    for i, j, gauss_spec, parameters, noisy_gauss_spec in queue:
        cube_gauss[:, i, j] = gauss_spec
        param_cube[:, i, j] = parameters
        cube_gauss_noise[:, i, j] = noisy_gauss_spec
        counter += 1
        print str(counter) + ' of ' + str(pixels) + ' pixels completed \r',
        sys.stdout.flush()
    print "\n %f s for parallel computation." % (time.time() - tic)

    # Save final cubes
    # These will be in km/s units.
    # Spectra will have larger values to the left, lower values to right
    cube_final_gauss = SpectralCube(data=cube_gauss,
                                    wcs=cube_km.wcs,
                                    header=cube_km.header)
    cube_final_gauss.write(GaussOut, format='fits', overwrite=True)
    cube_final_gauss_noise = SpectralCube(data=cube_gauss_noise,
                                          wcs=cube_km.wcs,
                                          header=cube_km.header)
    cube_final_gauss_noise.write(GaussNoiseOut, format='fits', overwrite=True)

    # Construct appropriate header for param_cube
    param_header['NAXIS3'] = len(nan_array2)
    param_header['WCSAXES'] = 3
    param_header['CRPIX3'] = 1
    param_header['CDELT3'] = 1
    param_header['CRVAL3'] = 0
    param_header['PLANE1'] = 'Tpeak'
    param_header['PLANE2'] = 'VLSR'
    param_header['PLANE3'] = 'sigma'
    param_header['PLANE4'] = 'Tpeak_err'
    param_header['PLANE5'] = 'VLSR_err'
    param_header['PLANE6'] = 'sigma_err'

    fits.writeto(ParamOut, param_cube, header=param_header, clobber=True)
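# A minimal usage sketch (hedged; assumes the '<region>/<region>_<mol>_<root>.fits'
# file layout used above):
#   gauss_fitter(region='Cepheus_L1251', snr_min=3.0, mol='C2S',
#                vmin=5.0, vmax=10.0, multicore=4, file_extension='all')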