Example #1
def MeshSmooth(NC, neighbList, Iter):
    print "Two-stage Taubin Smoothing"
    N1 = NC.shape[0]
    NPP1 = N1 / LIM
    for i in range(Iter):
        print "	Iteration ", i + 1
        print "		Umbrella-Operator Step"
        Umb = -np.zeros((NC.shape))
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(MeshUmbrella))
        for j in range(0, LIM):
            calc(np.array(range(0, NPP1)) + j * NPP1, NC, neighbList, Umb)
        for j in range(0, LIM):
            Umb[np.array(range(0, NPP1)) + j * NPP1, :] = results[j]
        Umb[range(LIM * NPP1, N1), :] = MeshUmbrella(range(LIM * NPP1, N1), NC,
                                                     neighbList, Umb)
        print "		Smoothing Step"
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(MeshTaubin))
        for j in range(0, LIM):
            calc(np.array(range(0, NPP1)) + j * NPP1, NC, neighbList, Umb)
        for j in range(0, LIM):
            NC[np.array(range(0, NPP1)) + j * NPP1, :] = results[j]
        NC[range(LIM * NPP1, N1), :] = MeshTaubin(range(LIM * NPP1, N1), NC,
                                                  neighbList, Umb)
    return NC
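The examples on this page all share the same fan-out/gather idiom: build a pprocess.Map with a process limit, wrap the worker with pprocess.MakeParallel via results.manage, queue one call per index chunk, read results[j] back in call order, and handle the leftover rows in-process. A minimal self-contained sketch of that idiom (chunk_half, the toy data and the LIM value are made up for illustration):

import numpy as np
import pprocess

LIM = 4  # number of worker processes (assumed)

def chunk_half(idx, data):
    # toy stand-in for MeshUmbrella / MeshTaubin: return the processed chunk
    return data[idx] * 0.5

data = np.arange(22, dtype=float)
out = np.zeros_like(data)
NPP = data.shape[0] // LIM                      # rows handled per process

results = pprocess.Map(limit=LIM)
calc = results.manage(pprocess.MakeParallel(chunk_half))
for j in range(LIM):
    calc(np.arange(NPP) + j * NPP, data)        # queue one chunk per worker
for j in range(LIM):
    out[np.arange(NPP) + j * NPP] = results[j]  # results come back in call order
out[LIM * NPP:] = chunk_half(np.arange(LIM * NPP, data.shape[0]), data)  # leftover rows, in-process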
Example #2
def MeshSmooth(nodes, NC, neighbList, Iter, listPos=0):
    # If listPos == 0: use the neighbour-list position matching the nodal position in nodes, i.e. len(neighbList) == nodes.size
    # If listPos != 0: use neighbList[ndnr], where nodes[i] = ndnr
    print "Two-stage Taubin Smoothing"
    print "ListPos: ", listPos
    N1 = nodes.size
    NPP1 = N1 / LIM
    for i in range(Iter):
        print "	Iteration ", i + 1
        print "		Umbrella-Operator Step"
        Umb = -np.zeros((NC.shape))
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(MeshUmbrella))
        for j in range(0, LIM):
            calc(
                np.array(range(0, NPP1)) + j * NPP1, nodes, NC, neighbList,
                Umb, listPos)
        for j in range(0, LIM):
            Umb[nodes[np.array(range(0, NPP1)) + j * NPP1], :] = results[j]
        Umb[nodes[range(LIM * NPP1, N1)], :] = MeshUmbrella(
            range(LIM * NPP1, N1), nodes, NC, neighbList, Umb, listPos)
        print "		Smoothing Step"
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(MeshTaubin))
        for j in range(0, LIM):
            calc(
                np.array(range(0, NPP1)) + j * NPP1, nodes, NC, neighbList,
                Umb, listPos)
        for j in range(0, LIM):
            NC[nodes[np.array(range(0, NPP1)) + j * NPP1], :] = results[j]
        NC[nodes[range(LIM * NPP1, N1)], :] = MeshTaubin(
            range(LIM * NPP1, N1), nodes, NC, neighbList, Umb, listPos)
    return NC
def update_model(iteration, model_in_dir, model_out_dir, kerneltype,
                 modeltype):  #,update_files,step_length):
    # e.g. kerneltype = 'alpha_acoustic_kernel'
    # e.g. modeltype = 'vp'
    import input_parameters, os, pprocess, sys
    import numpy as np

    def update_proc(current_proc, maxkernel):
        proc_prefix = get_proc_prefix(
            current_proc)  # returns string "proc<6-digit prefix>_"
        modelvalues = read_bin_file(
            model_in_dir + proc_prefix + modeltype + '.bin',
            ['f'])[0]  # current model values
        modelvalues = np.array(modelvalues, dtype='f')
        search_direction = read_bin_file(
            input_parameters.summed_kernel_directory + proc_prefix +
            kerneltype + '.bin', ['f'])[0]
        search_direction = np.array(search_direction, dtype='f')
        # use steepest descent algorithm with fixed step length
        # normalize update direction by maximum absolute gradient
        newmodel = modelvalues * (
            1 + input_parameters.step_length * search_direction / maxkernel)
        file_out = model_out_dir + '/' + proc_prefix + modeltype + '.bin'
        write_bin_file(file_out, [newmodel.tolist()], ['f'], 1)

    def get_maxabs_kernel(proc):
        filename = input_parameters.summed_kernel_directory + get_proc_prefix(
            proc) + kerneltype + '.bin'
        u = read_bin_file(filename, ['f'])[0]
        u = np.array(u, dtype='f')
        vp = read_bin_file(
            model_in_dir + get_proc_prefix(proc) + modeltype + '.bin',
            ['f'])[0]
        maxval = max(abs(u))
        return maxval

    results = pprocess.Map(limit=input_parameters.NPROCS, reuse=1)
    calc = results.manage(pprocess.MakeReusable(get_maxabs_kernel))
    for proc in range(input_parameters.NPROCS):
        calc(proc)
    results.finish()

    max_kernel_value = 0.0
    for i in range(input_parameters.NPROCS):
        max_kernel_value = max(max_kernel_value, results[i])

    print "Initializing directory for updated model : " + model_out_dir
    os.system('mkdir -p ' + model_out_dir)

    results = pprocess.Map(limit=input_parameters.NPROCS, reuse=1)
    parfun = results.manage(pprocess.MakeReusable(update_proc))
    for proc in range(input_parameters.NPROCS):
        parfun(proc, max_kernel_value)
    results.finish()
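The update rule in update_proc is plain steepest descent with a fixed step length, applied multiplicatively after normalising the gradient by its maximum absolute value. A small numeric sketch with made-up values (step_length, the model and the kernel values below are assumptions, not taken from input_parameters):

import numpy as np

step_length = 0.02                               # hypothetical fixed step
model = np.array([2500.0, 2600.0, 2700.0])       # e.g. vp values for one slice
gradient = np.array([-4.0e-9, 1.0e-9, 3.0e-9])   # summed kernel values
maxkernel = np.max(np.abs(gradient))             # normalisation, as in get_maxabs_kernel

new_model = model * (1.0 + step_length * gradient / maxkernel)
# the entry with the largest |gradient| moves by the full step: 2500 * (1 - 0.02) = 2450.0
print(new_model)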
def write_grid_gll():
    # specfem mesh files (e.g. proc000000_x.bin) are defined on global coordinates
    # however, we need them defined on local (GLL) points
    # thus, we output files of type proc000000_<xlocal/ylocal/zlocal>.bin, whose entries correspond to the spatial locations of the model files (e.g. proc000000_vp.bin)
    import directory_parameters, input_parameters, pprocess
    import numpy as np

    def global_to_local(proc, coord):
        proc_prefix = get_proc_prefix(proc)
        # load proc*_<coord>.bin file
        global_coord = read_bin_file(
            directory_parameters.DATABASES_MPI_path + proc_prefix + coord +
            '.bin', ['f'])[0]
        # load local to global mapping, proc*_ibool.bin
        ibool = read_bin_file(
            directory_parameters.DATABASES_MPI_path + proc_prefix +
            'ibool.bin', ['i'])[0]
        local_coord = np.zeros(len(ibool))
        for i in range(len(ibool)):
            local_coord[i] = global_coord[
                ibool[i] -
                1]  # python arrays start at 0, whereas ibool starts at 1
        filename_out = directory_parameters.DATABASES_MPI_path + proc_prefix + coord + 'local.bin'
        write_bin_file(filename_out, [local_coord.tolist()], ['f'], 1)

    results = pprocess.Map(limit=input_parameters.total_processors, reuse=1)
    parfun = results.manage(pprocess.MakeReusable(global_to_local))
    for proc in range(input_parameters.NPROCS):
        for coord in ['x', 'y', 'z']:
            parfun(proc, coord)
    results.finish()
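The inner loop in global_to_local is a 1-based gather: entry i of the local array is the global value at ibool[i] - 1. With numpy this collapses to a single fancy-indexing expression; a tiny sketch with made-up arrays (not SPECFEM data):

import numpy as np

global_coord = np.array([10.0, 20.0, 30.0, 40.0])  # one value per global mesh point
ibool = np.array([1, 1, 2, 4, 3])                  # local-to-global map, 1-based as noted above
local_coord = global_coord[ibool - 1]              # -> array([10., 10., 20., 40., 30.])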
def test_multiprocessing():

    args = []
    for i in xrange(5):
        arg = (500, 2)
        args.append(arg)
    print args
    start_time = time.time()
    #map(takeuptime, [(500,2), (500,2), (500,2), (500,2)])  #or in form of: [takeuptime(args) for args in [500,500]]
    #print 'single Processing: ', time.time() - start_time

    #start_time = time.time()
    #Source-multiprocessing: http://pastebin.com/iGPs699r and http://www.astrobetter.com/parallel-processing-in-python/
    #pool = Pool(processes=4)

    #pool.map(takeuptime, args)

    #print 'parallel Processing: ', time.time() - start_time

    start_time = time.time()
    nproc = 10  # maximum number of simultaneous processes desired
    results = pprocess.Map(limit=nproc, reuse=1)
    parallel_function = results.manage(pprocess.MakeReusable(takeuptime))
    [parallel_function(args2) for args2 in args]
    # Start computing things
    print results.results

    parallel_results = results[0:10]
    print '2nd parallel Processing: ', time.time() - start_time
    print dir(results)
    print dir(results.results)
    print results.results
    for i in results.results.__iter__():
        print i
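For comparison, the multiprocessing.Pool path that is commented out above would look roughly like the sketch below; takeuptime is not shown in this example, so takeuptime_toy is a made-up stand-in that accepts one (n, k) tuple:

from multiprocessing import Pool

def takeuptime_toy(arg):
    # burn a little CPU, mimicking the takeuptime worker
    n, k = arg
    return sum(i ** k for i in range(n))

if __name__ == '__main__':
    args = [(500, 2)] * 5
    pool = Pool(processes=4)
    out = pool.map(takeuptime_toy, args)  # blocks until all workers finish
    pool.close()
    pool.join()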
Example #6
def elemQual_mu(elem, NC, Tet, disp=1, deltPresc=0):
    if disp == 1:
        print 'Determine element Quality [mu]'
    gamma = 0.0000001
    delta = 0
    T1 = elem.shape[0]
    TPP1 = T1 / LIM
    Sn2 = np.zeros((elem.shape[0], ))
    Sig = np.zeros((elem.shape[0], ))
    if elem.size > LIM:
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(Sn2Sig))
        for j in range(0, LIM):
            calc(elem[np.array(range(0, TPP1)) + j * TPP1], NC, Tet)
        for j in range(0, LIM):
            Sn2[j * TPP1:(1 + j) * TPP1], Sig[j * TPP1:(1 + j) *
                                              TPP1] = results[j]
    if np.array(range(LIM * TPP1, T1)).size > 0:
        Sn2[LIM * TPP1:T1], Sig[LIM * TPP1:T1] = Sn2Sig(
            elem[np.array(range(LIM * TPP1, T1))], NC, Tet)
    if np.min(Sig) < gamma:
        if disp == 1:
            print '	minimum Sig = ', np.min(Sig)
        delta = np.sqrt(gamma * (gamma - np.min(Sig)))
    if delta < deltPresc:
        delta = deltPresc
    if disp == 1:
        print '	delta = ', delta
    h_sig = (Sig + np.sqrt(Sig * Sig + 4 * delta * delta)) / 2
    return 3 * np.power(h_sig, 2. / 3) / Sn2, delta, Sn2, Sig
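Written out, the last lines of elemQual_mu implement a regularised quality value built from the two per-element quantities returned by Sn2Sig (that helper is not shown here, so the geometric meaning of Sn2 and Sig is left open): delta is floored at deltPresc and raised to sqrt(gamma * (gamma - min Sig)) whenever min(Sig) falls below gamma, and then

    h(\sigma) = \frac{\sigma + \sqrt{\sigma^{2} + 4\delta^{2}}}{2},
    \qquad
    q = \frac{3\,h(\sigma)^{2/3}}{S_n^{2}},

which is what the return statement evaluates element-wise.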
def TetCellToVertex(NC, Connect, ScalarValue):
    N1 = NC.shape[0]
    NPP1 = N1 / LIM
    ConnectNC = np.c_[
        np.sum(
            np.c_[NC[Connect[:, 0], 0], NC[Connect[:, 1], 0],
                  NC[Connect[:, 2], 0], NC[Connect[:, 3], 0]], 1) / 4,
        np.sum(
            np.c_[NC[Connect[:, 0], 1], NC[Connect[:, 1], 1],
                  NC[Connect[:, 2], 1], NC[Connect[:, 3], 1]], 1) / 4,
        np.sum(
            np.c_[NC[Connect[:, 0], 2], NC[Connect[:, 1], 2],
                  NC[Connect[:, 2], 2], NC[Connect[:, 3], 2]], 1) / 4]
    Scalars = np.zeros(NC.shape[0])
    results = pprocess.Map(limit=LIM)
    calc = results.manage(pprocess.MakeParallel(weightSCVAL))
    for j in range(LIM):
        calc(
            np.array(range(0, NPP1)) + j * NPP1, NC, Connect, ConnectNC,
            ScalarValue)
    for j in range(LIM):
        Scalars[np.array(range(0, NPP1)) + j * NPP1] = results[j]
    #for j in range(LIM):
    if N1 > LIM * NPP1:
        Scalars[range(LIM * NPP1,
                      N1)] = weightSCVAL(np.array(range(LIM * NPP1, N1)), NC,
                                         Connect, ConnectNC, ScalarValue)
    return Scalars
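The ConnectNC block above builds tetrahedron centroids column by column; the same result can be obtained with one fancy-indexing step. A short sketch with a single made-up tetrahedron (not from the source):

import numpy as np

NC = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
Connect = np.array([[0, 1, 2, 3]])        # one tetrahedron
ConnectNC = NC[Connect].mean(axis=1)      # (nTets, 3) centroids -> [[0.25, 0.25, 0.25]]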
Example #8
def main():
    lidc_dict = get_series_uid_path_dict(LIDCPath)
    list_of_args = range(ProcessorNum)
    group_num = len(csvLines) // ProcessorNum
    cutPoint = np.empty([ProcessorNum, 2], dtype=int)
    for row in range(ProcessorNum):
        # start point
        cutPoint[row, 0] = row * group_num
        if row == ProcessorNum - 1:
            # stop point
            cutPoint[row, 1] = len(csvLines)
        else:
            # stop point
            cutPoint[row, 1] = row * group_num + group_num - 1 + 1

    # starting parallel reading
    st = time.time()
    results = pprocess.Map()
    parallel_function = results.manage(pprocess.MakeParallel(write_csv))
    for args in list_of_args:
        parallel_function(args, cutPoint[args, 0], cutPoint[args, 1], lidc_dict)
    print('\nStarting Parallel time {:.2f} seconds...'.format(time.time() - st))

    st = time.time()
    results[:]
    # parallel_results = results[:]
    print('\nParallel costs {:.2f} seconds...'.format(time.time() - st))
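The cutPoint table amounts to contiguous chunks of size len(csvLines) // ProcessorNum, with the remainder absorbed by the last chunk. An equivalent sketch with made-up sizes (n_rows and n_proc are illustrative only):

n_rows, n_proc = 23, 4
group = n_rows // n_proc
cut = [(r * group, n_rows if r == n_proc - 1 else (r + 1) * group) for r in range(n_proc)]
# -> [(0, 5), (5, 10), (10, 15), (15, 23)]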
Example #9
def cast_unsuitable_regions_by_label_MT(multi_level_mask, thresholds_list, parameter_dict):
    '''cast small regions as noise and big regions as vessels'''
    nodule_mask = np.zeros_like(multi_level_mask, np.int8)
    multi_image_labels = range(int(np.max(multi_level_mask)))
    multi_image_labels.reverse()
    loop_times = len(multi_image_labels)
    # TODO map style parallezision
    shared_array_s = mp.Array(ctypes.c_int8, loop_times * np.size(nodule_mask))
    shared_array = np.frombuffer(shared_array_s.get_obj(), dtype=np.int8).reshape((loop_times,) + nodule_mask.shape)

    num_of_proc = pprocess.get_number_of_cores()
    results = pprocess.Map(limit=num_of_proc / 2)
    para_func = results.manage(pprocess.MakeParallel(put_result_into_shared_memory))

    for i in range(loop_times):
        one_label = multi_image_labels[i]
        para_func(shared_array, multi_level_mask, thresholds_list, parameter_dict, one_label)
    results.finish()

    for num_of_loop in range(shared_array.shape[0]):
        nodule_mask = np.logical_or(nodule_mask, shared_array[num_of_loop, ...])

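    # The lines below release the shared-memory arena by hand: close the mmap
    # buffer behind the mp.Array and swap in a fresh heap so repeated calls do
    # not keep the large buffer alive. This reaches into multiprocessing
    # internals (BufferWrapper._heap) and may break on other Python versions.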
    datastate = shared_array_s.get_obj()._wrapper._state
    arenaobj = datastate[0][0]
    arenaobj.buffer.close()
    mp.heap.BufferWrapper._heap = mp.heap.Heap()

    return nodule_mask
Example #10
def Neigenvalues(NC,Tri,layer,ls):
  KDTNC = KDTree(NC,ls)
  VNORM = vertexnormal(NC,Tri)
  Neig = np.zeros(NC.shape)
  N1 = Neig.size/3;
  NPP1 = N1/LIM
  results = pprocess.Map(limit=LIM)
  calc = results.manage(pprocess.MakeParallel(NodeEigVal))
  for j in range(0,LIM):
      calc(np.array(range(0,NPP1))+j*NPP1,NC,Tri,KDTNC,ls,VNORM,layer,Neig)
  for j in range(0,LIM):
      Neig[np.array(range(0,NPP1))+j*NPP1,] = results[j]
  Neig[range(LIM*NPP1,N1),]=NodeEigVal(range(LIM*NPP1,N1),NC,Tri,KDTNC,ls,VNORM,layer,Neig)
  #queue=pprocess.Queue(limit=LIM)
  #results = []
  #Nneig = queue.manage(pprocess.MakeParallel(NodeEigVal))
  #for i in range(0,N):
    #Nneig(i,NC,Tri,KDTNC,ls,VNORM,2)
  #for i,NE in queue:
    #Neig[i,] = NE
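  # reorder the three columns of each row so the eigenvalues end up in
  # descending order (largest first), using pairwise column swaps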
  rows=np.where(Neig[:,0]<Neig[:,1])[0]
  Neig[rows,:]=np.c_[Neig[rows,1],Neig[rows,0],Neig[rows,2]]
  rows=np.where(Neig[:,0]<Neig[:,2])[0]
  Neig[rows,:]=np.c_[Neig[rows,2],Neig[rows,1],Neig[rows,0]]
  rows=np.where(Neig[:,1]<Neig[:,2])[0]
  Neig[rows,:]=np.c_[Neig[rows,0],Neig[rows,2],Neig[rows,1]]
  return Neig
Example #11
def filter_regions(multi_level_mask, thresholds_list, parameter_dict, one_label):
    one_threshold = thresholds_list[one_label]
    mask = (multi_level_mask > one_label)
    min_size = parameter_dict['small_vol_threshold']
    label_image, bounding_box_slices = lyBWareaopen(mask, min_size)

    region_result = [False]
    '''single thread'''
    # for label_number in range(1, np.max(label_image) + 1):
    #     single_result = region_task(label_number, label_image, bounding_box_slices, one_threshold, parameter_dict)
    #     region_result.append(single_result)

    '''multi thread'''
    results = pprocess.Map(limit=pprocess.get_number_of_cores())
    calc = results.manage(pprocess.MakeParallel(region_task))

    for label_number in range(1, np.max(label_image) + 1):
        calc(label_number, label_image, bounding_box_slices, one_threshold, parameter_dict)

    for i, result in enumerate(results):
        region_result.append(result)

    region_result = np.array(region_result, np.bool)
    tuild_result = np.logical_not(region_result)
    label_image[tuild_result[label_image]] = 0

    return (label_image > 0)
Example #12
    def _sl_call(self, dataset, roi_ids, nproc):
        """Classical generic searchlight implementation
        """
        assert (self.results_backend in ('native', 'hdf5'))
        # compute
        if nproc is not None and nproc > 1:
            # split all target ROIs centers into `nproc` equally sized blocks
            nproc_needed = min(len(roi_ids), nproc)
            nblocks = nproc_needed \
                      if self.nblocks is None else self.nblocks
            roi_blocks = np.array_split(roi_ids, nblocks)

            # the next block sets up the infrastructure for parallel computing
            # this can easily be changed into a ParallelPython loop, if we
            # decide to have a PP job server in PyMVPA
            import pprocess
            p_results = pprocess.Map(limit=nproc_needed)
            if __debug__:
                debug(
                    'SLC', "Starting off %s child processes for nblocks=%i" %
                    (nproc_needed, nblocks))
            compute = p_results.manage(pprocess.MakeParallel(self._proc_block))
            for iblock, block in enumerate(roi_blocks):
                # should we maybe deepcopy the measure to have a unique and
                # independent one per process?
                seed = mvpa2.get_random_seed()
                compute(block,
                        dataset,
                        copy.copy(self.__datameasure),
                        seed=seed,
                        iblock=iblock)
        else:
            # otherwise collect the results in an 1-item list
            p_results = [
                self._proc_block(roi_ids, dataset, self.__datameasure)
            ]

        # Finally collect and possibly process results
        # p_results here is either a generator from pprocess.Map or a list.
        # In case of a generator it allows to process results as they become
        # available
        result_ds = self.results_fx(
            sl=self,
            dataset=dataset,
            roi_ids=roi_ids,
            results=self.__handle_all_results(p_results))

        # Assure having a dataset (for paranoid ones)
        if not is_datasetlike(result_ds):
            try:
                result_a = np.atleast_1d(result_ds)
            except ValueError, e:
                if 'setting an array element with a sequence' in str(e):
                    # try forcing object array.  Happens with
                    # test_custom_results_fx_logic on numpy 1.4.1 on Debian
                    # squeeze
                    result_a = np.array(result_ds, dtype=object)
                else:
                    raise
            result_ds = Dataset(result_a)
Example #13
def ncMain():
    """
    This function is the main netCDF (read/write) function. The required
    address, list of stations and other info will be collected here and
    will be passed to ncChoose (choose between Create/Extract)
    """

    global input

    if input['ncCreate'] != 'N':
        address = input['ncCreate']
    if input['ncExtract'] != 'N':
        address = input['ncExtract']

    events, address_events = quake_info(address, 'info')

    if input['nc_parallel'] == 'Y':
        import pprocess

        print "\n#######################"
        print "Parallel Create/Extract"
        print "Number of Nodes: " + str(input['nc_np'])
        print "#######################\n"

        parallel_results = pprocess.Map(limit=input['nc_np'], reuse=1)
        parallel_job = parallel_results.manage(
            pprocess.MakeReusable(nc_parallel_core))

        for i in range(0, len(events)):
            parallel_job(events=events, address_events=address_events, i=i)
        parallel_results.finish()

    else:
        for i in range(0, len(events)):
            nc_parallel_core(events=events, address_events=address_events, i=i)
def simulation_loop(iteration, simtype='forward', save_forward=True):
    import input_parameters, os, pprocess, sys, directory_parameters

    def do_event(event):
        print "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
        print "     (ITERATION, EVENT, SIMULATION TYPE)=  : (" + str(
            iteration) + ' ' + str(event) + ' ' + simtype + ')'
        print "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%"
        inversion_home_directory = os.getcwd()
        event_dir = input_parameters.event_scratch_directory + 'event' + str(
            event) + '/'
        os.chdir(event_dir + 'bin')
        os.system('mpirun.openmpi -np ' + str(input_parameters.NPROCS) +
                  ' xspecfem3D')
        os.chdir(inversion_home_directory)  # back to root directory
        if simtype == 'forward':
            if input_parameters.save_all_synthetics == True:  # save traces and link them into directory to be read for adjoint source construction
                save_trace_dir = event_dir + '/save_traces/it' + str(
                    iteration) + '/'
                os.system('mkdir -p ' + save_trace_dir)
                os.system('mv ' + event_dir +
                          directory_parameters.OUTPUT_FILES_path +
                          '*XZ.sema ' + save_trace_dir)
                # schiemenz: gotta be a better way to do this
                os.system('rm ' + event_dir + 'traces_forward/*')
                os.system('ln -s ' + save_trace_dir + '*' + ' ' + event_dir +
                          'traces_forward/')
            else:  # just move the traces into a directory to be read for adjoint source construction
                os.system('mv ' + event_dir +
                          directory_parameters.OUTPUT_FILES_path +
                          '*XZ.sema ' + event_dir + 'traces_forward/')
        elif simtype == 'adjoint':
            for kerneltype in input_parameters.kernels_to_save:
                kernel_dir = input_parameters.unsmoothed_kernel_path + '/it' + str(
                    iteration) + '/event' + str(event) + '/'
                os.system('mkdir -p ' + kernel_dir)
                os.system('mv ' + event_dir +
                          directory_parameters.DATABASES_MPI_path + '*' +
                          kerneltype + '.bin ' + kernel_dir)

    print "Changing Par_file for simtype = " + simtype + ', save_forward = ' + str(
        save_forward)
    if simtype not in ['forward', 'adjoint']:
        print 'ERROR: simulation type must be forward or adjoint'
        sys.exit(1)
    elif simtype == 'forward':
        if save_forward == True: os.system('./change_simulation_type.pl -F')
        else: os.system('./change_simulation_type.pl -f')
    elif simtype == 'adjoint':
        os.system('./change_simulation_type.pl -b')

    # Simulate events, an embarrassingly parallel loop
    results = pprocess.Map(limit=input_parameters.total_processors /
                           input_parameters.NPROCS,
                           reuse=1)
    parfun = results.manage(pprocess.MakeReusable(do_event))
    for i in range(input_parameters.number_of_events):
        parfun(i)
    results.finish()
Example #15
def TetListScalars(NodeTetList, ScalarVal, AoM=0):
    # if AoM == 0, use the average; if AoM != 0, use the minimum associated with that point
    N1 = len(NodeTetList)
    NPP1 = N1 / LIM
    NodeScal = np.zeros((N1, ))
    results = pprocess.Map(limit=LIM)
    calc = results.manage(pprocess.MakeParallel(TetListScalarsSec))
    for j in range(LIM):
        calc(np.array(range(0, NPP1)) + j * NPP1, NodeTetList, ScalarVal, AoM)
    for j in range(LIM):
        NodeScal[np.array(range(0, NPP1)) + j * NPP1] = results[j]
    #for j in range(LIM):
    if N1 > LIM * NPP1:
        NodeScal[range(LIM * NPP1, N1)] = TetListScalarsSec(
            np.array(range(LIM * NPP1, N1)), NodeTetList, ScalarVal, AoM)
    return NodeScal
Example #16
def LaplacMesh(nodes, NC, neighbList, Iter, listPos=0):
    # If listPos == 0: use the neighbour-list position matching the nodal position in nodes, i.e. len(neighbList) == nodes.size
    # If listPos != 0: use neighbList[ndnr], where nodes[i] = ndnr
    print "Laplacian Smooth"
    N1 = nodes.size
    NPP1 = N1 / LIM
    for i in range(Iter):
        print "	Iteration ", i + 1
        Umb = np.zeros((NC.shape))
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(MeshUmbrella))
        for j in range(0, LIM):
            calc(
                np.array(range(0, NPP1)) + j * NPP1, nodes, NC, neighbList,
                Umb, listPos)
        for j in range(0, LIM):
            Umb[nodes[np.array(range(0, NPP1)) + j * NPP1], :] = results[j]
        Umb[nodes[range(LIM * NPP1, N1)], :] = MeshUmbrella(
            range(LIM * NPP1, N1), nodes, NC, neighbList, Umb, listPos)
        NC = NC + Umb
    return NC
Example #17
def ShapeHist(NC, Tri, FeatPoints, radius, thetaB=12, phiB=12, rhoB=6):
    # Construct Shape Context histogram for Feature points
    N1 = FeatPoints.size
    NPP1 = N1 / LIM
    print 'Get Triangle and Vertex normals'
    VNORM = vertexnormal(NC, Tri)
    KDTnc = KDTree(NC, 5)
    print 'Set up Polar Histogram for given points'
    PolarHist = np.zeros((N1, 12, 12, 6))
    results = pprocess.Map(limit=LIM)
    calc = results.manage(pprocess.MakeParallel(HistP))
    for j in range(0, LIM):
        calc(
            np.array(range(0, NPP1)) + j * NPP1, FeatPoints, NC, VNORM, KDTnc,
            radius, wd, thetaB, phiB, rhoB)
    for j in range(0, LIM):
        PolarHist[np.array(range(0, NPP1)) + j * NPP1, ] = results[j]
    PolarHist[np.array(range(LIM * NPP1, N1)), ] = HistP(
        np.array(range(LIM * NPP1, N1)), FeatPoints, NC, VNORM, KDTnc, radius,
        wd, thetaB, phiB, rhoB)
    return PolarHist
Example #18
def Get3DCube_multiprocess(csvpath, augpath, datapath):
    PPNum = 6
    augLines = ReadCSV(augpath)[1:]
    csvLines = ReadCSV(csvpath)[1:]
    LIDCPath = '/home/yanghan/data/LIDC-IDRI'
    KAGGLEPath = '/home/yanghan/data/stage1'
    lidc_dict = GetAbsolutePath(LIDCPath, KAGGLEPath, csvLines)
    list_pp = range(PPNum)
    group_num = len(csvLines) // PPNum
    cutPoint = np.empty([PPNum, 2], dtype=int)
    for row in range(PPNum):
        # start point
        cutPoint[row, 0] = row * group_num
        if row == PPNum - 1:
            # stop point
            cutPoint[row, 1] = len(csvLines)
        else:
            # stop point
            cutPoint[row, 1] = row * group_num + group_num - 1 + 1

    # starting parallel reading
    st = time.time()
    results = pprocess.Map()
    # MakeParallel must wrap the callable itself (as in Example #8), not the
    # value returned by calling write_csv here
    parallel_function = results.manage(pprocess.MakeParallel(write_csv))
    for args in list_pp:
        parallel_function(args, cutPoint[args, 0], cutPoint[args, 1],
                          lidc_dict)
    print('\nStarting Parallel time {:.2f} seconds...'.format(time.time() -
                                                              st))

    st = time.time()
    results[:]
    # parallel_results = results[:]
    print('\nParallel costs {:.2f} seconds...'.format(time.time() - st))
Example #19
def FeatPoints(Kmax, Kmin, NC, Tri, radius, alpha=0.5, beta=0.5):
    # Get feature points from computed maximum and minimum feature curvature
    print 'Get Triangle and Vertex normals'
    VNORM = vertexnormal(NC, Tri)
    print 'Use Shape Index for Feature Point extraction'
    SI = 0.5 - np.arctan((Kmax + Kmin) / (Kmax - Kmin)) / np.pi
    KDTnc = KDTree(NC, 5)
    N1 = NC.shape[0]
    NPP1 = N1 / LIM
    FeatPoints = np.array([np.nan] * NC.shape[0])
    results = pprocess.Map(limit=LIM)
    calc = results.manage(pprocess.MakeParallel(IsFeat))
    for j in range(0, LIM):
        calc(
            np.array(range(0, NPP1)) + j * NPP1, NC, VNORM, KDTnc, radius, SI,
            alpha, beta, FeatPoints)
    for j in range(0, LIM):
        FeatPoints[np.array(range(0, NPP1)) + j * NPP1] = results[j]
    FeatPoints[np.array(range(LIM * NPP1,
                              N1))] = IsFeat(np.array(range(LIM * NPP1, N1)),
                                             NC, VNORM, KDTnc, radius, SI,
                                             alpha, beta, FeatPoints)
    FeatPoints = FeatPoints[FeatPoints >= 0]
    return np.array(FeatPoints, int)
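In formula form, the SI array used above is a [0, 1]-rescaled variant of the Koenderink-van Doorn shape index, computed from the maximum and minimum principal curvatures:

    S = \frac{1}{2} - \frac{1}{\pi}\arctan\!\left(\frac{\kappa_{\max} + \kappa_{\min}}{\kappa_{\max} - \kappa_{\min}}\right)

The thresholds alpha and beta are then passed to IsFeat (not shown) to select the feature points.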
Example #20
def LineReg(NCB, TriB, RlinesB, VlinesB, NCT, TriT, RlinesT, VlinesT,
            percentReg, DistMax):
    N1 = NCB.shape[0]
    NPP1 = N1 / LIM
    [RsegmB, VsegmB, RsegmT, VsegmT] = [
        np.array([[], [], []]).reshape((0, 3)),
        np.array([[], [], []]).reshape((0, 3)),
        np.array([[], [], []]).reshape((0, 3)),
        np.array([[], [], []]).reshape((0, 3))
    ]
    [RnodesB, VnodesB, RnodesT, VnodesT] = [
        np.array([[], []]).reshape((0, 2)),
        np.array([[], []]).reshape((0, 2)),
        np.array([[], []]).reshape((0, 2)),
        np.array([[], []]).reshape((0, 2))
    ]
    # Transform Lines into linesegments with i'th segment allocated as [NC_point1,NC_point2,Line_nr]
    #	and list of nodes allocated as [Nd_nr, Line_nr]
    for i in range(1, RlinesB[0] + 1):
        Lsize = RlinesB[i].size
        RnodesB = np.array(
            np.r_[RnodesB, np.c_[RlinesB[i],
                                 np.ones((Lsize, 1)) * i]], int)
        RsegmB = np.array(
            np.r_[RsegmB, np.c_[RlinesB[i][0:Lsize - 1], RlinesB[i][1:Lsize],
                                np.ones((Lsize - 1, 1)) * i]], int)
    for i in range(1, VlinesB[0] + 1):
        Lsize = VlinesB[i].size
        VnodesB = np.array(
            np.r_[VnodesB, np.c_[VlinesB[i],
                                 np.ones((Lsize, 1)) * i]], int)
        VsegmB = np.array(
            np.r_[VsegmB, np.c_[VlinesB[i][0:Lsize - 1], VlinesB[i][1:Lsize],
                                np.ones((Lsize - 1, 1)) * i]], int)
    for i in range(1, RlinesT[0] + 1):
        Lsize = RlinesT[i].size
        RnodesT = np.array(
            np.r_[RnodesT, np.c_[RlinesT[i],
                                 np.ones((Lsize, 1)) * i]], int)
        RsegmT = np.array(
            np.r_[RsegmT, np.c_[RlinesT[i][0:Lsize - 1], RlinesT[i][1:Lsize],
                                np.ones((Lsize - 1, 1)) * i]], int)
    for i in range(1, VlinesT[0] + 1):
        Lsize = VlinesT[i].size
        VnodesT = np.array(
            np.r_[VnodesT, np.c_[VlinesT[i],
                                 np.ones((Lsize, 1)) * i]], int)
        VsegmT = np.array(
            np.r_[VsegmT, np.c_[VlinesT[i][0:Lsize - 1], VlinesT[i][1:Lsize],
                                np.ones((Lsize - 1, 1)) * i]], int)
    # find average nodal coordinate of target line segments
    RsegTNC, VsegTNC = (NCT[RsegmT[:, 0], ] + NCT[RsegmT[:, 1], ]) / 2, (
        NCT[VsegmT[:, 0], ] + NCT[VsegmT[:, 1], ]) / 2
    # set up k-d tree of nodes and segments in Target model lines
    print 'TARGET: Determine triangle centroids, triangle normals and weighted vertex normals'
    TBCT = np.c_[
        np.sum(np.c_[NCT[TriT[:, 0], 0], NCT[TriT[:, 1], 0], NCT[TriT[:, 2], 0]], 1) / 3,
        np.sum(np.c_[NCT[TriT[:, 0], 1], NCT[TriT[:, 1], 1], NCT[TriT[:, 2], 1]], 1) / 3,
        np.sum(np.c_[NCT[TriT[:, 0], 2], NCT[TriT[:, 1], 2], NCT[TriT[:, 2], 2]], 1) / 3]
    TNORMT = np.cross(NCT[TriT[:, 1], :] - NCT[TriT[:, 0], :],
                      NCT[TriT[:, 2], :] - NCT[TriT[:, 0], :])
    TNORMT = (TNORMT.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([TNORMT * TNORMT]), 2)))).T
    VNORMT = vrtxnormal(NCT, TriT, TBCT, TNORMT)
    print 'TARGET: Determine segment normal directions'
    SegNormRT, SegNormVT = (VNORMT[RsegmT[:, 0], ] +
                            VNORMT[RsegmT[:, 1], ]), (VNORMT[VsegmT[:, 0], ] +
                                                      VNORMT[VsegmT[:, 1], ])
    SegNormRT = (SegNormRT.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([SegNormRT * SegNormRT]), 2)))).T
    SegNormVT = (SegNormVT.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([SegNormVT * SegNormVT]), 2)))).T

    # find average nodal coordinate of base linesegments
    RsegBNC, VsegBNC = (NCB[RsegmB[:, 0], ] + NCB[RsegmB[:, 1], ]) / 2, (
        NCB[VsegmB[:, 0], ] + NCB[VsegmB[:, 1], ]) / 2
    # set up k-d tree of nodes and segments in Base model lines
    print 'BASE: Determine triangle centroids, triangle normals and weighted vertex normals'
    TBCB = np.c_[
        np.sum(np.c_[NCB[TriB[:, 0], 0], NCB[TriB[:, 1], 0], NCB[TriB[:, 2], 0]], 1) / 3,
        np.sum(np.c_[NCB[TriB[:, 0], 1], NCB[TriB[:, 1], 1], NCB[TriB[:, 2], 1]], 1) / 3,
        np.sum(np.c_[NCB[TriB[:, 0], 2], NCB[TriB[:, 1], 2], NCB[TriB[:, 2], 2]], 1) / 3]
    TNORMB = np.cross(NCB[TriB[:, 1], :] - NCB[TriB[:, 0], :],
                      NCB[TriB[:, 2], :] - NCB[TriB[:, 0], :])
    TNORMB = (TNORMB.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([TNORMB * TNORMB]), 2)))).T
    VNORMB = vrtxnormal(NCB, TriB, TBCB, TNORMB)
    print 'BASE: Determine segment normal directions'
    SegNormRB, SegNormVB = (VNORMB[RsegmB[:, 0], ] +
                            VNORMB[RsegmB[:, 1], ]), (VNORMB[VsegmB[:, 0], ] +
                                                      VNORMB[VsegmB[:, 1], ])
    SegNormRB = (SegNormRB.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([SegNormRB * SegNormRB]), 2)))).T
    SegNormVB = (SegNormVB.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([SegNormVB * SegNormVB]), 2)))).T

    deform, k = 2, 0
    while (deform > 0.0001) & (k < 100):
        Supp = 50. / (1 + k / 5)
        if Supp < 10:
            Supp = 10
        k = k + 1
        print '		ITERATION ', k
        ## find average nodal coordinate of base linesegments
        #RsegBNC,VsegBNC = (NCB[RsegmB[:,0],]+NCB[RsegmB[:,1],])/2,(NCB[VsegmB[:,0],]+NCB[VsegmB[:,1],])/2
        ## set up k-d tree of nodes and segments in Base model lines
        #print 'BASE: Determine triangle centroids, triangle normals and weighted vertex normals'
        #TBCB = np.c_[np.sum(np.c_[NCB[TriB[:,0],0],NCB[TriB[:,1],0],NCB[TriB[:,2],0]],1)/3,
        #np.sum(np.c_[NCB[TriB[:,0],1],NCB[TriB[:,1],1],NCB[TriB[:,2],1]],1)/3,np.sum(np.c_[NCB[TriB[:,0],2],NCB[TriB[:,1],2],NCB[TriB[:,2],2]],1)/3]
        #TNORMB = np.cross(NCB[TriB[:,1],:]-NCB[TriB[:,0],:],NCB[TriB[:,2],:]-NCB[TriB[:,0],:])
        #TNORMB = (TNORMB.T/(np.ones((3,1))*np.sqrt(np.sum(np.array([TNORMB*TNORMB]),2)))).T
        #VNORMB = vrtxnormal(NCB,TriB,TBCB,TNORMB)
        #print 'BASE: Determine segment normal directions'
        #SegNormRB,SegNormVB = (VNORMB[RsegmB[:,0],]+VNORMB[RsegmB[:,1],]),(VNORMB[VsegmB[:,0],]+VNORMB[VsegmB[:,1],])
        #SegNormRB = (SegNormRB.T/(np.ones((3,1))*np.sqrt(np.sum(np.array([SegNormRB*SegNormRB]),2)))).T
        #SegNormVB = (SegNormVB.T/(np.ones((3,1))*np.sqrt(np.sum(np.array([SegNormVB*SegNormVB]),2)))).T

        ##~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~REGISTRATION~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~##
        print 'Register feature lines'
        R12R = FeatReg(RnodesB, NCB, VNORMB, RsegmT, RsegTNC, SegNormRT,
                       RnodesT, NCT,
                       DistMax)  #find registered mesh 1 to 2 Ridges
        R12V = FeatReg(VnodesB, NCB, VNORMB, VsegmT, VsegTNC, SegNormVT,
                       VnodesT, NCT,
                       DistMax)  #find registered mesh 1 to 2 Valleys
        R21R = FeatReg(RnodesT, NCT, VNORMT, RsegmB, RsegBNC, SegNormRB,
                       RnodesB, NCB,
                       DistMax)  #find registered mesh 2 to 1 Ridges
        R21V = FeatReg(VnodesT, NCT, VNORMT, VsegmB, VsegBNC, SegNormVB,
                       VnodesB, NCB,
                       DistMax)  #find registered mesh 2 to 1 Valleys
        print 'Build topological map to disregard unmatched features'
        R12R, R21R = BuildMap(NCB, NCT, RnodesB, RnodesT, RsegmB, RsegmT, R12R,
                              R21R, percentReg)
        R12V, R21V = BuildMap(NCB, NCT, VnodesB, VnodesT, VsegmB, VsegmT, R12V,
                              R21V, percentReg)
        print 'Determine Base deformation required'
        B12R, T12R = NCB[np.array(R12R[:, 0], int), ], R12R[:, 1:4]
        B12V, T12V = NCB[np.array(R12V[:, 0], int), ], R12V[:, 1:4]
        B21R, T21R = R21R[:, 1:4], NCT[np.array(R21R[:, 0], int), ]
        B21V, T21V = R21V[:, 1:4], NCT[np.array(R21V[:, 0], int), ]
        Base_i = np.r_[B12R, B12V, B21R, B21V]
        Target_i = np.r_[T12R, T12V, T21R, T21V]
        DISP = Target_i - Base_i
        DISP_Full = np.zeros(NCB.shape)
        print 'Determine smooth deformation'
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(GaussMove))
        for j in range(0, LIM):
            calc(
                np.array(range(0, NPP1)) + j * NPP1, NCB, Base_i, DISP,
                DISP_Full, Supp)
        for j in range(0, LIM):
            DISP_Full[np.array(range(0, NPP1)) + j * NPP1, :] = results[j]
        DISP_Full[range(LIM * NPP1, N1), :] = GaussMove(
            range(LIM * NPP1, N1), NCB, Base_i, DISP, DISP_Full, Supp)
        NCB = NCB + DISP_Full
        deform = np.sum(np.sqrt(np.sum(DISP_Full * DISP_Full, 1))) / N1
        print '	TOTAL CURRENT DEFORMATION: ', deform
    # return which NODES within surface features are registered:
    keepB = np.r_[R12R[:, 0], R12V[:, 0]]
    keepT = np.r_[R21R[:, 0], R21V[:, 0]]
    keepB, keepT = np.array(keepB, int), np.array(keepT, int)
    return NCB, keepB, keepT
Example #21
flux_list = []
err_list = []
for x in freqs:
    flux_list.append(data["int_flux_"+x][brightsrcs])
# Error propagation: error on log(x) = err_x/x
    fitting_error = data["err_int_flux_"+x][brightsrcs]/data["int_flux_"+x][brightsrcs]
    err_list.append(np.sqrt(fitting_error**2 + calibration_error**2))
    
flux_array = np.transpose(np.ma.vstack(flux_list)).astype("float32")
flux_array = np.ma.log(flux_array)
flux_errors = np.transpose(np.ma.vstack(err_list)).astype("float32")
#names = data["Name"][brightsrcs]

#weights = 1/(flux_errors*flux_errors)

results = pprocess.Map(limit=cores)
calc = results.manage(pprocess.MakeParallel(fit_spectrum))

for i in range(0,len(brightsrcs)):
    calc(freq_array,flux_array[i],flux_errors[i]) # ,options.plot)

# Unpack results
alpha, err_alpha, amp, err_amp, chi2red = map(list, zip(*results))

# Convert to numpy arrays
alpha = np.array(alpha, dtype="float32")
err_alpha = np.array(err_alpha, dtype="float32")
amp = np.array(amp, dtype="float32")
err_amp = np.array(err_amp, dtype="float32")
chi2red = np.array(chi2red, dtype="float32")
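The error terms built in the loop above follow from first-order propagation through the logarithm used for the fit: since d(ln S)/dS = 1/S, the fractional fitting error becomes the uncertainty on ln S, and the calibration error is added in quadrature,

    \sigma_{\ln S} = \sqrt{\left(\frac{\sigma_{S,\mathrm{fit}}}{S}\right)^{2} + \sigma_{\mathrm{cal}}^{2}},

which matches the np.sqrt(fitting_error**2 + calibration_error**2) expression in the loop.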
Example #22
def voxel_selection(vol_surf_mapping,
                    radius,
                    source_surf=None,
                    source_surf_nodes=None,
                    distance_metric='dijkstra',
                    eta_step=10,
                    nproc=None,
                    outside_node_margin=None,
                    results_backend=None,
                    tmp_prefix='tmpvoxsel'):
    """
    Voxel selection for multiple center nodes on the surface

    Parameters
    ----------
    vol_surf_mapping: volsurf.VolSurfMapping
        Contains gray and white matter surface, and volume geometry
    radius: int or float
        Size of searchlight. If an integer, then it indicates the number of
        voxels. If a float, then it indicates the radius of the disc
    source_surf: surf.Surface or None
        Surface used to compute distance between nodes. If omitted, it is
        the average of the gray and white surfaces.
    source_surf_nodes: list of int or numpy array or None
        Indices of nodes in source_surf that serve as searchlight center.
        By default every node serves as a searchlight center.
    distance_metric: str
        Distance metric between nodes. 'euclidean' or 'dijkstra' (default)
    eta_step: int
        Report progress every eta_step (default: 10).
    nproc: int or None
        Number of parallel threads. None means as many threads as the
        system supports. The pprocess module is required for parallel
        threads; if it cannot be used, then a single thread is used.
    outside_node_margin: float or True or None (default)
        By default nodes outside the volume are skipped; using this
        parameter allows for a margin. If this value is a float (possibly
        np.inf), then all nodes within outside_node_margin Dijkstra
        distance from any node within the volume are still assigned
        associated voxels. If outside_node_margin is True, then a node is
        always assigned voxels regardless of its position in the volume.
    results_backend : 'native' or 'hdf5' or None (default).
        Specifies the way results are provided back from a processing block
        in case of nproc > 1. 'native' is pickling/unpickling of results by
        pprocess, while 'hdf5' would use h5save/h5load functionality.
        'hdf5' might be more time and memory efficient in some cases.
        If None, then 'hdf5' if used if available, else 'native'.
    tmp_prefix : str, optional
        If specified -- serves as a prefix for temporary files storage
        if results_backend == 'hdf5'.  Thus can specify the directory to use
        (trailing file path separator is not added automagically).

    Returns
    -------
    sel: volume_mask_dict.VolumeMaskDictionary
        Voxel selection results that associate, with each node, the indices
        of the surrounding voxels.
    """

    # construct the intermediate surface, which is used
    # to measure distances
    intermediate_surf = (vol_surf_mapping.pial_surface * .5) + \
                        (vol_surf_mapping.white_surface * .5)

    if source_surf is None:
        source_surf = intermediate_surf
    else:
        source_surf = surf.from_any(source_surf)

    if _debug():
        debug(
            'SVS', "Generated high-res intermediate surface: "
            "%d nodes, %d faces" %
            (intermediate_surf.nvertices, intermediate_surf.nfaces))
        debug(
            'SVS', "Mapping source to high-res surface:"
            " %d nodes, %d faces" %
            (source_surf.nvertices, source_surf.nfaces))

    if distance_metric[0].lower() == 'e' and outside_node_margin:
        # euclidean distance: identity mapping
        # this is *slow*
        n = source_surf.nvertices
        xyz = source_surf.vertices
        src2intermediate = dict((i, tuple(xyz[i])) for i in xrange(n))
    else:
        # find a mapping from nodes in source_surf to those in
        # intermediate surface
        src2intermediate = source_surf.map_to_high_resolution_surf(\
                                                        intermediate_surf)

    # if no sources are given, then visit all nodes
    if source_surf_nodes is None:
        source_surf_nodes = np.arange(source_surf.nvertices)

    n = len(source_surf_nodes)

    if _debug():
        debug('SVS', "Performing surface-based voxel selection"
              " for %d centers" % n)

    # visit in random order, for a better ETA estimate
    visitorder = list(np.random.permutation(len(source_surf_nodes)))

    # construct mapping from nodes to enclosing voxels
    n2v = vol_surf_mapping.get_node2voxels_mapping()

    if __debug__:
        debug('SVS', "Generated mapping from nodes" " to intersecting voxels")

    # build voxel selector
    voxel_selector = VoxelSelector(radius,
                                   intermediate_surf,
                                   n2v,
                                   distance_metric,
                                   outside_node_margin=outside_node_margin)

    if _debug():
        debug('SVS', "Instantiated voxel selector (radius %r)" % radius)

    # structure to keep output data. Initialize with None, then
    # make a sparse_attributes instance when we know what the attributes are
    node2volume_attributes = None

    attribute_mapper = voxel_selector.disc_voxel_indices_and_attributes

    srcs_order = [source_surf_nodes[node] for node in visitorder]
    src_trg_nodes = [(src, src2intermediate[src]) for src in srcs_order]

    if nproc is not None and nproc > 1 and not externals.exists('pprocess'):
        raise RuntimeError("The 'pprocess' module is required for "
                           "multiprocess searchlights. Please either "
                           "install python-pprocess, or reduce `nproc` "
                           "to 1 (got nproc=%i) or set to default None" %
                           nproc)

    if nproc is None:
        if externals.exists('pprocess'):
            try:
                import pprocess
                nproc = pprocess.get_number_of_cores() or 1
                if _debug():
                    debug("SVS", 'Using pprocess with %d cores' % nproc)
            except:
                if _debug():
                    debug("SVS", 'pprocess not available')

        if nproc is None:
            # importing pprocess failed - so use a single core
            nproc = 1
            debug("SVS", 'Using %d cores - pprocess not available' % nproc)

    # get the voxel selection parameters
    parameter_dict = vol_surf_mapping.get_parameter_dict()
    parameter_dict.update(dict(radius=radius,
                               outside_node_margin=outside_node_margin,
                               distance_metric=distance_metric),
                          source_nvertices=source_surf.nvertices)

    init_output = lambda: volume_mask_dict.VolumeMaskDictionary(
        vol_surf_mapping.volgeom, intermediate_surf, meta=parameter_dict)

    if nproc > 1:
        if results_backend == 'hdf5':
            externals.exists('h5py', raise_=True)
        elif results_backend is None:
            if externals.exists(
                    'h5py') and externals.versions['hdf5'] >= '1.8.7':
                results_backend = 'hdf5'
            else:
                results_backend = 'native'
        if _debug():
            debug('SVS', "Using '%s' backend" % (results_backend, ))

        if not results_backend in ('native', 'hdf5'):
            raise ValueError('Illegal results backend %r' % results_backend)

        import pprocess
        n_srcs = len(src_trg_nodes)
        blocks = np.array_split(np.arange(n_srcs), nproc)

        results = pprocess.Map(limit=nproc)
        reducer = results.manage(pprocess.MakeParallel(_reduce_mapper))

        if __debug__:
            debug('SVS', "Starting %d child processes", (len(blocks), ))

        for i, block in enumerate(blocks):
            empty_dict = init_output()

            src_trg = []
            for idx in block:
                src_trg.append(src_trg_nodes[idx])

            if _debug():
                debug('SVS',
                      "  starting block %d/%d: %d centers" %
                      (i + 1, nproc, len(src_trg)),
                      cr=True)

            reducer(empty_dict,
                    attribute_mapper,
                    src_trg,
                    eta_step=eta_step,
                    proc_id='%d' % (i + 1, ),
                    results_backend=results_backend,
                    tmp_prefix=tmp_prefix)
        if _debug():
            debug('SVS', '')
            debug('SVS', 'Started all %d child processes' % (len(blocks)))
            tstart = time.time()

        node2volume_attributes = None
        for i, result in enumerate(results):
            if result is None:
                continue

            if results_backend == 'hdf5':
                result_fn = result
                result = h5load(result_fn)
                os.remove(result_fn)

            if node2volume_attributes is None:
                # first time we have actual results.
                # Use as a starting point
                node2volume_attributes = result
                if _debug():
                    debug('SVS', '')
                    debug(
                        'SVS', "Merging results from %d child "
                        "processes using '%s' backend" %
                        (len(blocks), results_backend))
            else:
                # merge new with current data
                node2volume_attributes.merge(result)
            if _debug():
                debug('SVS',
                      "  merged result block %d/%d" % (i + 1, nproc),
                      cr=True)

        if _debug():
            telapsed = time.time() - tstart
            debug('SVS', "")
            debug(
                'SVS', 'Merged results from %d child processes - '
                'took %s' % (len(blocks), seconds2prettystring(telapsed)))

    else:
        empty_dict = init_output()
        node2volume_attributes = _reduce_mapper(empty_dict,
                                                attribute_mapper,
                                                src_trg_nodes,
                                                eta_step=eta_step)
        debug('SVS', "")

    if _debug():
        if node2volume_attributes is None:
            msgs = [
                "Voxel selection completed: none of %d nodes have "
                "voxels associated" % len(visitorder)
            ]
        else:
            nvox_selected = np.sum(node2volume_attributes.get_mask() != 0)
            vg = vol_surf_mapping.volgeom

            msgs = [
                "Voxel selection completed: %d / %d nodes have "
                "voxels associated" %
                (len(node2volume_attributes.keys()), len(visitorder)),
                "Selected %d / %d  voxels (%.0f%%) in the mask at least once" %
                (nvox_selected, vg.nvoxels_mask,
                 100. * nvox_selected / vg.nvoxels_mask)
            ]

        for msg in msgs:
            debug("SVS", msg)

    if node2volume_attributes is None:
        warning('No voxels associated with any of %d nodes' % len(visitorder))
    return node2volume_attributes
Example #23
def elasticsurf(NCB,
                ConnectB,
                LandmB,
                LandmB_NC,
                AllowableBI,
                NCT,
                ConnectT,
                AllowableT,
                UseN_B,
                UseN_T,
                k_max,
                USENORMALS,
                gamm=2,
                sigm0=10,
                f=1.0715):
    # Elastic surface registration:
    #inputs:
    # NCB,NCT: nodal coordinates of base and target surfaces
    # ConnectB, ConnectT: Base & target connectivity
    # LandmB, LandmB_NC: landmarks that must have a 1-to-1 correspondence (input 0 if no landmarks are present)
    # UseN_B & AllowableB: Feature-dependent nodes on the Base mesh (indices in NCB) and allowable triangles to match.
    # UseN_T & AllowableT: Selective feature-preserving nodes and triangles (indices in NCT and ConnectT) on the target mesh.
    # k_max: maximum number of iterations
    ######## ADDITIONAL SETTINGS REQUIRED ARE SET INTERNAL TO CODE #########
    print
    print "SELECTIVE MESH MORPHING ALGORITHM USING ELASTIC SURFACE REGISTRATION"
    print "	-G.J.J.v.Rensburg - 22/04/2010-"
    t_start = time.clock()
    ConnectB = np.array(ConnectB, int)
    ConnectT = np.array(ConnectT, int)
    #LandmB = np.array(LandmB[:,0],int)		# do -1 later to be consistent with python indexing, first need to do other "temporary landmarks"& check that they dont fall on actual landmark positions!

    # Settings for elastic surface registration:
    m = 20  # nearest neighbour parameter
    alph = 0.5  # normalization factor
    #gamm=2 # smoothing parameter1
    #sigm0=10 # smoothing parameter2
    #f=1.0715 # smoothing parameter3
    Tol = 0.0001  # stopping criteria

    # determine N1,N2,T1 and T2:
    N1 = NCB.shape[0]
    N2 = NCT.shape[0]
    T1 = ConnectB.shape[0]
    T2 = ConnectT.shape[0]
    NL = LandmB.shape[0]
    # For parallel programming, divide the number of computations by the number of parallel processes (LIM)
    NPP1 = N1 / LIM
    NPP2 = N2 / LIM

    ################################     INITIALIZE & NODES OF CONCERN:    #################################
    ########################################################################################################
    print
    print
    print "Set up 1-ring neighbor list for all points on the generic mesh"
    #neighbList = [[0]]*N1
    #results = pprocess.Map(limit=LIM)
    #calc = results.manage(pprocess.MakeParallel(Get1neigh))
    #for j in range(0,LIM):
    #calc(np.array(range(0,NPP1))+j*NPP1,NCB,ConnectB)
    #for j in range(0,LIM):
    #neighbList[j*NPP1:(1+j)*NPP1] = results[j]
    #neighbList[LIM*NPP1:N1]=Get1neigh(np.array(range(LIM*NPP1,N1)),NCB,ConnectB)
    #np.ma.dump(neighbList,'SkullSurf_neighbList')
    neighbList = np.ma.load('SkullSurf_neighbList')

    print
    print "INITIALIZE SURFACE DEFORMATION"
    CONV = []
    print " 	enquire nodes where required displacement is checked"
    ###remove Landmarks from FDNB and SFPNT:
    #for i in range(0,NL):
    #if find_repeats(np.r_[UseN_B,LandmB[i,]])[0].size>0:
    #r=np.where(UseN_B==LandmB[i,])[0]
    #UseN_B = np.r_[UseN_B[0:r,],UseN_B[r+1:UseN_B.size,]]
    SamplingB = UseN_B.size
    SamplingT = UseN_T.size
    ## Full list of nodes used in Surface registration:
    LMB = np.r_[
        UseN_B]  #,LandmB]	# Last NL entries are reserved for Landmarks that HAVE TO FIT points on the target mesh
    LMT = np.r_[UseN_T]

    # For parallel programming, divide the number of computations by the number of parallel processes (LIM)
    SBPP = SamplingB / LIM
    STPP = SamplingT / LIM
    FMorph = 0

    print
    print "COARSE SURFACE REGISTRATION"
    #print "	Compute known displacement for Base_Landmarks "
    #knownC = NCB[LandmB,]
    #knownD = LandmB_NC-knownC
    ####print "	using landmark displacements to deform using RBF"
    ####W_km1 = RBFmorph(NCB,knownC,knownD)
    ####tic = time.clock()
    ####W_km1 = MeshSmooth(W_km1,neighbList,10)
    ####print "		Smoothing done in ",time.clock()-tic," seconds"
    ####np.ma.dump(W_km1,'TempElasNodes_Iter'+str(k-1)+'_Time'+time.ctime())
    #print 'Smooth Gaussian Weight deformation to align Landmarks to target positions'
    #k=0
    #Err = 2
    #W_km1 = np.r_[NCB]
    #while (k<100)|(Err>Tol):
    #k=k+1
    #print 'Iteration : ',k
    #DS = np.zeros((N1,3))
    #knownC = W_km1[LandmB,]
    #knownD = LandmB_NC-knownC
    #knownD[np.isnan(knownD)]=0
    ## Deform mesh using Gaussian smoothing as suggested in paper by R.Bryan et al.
    #sigma_k2 = np.power(np.power(f,-k)*20,2)
    #results = pprocess.Map(limit=LIM)
    #calc = results.manage(pprocess.MakeParallel(GaussianSmooth))
    #for j in range(0,LIM):
    #calc(np.array(range(0,NPP1))+j*NPP1,W_km1,knownC,knownD,sigma_k2,gamm)
    #for j in range(0,LIM):
    #DS[np.array(range(0,NPP1))+j*NPP1,:] = results[j]
    #DS[range(LIM*NPP1,N1),:]=GaussianSmooth(np.array(range(LIM*NPP1,N1)),W_km1,knownC,knownD,sigma_k2,gamm)
    #DS[np.isnan(DS)]=0
    #W_km1 = W_km1+DS
    #Err = np.sum(np.sqrt(np.sum(DS*DS,1)),0)/N1
    #W_km1 = MeshSmooth(W_km1,neighbList,10)
    #np.ma.dump(W_km1,'TempElasNodes_Iter0_TimeWedMar14_2011_20')
    ###np.ma.dump(W_km1,'TempElasNodes_Iter0_Time'+time.ctime())
    W_km1 = NCB

    ################################    MAIN MESH DEFORMATION ALGORITHM:   #################################
    ########################################################################################################
    k = 1
    print
    print "ELASTIC SURFACE REGISTRATION"
    print "determine vertex normals of target surface"
    #Compute target-mesh triangle centroids:
    print "determining centroids of target surface triangles"
    S_2_centr = np.c_[np.sum(
        np.c_[NCT[ConnectT[:, 0], 0], NCT[ConnectT[:, 1], 0],
              NCT[ConnectT[:, 2], 0]], 1) / 3,
                      np.sum(
                          np.c_[NCT[ConnectT[:, 0], 1], NCT[ConnectT[:, 1], 1],
                                NCT[ConnectT[:, 2], 1]], 1) / 3,
                      np.sum(
                          np.c_[NCT[ConnectT[:, 0], 2], NCT[ConnectT[:, 1], 2],
                                NCT[ConnectT[:, 2], 2]], 1) / 3]
    print "determine triangle and vertex normals of target surface"
    TNORMT = np.cross(NCT[ConnectT[:, 1], :] - NCT[ConnectT[:, 0], :],
                      NCT[ConnectT[:, 2], :] - NCT[ConnectT[:, 0], :])
    TNORMT = (TNORMT.T / (np.ones(
        (3, 1)) * np.sqrt(np.sum(np.array([TNORMT * TNORMT]), 2)))).T
    VNORMT = vrtxnormal(NCT, ConnectT, S_2_centr, TNORMT)

    print "determining kd-trees of target surface centroids and nodal coordinates"
    KDT_TC = KDTree(S_2_centr, m)
    KDT_TN = KDTree(NCT, m)

    print 'initialize absolute Gaussian weight for final displacement to preserve element quality'
    GW = np.ones((SamplingB + SamplingT, 1))

    while k <= k_max:
        D1 = np.zeros((SamplingB, 3))
        D2 = np.zeros((SamplingT, 3))
        DS = np.zeros((N1, 3))
        AllowableB = np.r_[AllowableBI]
        print
        print "MESH DEFORMATION ITERATION", k
        print "	determining known displacement of landmarks"
        if NL > 0:
            knownD = LandmB_NC - W_km1[LandmB, ]
        print "	determining centroids of deforming mesh"
        W_km1_centr = np.c_[
            np.sum(
                np.c_[W_km1[ConnectB[:, 0], 0], W_km1[ConnectB[:, 1], 0],
                      W_km1[ConnectB[:, 2], 0]], 1) / 3,
            np.sum(
                np.c_[W_km1[ConnectB[:, 0], 1], W_km1[ConnectB[:, 1], 1],
                      W_km1[ConnectB[:, 2], 1]], 1) / 3,
            np.sum(
                np.c_[W_km1[ConnectB[:, 0], 2], W_km1[ConnectB[:, 1], 2],
                      W_km1[ConnectB[:, 2], 2]], 1) / 3]
        print "	determine triangle and vertex normals of deforming surface"
        TNORMB = np.cross(W_km1[ConnectB[:, 1], :] - W_km1[ConnectB[:, 0], :],
                          W_km1[ConnectB[:, 2], :] - W_km1[ConnectB[:, 0], :])
        TNORMB = TNORMB / np.sqrt(np.sum(TNORMB * TNORMB, 1)).reshape((-1, 1))
        VNORMB = vrtxnormal(W_km1, ConnectB, W_km1_centr, TNORMB)

        print "	determining kd-tree of current deforming surface centroids and nodal coordinates"
        KDT_KC = KDTree(W_km1_centr, m)
        KDT_KN = KDTree(W_km1, m)
        #if find_repeats(np.r_[USENORMALS,k])[0].size>0:
        #print " ###	Use triangle and vertex normals in setting up point correspondence"
        print "		setting up D1(i,d)"
        tic = time.clock()
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(DsetupNorm))
        for j in range(0, LIM):
            calc(
                np.array(range(0, SBPP)) + j * SBPP, W_km1, VNORMB, NCT,
                TNORMT, VNORMT, ConnectT, S_2_centr, AllowableT, LMB, D1)
        for j in range(0, LIM):
            D1[np.array(range(0, SBPP)) + j * SBPP, :] = results[j]
        D1[range(LIM * SBPP, SamplingB), :] = DsetupNorm(
            range(LIM * SBPP, SamplingB), W_km1, VNORMB, NCT, TNORMT, VNORMT,
            ConnectT, S_2_centr, AllowableT, LMB, D1)
        #D1=np.r_[D1,knownD]
        print "			", time.clock() - tic, " seconds"
        print "		update allowable triangles on generic mesh:"
        remP = D1[:, 0] + D1[:, 1] + D1[:, 2] == 0
        removeP = LMB[remP]
        print "			unregistered points on generic mesh: ", removeP.size
        print "			number of original generic triangles allowed: ", AllowableB.shape[
            0]
        for rp in removeP:
            rowsNo = np.where(AllowableB == rp)[0]
            rowsNo.sort()
            for rr in rowsNo[::-1]:
                AllowableB = AllowableB[np.where(
                    np.arange(AllowableB.shape[0]) != rr)[0], ]
        print "			number of generic triangles allowed for current iteration: ", AllowableB.shape[
            0]
        if find_repeats(np.r_[USENORMALS, k])[0].size > 0:
            print " ###	Use triangle and vertex normals in setting up point correspondence"
            print "		setting up D2(j,c)"
            tic = time.clock()
            results = pprocess.Map(limit=LIM)
            calc = results.manage(pprocess.MakeParallel(DsetupNorm))
            for j in range(0, LIM):
                calc(
                    np.array(range(0, STPP)) + j * STPP, NCT, VNORMT, W_km1,
                    TNORMB, VNORMB, ConnectB, W_km1_centr, AllowableB, LMT, D2)
            for j in range(0, LIM):
                D2[np.array(range(0, STPP)) + j * STPP, :] = results[j]
            D2[range(LIM * STPP, SamplingT), :] = DsetupNorm(
                range(LIM * STPP, SamplingT), NCT, VNORMT, W_km1, TNORMB,
                VNORMB, ConnectB, W_km1_centr, AllowableB, LMT, D2)
            print "			", time.clock() - tic, " seconds"
        else:
            print "	Simple closest point search iteration "
            #print "		setting up D1(i,d)"
            #tic = time.clock()
            #results = pprocess.Map(limit=LIM)
            #calc = results.manage(pprocess.MakeParallel(Dsetup))
            #for j in range(0,LIM):
            #calc(np.array(range(0,SBPP))+j*SBPP,W_km1,NCT,ConnectT,S_2_centr,AllowableT,LMB,D1,KDT_TC,KDT_TN)
            #for j in range(0,LIM):
            #D1[np.array(range(0,SBPP))+j*SBPP,:] = results[j]
            #D1[range(LIM*SBPP,SamplingB),:]=Dsetup(range(LIM*SBPP,SamplingB),W_km1,NCT,ConnectT,S_2_centr,AllowableT,LMB,D1,KDT_TC,KDT_TN)
            ##D1=np.r_[D1,knownD]
            #print "			",time.clock()-tic," seconds"
            #remP = D1[:,0]+D1[:,1]+D1[:,2]==0
            #removeP = LMB[remP]
            #print "			unregistered points on generic mesh: ",removeP.size
            #print "			number of original generic triangles allowed: ",AllowableB.shape[0]
            #for rp in removeP:
            #rowsNo = np.where(AllowableB==rp)[0]
            #rowsNo.sort
            #for rr in rowsNo[::-1]:
            #AllowableB = AllowableB[np.where(range(AllowableB.shape[0])<>rr)[0],]
            #print "			number of generic triangles allowed for current iteration: ",AllowableB.shape[0]
            print "		setting up D2(j,c)"
            tic = time.clock()
            results = pprocess.Map(limit=LIM)
            calc = results.manage(pprocess.MakeParallel(Dsetup))
            for j in range(0, LIM):
                calc(
                    np.array(range(0, STPP)) + j * STPP, NCT, W_km1, ConnectB,
                    W_km1_centr, AllowableB, LMT, D2, KDT_KC, KDT_KN)
            for j in range(0, LIM):
                D2[np.array(range(0, STPP)) + j * STPP, :] = results[j]
            D2[range(LIM * STPP, SamplingT), :] = Dsetup(
                range(LIM * STPP, SamplingT), NCT, W_km1, ConnectB,
                W_km1_centr, AllowableB, LMT, D2, KDT_KC, KDT_KN)
            print "			", time.clock() - tic, " seconds"

        # Compute displacement update for each node using suggested Gaussian radial basis function:
        print "	determining smoothed displacement field"

        tic = time.clock()
        NCp = np.r_[W_km1[LMB, :], NCT[LMT, :] + D2]
        DD = np.r_[D1, -D2]
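        # All constraint points in NCp lie on the current deforming surface: the sampled generic-mesh
        # vertices themselves and the target samples mapped onto that surface (NCT + D2). Their
        # displacements (D1 and -D2) both point from the deforming surface toward the target.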
        # Mask Nan and Inf values if any:
        DD[np.isnan(DD)] = 0
        DD[np.isinf(DD)] = 0
        #keepP = DD[:,0]+DD[:,1]+DD[:,2]<>0
        #print keepP
        #NCp,DD = NCp[keepP,:],DD[keepP,:]
        #KDTp = KDTree(NCp,5)
        # Deform mesh using Gaussian smoothing as suggested in paper by R.Bryan et al.
        sigma_k2 = np.power(np.power(f, -k) * sigm0, 2)
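        # The kernel width is annealed: sigma_k = f**(-k) * sigm0, so early iterations give
        # near-global deformation and later iterations increasingly local corrections.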
        results = pprocess.Map(limit=LIM)
        calc = results.manage(pprocess.MakeParallel(GaussianSmooth))
        for j in range(0, LIM):
            calc(
                np.array(range(0, NPP1)) + j * NPP1, W_km1, NCp, DD, sigma_k2,
                gamm)
        for j in range(0, LIM):
            DS[np.array(range(0, NPP1)) + j * NPP1, :] = results[j]
        DS[range(LIM * NPP1, N1), :] = GaussianSmooth(
            np.array(range(LIM * NPP1, N1)), W_km1, NCp, DD, sigma_k2, gamm)
        print "			", time.clock() - tic, " seconds"

        # Mask Nan and Inf if any:
        DS[np.isnan(DS)] = 0
        DS[np.isinf(DS)] = 0

        #print 'Check if current iteration reduces element quality to below allowable and stiffen mesh accordingly'
        print
        print
        print 'Convergence History'
        print CONV
        print
        print

        # Determine the Jacobian of all elements and, if unsatisfied, apply stiffening (decrease GW < 1) until this no longer happens

        # determine whether convergence is achieved
        #TotalMorph = np.sum(np.sqrt(np.sum(DS*DS,1)),0)/NCB.shape[0]
        TotalMorph = np.sum(np.sqrt(np.sum(DD * DD, 1))) / (DD.size / 3)
        CONV = CONV + [TotalMorph]
        FMorph = (k == 1) * TotalMorph + FMorph
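        # TotalMorph is the mean Euclidean length of the raw correspondence displacements; FMorph
        # keeps the first iteration's value as a reference scale for the divergence test below.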
        print "	average nodal displacement for current deformation iteration:"
        print TotalMorph
        if (TotalMorph < Tol):
            print
            print "CONVERGED SOLUTION OBTAINED"
            #CONV = CONV + [TotalMorph]
            k = k_max * 10 + 1
            W_km1 = W_km1 + DS
        elif (k < 10) | (TotalMorph < 10 * FMorph):
            print "problem not yet converged at iteration", k
            #CONV = CONV + [TotalMorph]
            k = k + 1
            # Deform mesh:
            print "	deforming mesh (update of W_{k-1})"
            W_km1 = W_km1 + DS
            #np.ma.dump(W_km1,'Femur2NC_'+str(k))
        else:
            print "PROBLEM DIVERGING"
            k = k_max * 10 - 1

        #np.ma.dump(W_km1,'TempElasNodes_Iter'+str(k-1)+'_Time'+time.ctime())
        if (k > 2) & (np.mod(k - 1, 5) == 0):
            print
            #np.ma.dump(W_km1,'TempElasNodes_Iter'+str(k-1)+'_Time'+time.ctime())
            #W_km1 = RBFmorph(W_km1,W_km1[LandmB,],LandmB_NC-W_km1[LandmB,])
            tic = time.clock()
            W_km1 = MeshSmooth(W_km1, neighbList, 10)
            np.ma.dump(
                W_km1, 'SkullUnique2_gamm' + str(gamm) + '_sigN' + str(sigm0) +
                '_iter' + str(k - 1))
            print "		Smoothing done in ", time.clock() - tic, " seconds"
        #print "COARSE SURFACE REGISTRATION"
        #print "	using landmark displacements to deform using RBF"
        #W_km1 = RBFmorph(W_km1,W_km1[LandmB,],LandmB_NC-W_km1[LandmB,])
    print

    if k == k_max + 1:
        print
        print "SOLUTION TERMINATED: maximum iterations,(", k_max, ") reached"
    print
    print "TOTAL TIME FOR ELASTIC SURFACE REGISTRATION : ", time.clock(
    ) - t_start, "seconds"
    CONV = np.array(CONV)
    return W_km1, CONV
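
# The registration above relies on a GaussianSmooth helper that is not reproduced here. The
# stand-in below is only a minimal sketch of such a Gaussian radial-basis smoother, assuming
# weights of the form exp(-d**2 / (2*sigma**2))**gamma normalised per node; the real helper
# may weight, truncate or normalise differently.
import numpy as np


def GaussianSmoothSketch(indices, nodes, known_pts, known_disp, sigma2, gamm):
    # Hypothetical stand-in for GaussianSmooth: each node receives the normalised,
    # Gaussian-weighted average of the known constraint displacements.
    out = np.zeros((np.size(indices), 3))
    for row, i in enumerate(indices):
        d2 = np.sum((known_pts - nodes[i]) ** 2, 1)  # squared distances to constraint points
        w = np.exp(-d2 / (2.0 * sigma2)) ** gamm     # assumed annealed Gaussian weights
        s = np.sum(w)
        if s > 0:
            out[row] = np.dot(w, known_disp) / s     # weighted average displacement
    return out
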
Exemple #24
0
def lineRST(NCB,
            RlinesB,
            VlinesB,
            NCT,
            RlinesT,
            VlinesT,
            UseFeat=0,
            UseScale=0,
            Use1Scale=1):
    # Use lines of curvature on two models to determine the rigid-body transformation for a best fit.
    # Takes as input the nodal coordinates of the two surface meshes as well as the ridge and valley lines of both.
    # The target mesh is then rotated, scaled and translated to best fit the corresponding lines of curvature on the base mesh.
    #global RsegmB,VsegmB,RsegmT,VsegmT
    #global RnodesB,VnodesB,RnodesT,VnodesT
    if UseFeat == 1:
        [RsegmB, VsegmB, RsegmT, VsegmT] = [
            np.array([[], [], []]).reshape((0, 3)),
            np.array([[], [], []]).reshape((0, 3)),
            np.array([[], [], []]).reshape((0, 3)),
            np.array([[], [], []]).reshape((0, 3))
        ]
        [RnodesB, VnodesB, RnodesT, VnodesT] = [
            np.array([[], []]).reshape((0, 2)),
            np.array([[], []]).reshape((0, 2)),
            np.array([[], []]).reshape((0, 2)),
            np.array([[], []]).reshape((0, 2))
        ]
        # Transform lines into line segments, with the i'th segment allocated as [NC_point1, NC_point2, Line_nr]
        #	and the list of nodes allocated as [Nd_nr, Line_nr]
        for i in range(1, RlinesB[0] + 1):
            Lsize = RlinesB[i].size
            RnodesB = np.array(
                np.r_[RnodesB, np.c_[RlinesB[i],
                                     np.ones((Lsize, 1)) * i]], int)
            RsegmB = np.array(
                np.r_[RsegmB, np.c_[RlinesB[i][0:Lsize - 1],
                                    RlinesB[i][1:Lsize],
                                    np.ones((Lsize - 1, 1)) * i]], int)
        for i in range(1, VlinesB[0] + 1):
            Lsize = VlinesB[i].size
            VnodesB = np.array(
                np.r_[VnodesB, np.c_[VlinesB[i],
                                     np.ones((Lsize, 1)) * i]], int)
            VsegmB = np.array(
                np.r_[VsegmB, np.c_[VlinesB[i][0:Lsize - 1],
                                    VlinesB[i][1:Lsize],
                                    np.ones((Lsize - 1, 1)) * i]], int)
        for i in range(1, RlinesT[0] + 1):
            Lsize = RlinesT[i].size
            RnodesT = np.array(
                np.r_[RnodesT, np.c_[RlinesT[i],
                                     np.ones((Lsize, 1)) * i]], int)
            RsegmT = np.array(
                np.r_[RsegmT, np.c_[RlinesT[i][0:Lsize - 1],
                                    RlinesT[i][1:Lsize],
                                    np.ones((Lsize - 1, 1)) * i]], int)
        for i in range(1, VlinesT[0] + 1):
            Lsize = VlinesT[i].size
            VnodesT = np.array(
                np.r_[VnodesT, np.c_[VlinesT[i],
                                     np.ones((Lsize, 1)) * i]], int)
            VsegmT = np.array(
                np.r_[VsegmT, np.c_[VlinesT[i][0:Lsize - 1],
                                    VlinesT[i][1:Lsize],
                                    np.ones((Lsize - 1, 1)) * i]], int)
        # find average nodal coordinate of base linesegments
        RsegBNC, VsegBNC = (NCB[RsegmB[:, 0], ] + NCB[RsegmB[:, 1], ]) / 2, (
            NCB[VsegmB[:, 0], ] + NCB[VsegmB[:, 1], ]) / 2
        # set up k-d tree of nodes and segments in Base model lines
        kdt_RBS, kdt_VBS = KDTree(RsegBNC, 5), KDTree(VsegBNC, 5)
    else:
        N1, N2 = NCB.shape[0], NCT.shape[0]
        NPP1 = N1 / LIM
        NPP2 = N2 / LIM
        kdt_Base = KDTree(NCB, 20)
    diff = 2
    k = 0
    Conv = np.zeros((100, ))
    while (diff > 0.000001) & (k < 100):
        k = k + 1
        print '		ITERATION ', k
        if UseFeat == 1:
            RsegTNC, VsegTNC = (NCT[RsegmT[:, 0], ] + NCT[RsegmT[:, 1], ]
                                ) / 2, (NCT[VsegmT[:, 0], ] +
                                        NCT[VsegmT[:, 1], ]) / 2
            kdt_RTS, kdt_VTS = KDTree(RsegTNC, 5), KDTree(VsegTNC, 5)
            R12R = LineICP(RnodesB, NCB, RnodesT, NCT, kdt_RTS,
                           RsegmT)  #find registered mesh 1 to 2 Ridges
            R12V = LineICP(VnodesB, NCB, VnodesT, NCT, kdt_VTS,
                           VsegmT)  #find registered mesh 1 to 2 Valleys
            R21R = LineICP(RnodesT, NCT, RnodesB, NCB, kdt_RBS,
                           RsegmB)  #find registered mesh 2 to 1 Ridges
            R21V = LineICP(VnodesT, NCT, VnodesB, NCB, kdt_VBS,
                           VsegmB)  #find registered mesh 2 to 1 Valleys

            # Determine the translation required for a best fit. It is known that the minimum lies at this position [D. Du et al.]
            B12R, T12R = NCB[np.array(R12R[:, 0], int), ], R12R[:, 1:4]
            B12V, T12V = NCB[np.array(R12V[:, 0], int), ], R12V[:, 1:4]
            B21R, T21R = R21R[:, 1:4], NCT[np.array(R21R[:, 0], int), ]
            B21V, T21V = R21V[:, 1:4], NCT[np.array(R21V[:, 0], int), ]
            Base_i = np.r_[B12R, B12V, B21R, B21V]
            Target_i = np.r_[T12R, T12V, T21R, T21V]
        else:
            kdt_Targ = KDTree(NCT, 20)
            print 'k-d trees and closest point search'
            B2T, T2B = np.zeros((NCB.shape[0], 1)), np.zeros((NCT.shape[0], 1))
            print '	base'
            results = pprocess.Map(limit=LIM)
            calc = results.manage(pprocess.MakeParallel(FullICP))
            for j in range(0, LIM):
                calc(np.array(range(0, NPP1)) + j * NPP1, NCB, kdt_Targ, B2T)
            for j in range(0, LIM):
                B2T[np.array(range(0, NPP1)) + j * NPP1, :] = results[j]
            B2T[range(LIM * NPP1, N1), :] = FullICP(range(LIM * NPP1, N1), NCB,
                                                    kdt_Targ, B2T)
            #for i in range(0,NCB.shape[0]):
            #B2T = B2T + [kdt_Targ.query(NCB[i,])[1]]
            print '	target'
            results = pprocess.Map(limit=LIM)
            calc = results.manage(pprocess.MakeParallel(FullICP))
            for j in range(0, LIM):
                calc(np.array(range(0, NPP2)) + j * NPP2, NCT, kdt_Base, T2B)
            for j in range(0, LIM):
                T2B[np.array(range(0, NPP2)) + j * NPP2, :] = results[j]
            T2B[range(LIM * NPP2, N2), :] = FullICP(range(LIM * NPP2, N2), NCT,
                                                    kdt_Base, T2B)
            #for i in range(0,NCT.shape[0]):
            #T2B = T2B + [kdt_Base.query(NCT[i,])[1]]
            B2T, T2B = np.array(B2T, int).reshape(
                (NCB.shape[0], )), np.array(T2B, int).reshape((NCT.shape[0], ))
            Base_i = np.r_[NCB, NCB[T2B, ]]
            Target_i = np.r_[NCT[B2T, ], NCT]
        print 'translate'
        Distance = Base_i - Target_i
        Translate = np.sum(Distance, 0) / Distance.shape[0]
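        # For fixed correspondences the optimal translation is the mean pairwise difference,
        # i.e. it aligns the centroids of the matched point sets.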
        # Apply translation to Target nodal coordinates and Base to Target registered pairs
        NCTT = np.c_[NCT[:, 0] + Translate[0], NCT[:, 1] + Translate[1],
                     NCT[:, 2] + Translate[2]]
        Target_i = np.c_[Target_i[:, 0] + Translate[0],
                         Target_i[:, 1] + Translate[1],
                         Target_i[:, 2] + Translate[2]]
        print '	translation = ', Translate
        # Determine Current Quadratic approximation:
        E1 = np.matrix([[0, -1, 0], [1, 0, 0], [0, 0, 0]])
        E2 = np.matrix([[0, 0, 0], [0, 0, -1], [0, 1, 0]])
        E3 = np.matrix([[0, 0, 1], [0, 0, 0], [-1, 0, 0]])
        D1 = np.matrix([[1, 0, 0], [0, 0, 0], [0, 0, 0]])
        D2 = np.matrix([[0, 0, 0], [0, 1, 0], [0, 0, 0]])
        D3 = np.matrix([[0, 0, 0], [0, 0, 0], [0, 0, 1]])
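        # E1..E3 are the skew-symmetric generators of infinitesimal rotations about the z, x and y
        # axes, and D1..D3 select scaling along x, y and z. The Powell search below returns small
        # parameters Xf that are pushed through these generators to update U, S and R incrementally.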
        IMat = np.matrix(np.eye(3))
        #Ukm1,Skm1,Rkm1 = np.linalg.svd(AO)
        Ukm1, Skm1, Rkm1 = IMat, IMat, IMat  #*1.3
        #k2,diff2=0,2
        #while (diff2>0.1)&(k2<100):
        #k2=k2+1
        #Bi1,Bi2,Bi3 = np.array(Ukm1*E1*Skm1*Rkm1*Target_i.T).T,np.array(Ukm1*E2*Skm1*Rkm1*Target_i.T).T,np.array(Ukm1*E3*Skm1*Rkm1*Target_i.T).T
        #Bi4,Bi5,Bi6 = np.array(Ukm1*Skm1*D1*Rkm1*Target_i.T).T,np.array(Ukm1*Skm1*D2*Rkm1*Target_i.T).T,np.array(Ukm1*Skm1*D3*Rkm1*Target_i.T).T
        #Bi7,Bi8,Bi9 = np.array(Ukm1*Skm1*Rkm1*E1*Target_i.T).T,np.array(Ukm1*Skm1*Rkm1*E2*Target_i.T).T,np.array(Ukm1*Skm1*Rkm1*E3*Target_i.T).T

        #Distance = np.array(Ukm1*Skm1*Rkm1*Target_i.T-Base_i.T).T
        ## Set up Hessian matrix:
        #Hes = np.matrix([[np.sum(Bi1*Bi1),np.sum(Bi1*Bi2),np.sum(Bi1*Bi3),np.sum(Bi1*Bi4),np.sum(Bi1*Bi5),np.sum(Bi1*Bi6),np.sum(Bi1*Bi7),np.sum(Bi1*Bi8),np.sum(Bi1*Bi9)],
        #[np.sum(Bi2*Bi1),np.sum(Bi2*Bi2),np.sum(Bi2*Bi3),np.sum(Bi2*Bi4),np.sum(Bi2*Bi5),np.sum(Bi2*Bi6),np.sum(Bi2*Bi7),np.sum(Bi2*Bi8),np.sum(Bi2*Bi9)],
        #[np.sum(Bi3*Bi1),np.sum(Bi3*Bi2),np.sum(Bi3*Bi3),np.sum(Bi3*Bi4),np.sum(Bi3*Bi5),np.sum(Bi3*Bi6),np.sum(Bi3*Bi7),np.sum(Bi3*Bi8),np.sum(Bi3*Bi9)],
        #[np.sum(Bi4*Bi1),np.sum(Bi4*Bi2),np.sum(Bi4*Bi3),np.sum(Bi4*Bi4),np.sum(Bi4*Bi5),np.sum(Bi4*Bi6),np.sum(Bi4*Bi7),np.sum(Bi4*Bi8),np.sum(Bi4*Bi9)],
        #[np.sum(Bi5*Bi1),np.sum(Bi5*Bi2),np.sum(Bi5*Bi3),np.sum(Bi5*Bi4),np.sum(Bi5*Bi5),np.sum(Bi5*Bi6),np.sum(Bi5*Bi7),np.sum(Bi5*Bi8),np.sum(Bi5*Bi9)],
        #[np.sum(Bi6*Bi1),np.sum(Bi6*Bi2),np.sum(Bi6*Bi3),np.sum(Bi6*Bi4),np.sum(Bi6*Bi5),np.sum(Bi6*Bi6),np.sum(Bi6*Bi7),np.sum(Bi6*Bi8),np.sum(Bi6*Bi9)],
        #[np.sum(Bi7*Bi1),np.sum(Bi7*Bi2),np.sum(Bi7*Bi3),np.sum(Bi7*Bi4),np.sum(Bi7*Bi5),np.sum(Bi7*Bi6),np.sum(Bi7*Bi7),np.sum(Bi7*Bi8),np.sum(Bi7*Bi9)],
        #[np.sum(Bi8*Bi1),np.sum(Bi8*Bi2),np.sum(Bi8*Bi3),np.sum(Bi8*Bi4),np.sum(Bi8*Bi5),np.sum(Bi8*Bi6),np.sum(Bi8*Bi7),np.sum(Bi8*Bi8),np.sum(Bi8*Bi9)],
        #[np.sum(Bi9*Bi1),np.sum(Bi9*Bi2),np.sum(Bi9*Bi3),np.sum(Bi9*Bi4),np.sum(Bi9*Bi5),np.sum(Bi9*Bi6),np.sum(Bi9*Bi7),np.sum(Bi9*Bi8),np.sum(Bi9*Bi9)]])

        ## set up fj
        #Fj = np.matrix([[np.sum(Bi1*Distance),np.sum(Bi2*Distance),np.sum(Bi3*Distance),np.sum(Bi4*Distance),np.sum(Bi5*Distance),
        #np.sum(Bi6*Distance),np.sum(Bi7*Distance),np.sum(Bi8*Distance),np.sum(Bi9*Distance)]]).T
        print 'find rotation and scale'
        Xf = fmin_powell(costF,
                         np.array([0, 0, 0, 1, 1, 1, 0, 0, 0]),
                         args=(Base_i, Target_i, Ukm1, Skm1, Rkm1, UseScale,
                               Use1Scale))
        Xf = np.array(Xf).reshape((9, ))
        print 'u1..3,s1..3,r1..3 = ', Xf
        FvP = np.array(Ukm1 * Skm1 * Rkm1 * Target_i.T - Base_i.T).T
        FvP = np.sum(FvP * FvP)
        Conv[k] = FvP
        Ukm1 = Ukm1 + Ukm1 * np.matrix(E1 * Xf[0] + E2 * Xf[1] + E3 * Xf[2])
        if UseScale == 1:
            Skm1 = Skm1 + Skm1 * np.matrix(D1 * Xf[3] + D2 * Xf[4] +
                                           D3 * Xf[5])
        if Use1Scale == 1:
            Skm1 = Skm1 + Skm1 * np.matrix(D1 * Xf[3] + D2 * Xf[3] +
                                           D3 * Xf[3])
        print
        print Skm1
        print
        Rkm1 = Rkm1 + Rkm1 * np.matrix(E1 * Xf[6] + E2 * Xf[7] + E3 * Xf[8])
        NCTT = np.array(Ukm1 * Skm1 * Rkm1 * NCTT.T).T
        diff = np.sum((NCT - NCTT) * (NCT - NCTT)) / NCT.shape[0]
        print '	Average difference between current and previous nodal coordinates:  ', diff
        np.ma.dump(NCTT, 'Femur1NC_' + str(k))
        NCT = NCTT
    return NCT, Conv
    def __call__(self, datasets):
        """Estimate mappers for each dataset using searchlight-based
        hyperalignment.

        Parameters
        ----------
          datasets : list or tuple of datasets

        Returns
        -------
        A list of trained StaticProjectionMappers of the same length as datasets
        """

        # Perform some checks first before modifying internal state
        params = self.params
        ndatasets = len(datasets)

        if len(datasets) <= 1:
            raise ValueError("SearchlightHyperalignment needs > 1 dataset to "
                             "operate on. Got: %d" % self.ndatasets)

        if params.ref_ds in params.exclude_from_model:
            raise ValueError("Requested reference dataset %i is also "
                             "in the exclude list." % params.ref_ds)

        if params.ref_ds >= ndatasets:
            raise ValueError("Requested reference dataset %i is out of "
                             "bounds. We have only %i datasets provided" %
                             (params.ref_ds, self.ndatasets))

        # The rest of the checks are just warnings
        self.ndatasets = ndatasets

        _shpaldebug("SearchlightHyperalignment %s for %i datasets" %
                    (self, self.ndatasets))

        selected = [
            _ for _ in range(ndatasets) if _ not in params.exclude_from_model
        ]
        ref_ds_train = selected.index(params.ref_ds)
        params.hyperalignment.params.ref_ds = ref_ds_train
        warning('Using %dth dataset as the reference dataset (%dth after '
                'excluding datasets)' % (params.ref_ds, ref_ds_train))
        if len(params.exclude_from_model) > 0:
            warning("These datasets will not participate in building common "
                    "model: %s" % params.exclude_from_model)

        if __debug__:
            # verify that datasets were zscored prior the alignment since it is
            # assumed/required preprocessing step
            for ids, ds in enumerate(datasets):
                for f, fname, tval in ((np.mean, 'means', 0), (np.std, 'stds',
                                                               1)):
                    vals = f(ds, axis=0)
                    vals_comp = np.abs(vals - tval) > 1e-5
                    if np.any(vals_comp):
                        warning(
                            '%d %s are too different (max diff=%g) from %d in '
                            'dataset %d to come from a zscored dataset. '
                            'Please zscore datasets first for correct operation '
                            '(unless it was intentional)' %
                            (np.sum(vals_comp), fname, np.max(
                                np.abs(vals)), tval, ids))

        # Setting up SearchlightHyperalignment
        # we need to know which original features were comprising the
        # individual SL ROIs
        _shpaldebug('Initializing FeatureSelectionHyperalignment.')
        hmeasure = FeatureSelectionHyperalignment(
            ref_ds=params.ref_ds,
            featsel=params.featsel,
            hyperalignment=params.hyperalignment,
            full_matrix=params.combine_neighbormappers,
            use_same_features=params.use_same_features,
            exclude_from_model=params.exclude_from_model,
            dtype=params.dtype)

        # Performing SL processing manually
        _shpaldebug("Setting up for searchlights")
        if params.nproc is None and externals.exists('pprocess'):
            import pprocess
            try:
                params.nproc = pprocess.get_number_of_cores() or 1
            except AttributeError:
                warning("pprocess version %s has no API to figure out maximal "
                        "number of cores. Using 1" %
                        externals.versions['pprocess'])
                params.nproc = 1

        # XXX I think this class should already accept a single dataset only.
        # It should have a ``space`` setting that names a sample attribute that
        # can be used to identify individual/original datasets.
        # Taking a single dataset as argument would be cleaner, because the
        # algorithm relies on the assumption that there is a coarse feature
        # alignment, i.e. the SL ROIs cover roughly the same area
        queryengines = self._get_trained_queryengines(datasets,
                                                      params.queryengine,
                                                      params.radius,
                                                      params.ref_ds)
        # For surface nodes to voxels queryengines, roi_seed hardly makes sense
        qe = queryengines[(0 if len(queryengines) == 1 else params.ref_ds)]
        if isinstance(qe, SurfaceVerticesQueryEngine):
            self.force_roi_seed = False
            if not self.params.combine_neighbormappers:
                raise NotImplementedError(
                    "Mapping from voxels to surface nodes is not "
                    "implmented yet. Try setting combine_neighbormappers to True."
                )
        self.nfeatures = datasets[params.ref_ds].nfeatures
        _shpaldebug("Performing Hyperalignment in searchlights")
        # Setting up centers for running SL Hyperalignment
        if params.sparse_radius is None:
            roi_ids = self._get_verified_ids(queryengines) \
                if params.mask_node_ids is None \
                else params.mask_node_ids
        else:
            if params.queryengine is not None:
                raise NotImplementedError(
                    "using sparse_radius whenever custom queryengine is "
                    "provided is not yet supported.")
            _shpaldebug("Setting up sparse neighborhood")
            from mvpa2.misc.neighborhood import scatter_neighborhoods
            if params.mask_node_ids is None:
                scoords, sidx = scatter_neighborhoods(
                    Sphere(params.sparse_radius),
                    datasets[params.ref_ds].fa.voxel_indices,
                    deterministic=True)
                roi_ids = sidx
            else:
                scoords, sidx = scatter_neighborhoods(
                    Sphere(params.sparse_radius),
                    datasets[params.ref_ds].fa.voxel_indices[
                        params.mask_node_ids],
                    deterministic=True)
                roi_ids = [params.mask_node_ids[sid] for sid in sidx]

        # Initialize projections
        _shpaldebug('Initializing projection matrices')
        self.projections = [
            csc_matrix((self.nfeatures, self.nfeatures), dtype=params.dtype)
            for isub in range(self.ndatasets)
        ]

        # compute
        if params.nproc is not None and params.nproc > 1:
            # split all target ROIs centers into `nproc` equally sized blocks
            nproc_needed = min(len(roi_ids), params.nproc)
            params.nblocks = nproc_needed \
                if params.nblocks is None else params.nblocks
            params.nblocks = min(len(roi_ids), params.nblocks)
            node_blocks = np.array_split(roi_ids, params.nblocks)
            # the next block sets up the infrastructure for parallel computing
            # this can easily be changed into a ParallelPython loop, if we
            # decide to have a PP job server in PyMVPA
            import pprocess
            p_results = pprocess.Map(limit=nproc_needed)
            if __debug__:
                debug(
                    'SLC', "Starting off %s child processes for nblocks=%i" %
                    (nproc_needed, params.nblocks))
            compute = p_results.manage(pprocess.MakeParallel(self._proc_block))
            seed = mvpa2.get_random_seed()
            for iblock, block in enumerate(node_blocks):
                # should we maybe deepcopy the measure to have a unique and
                # independent one per process?
                compute(block,
                        datasets,
                        copy.copy(hmeasure),
                        queryengines,
                        seed=seed,
                        iblock=iblock)
        else:
            # otherwise collect the results in an 1-item list
            _shpaldebug('Using 1 process to compute mappers.')
            if params.nblocks is None:
                params.nblocks = 1
            params.nblocks = min(len(roi_ids), params.nblocks)
            node_blocks = np.array_split(roi_ids, params.nblocks)
            p_results = [
                self._proc_block(block, datasets, hmeasure, queryengines)
                for block in node_blocks
            ]
        results_ds = self.__handle_all_results(p_results)
        # Dummy iterator for, you know, iteration
        list(results_ds)

        _shpaldebug(
            'Wrapping projection matrices into StaticProjectionMappers')
        self.projections = [
            StaticProjectionMapper(proj=proj, recon=proj.T)
            if params.compute_recon else StaticProjectionMapper(proj=proj)
            for proj in self.projections
        ]
        return self.projections
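
# For orientation only: a hedged sketch of how the searchlight hyperalignment above is typically
# driven in PyMVPA. The module path, constructor arguments and mapper API are assumptions that may
# differ between PyMVPA versions; `datasets` is expected to be a list of z-scored datasets with
# matching features.
def run_searchlight_hyperalignment_sketch(datasets):
    from mvpa2.algorithms.searchlight_hyperalignment import SearchlightHyperalignment
    slhyper = SearchlightHyperalignment(radius=3, nproc=4)  # assumed constructor arguments
    mappers = slhyper(datasets)  # one trained StaticProjectionMapper per input dataset
    # project every dataset into the derived common space
    return [m.forward(ds) for m, ds in zip(mappers, datasets)]
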
def compute_misfit_write_SEM(iteration, compute_scalar_only=False):
    import input_parameters, os, pprocess, sys
    import numpy as np
    print "Computing adjoint source"
    print "Assuming the default SPECFEM3D format for data: 2 columns of ascii text, < time value, recorded waveform >"

    def process_adj_file(line):  #syn_file,data_file,adj_file):
        syn_file = line.split()[0]
        data_file = line.split()[1]
        adj_file = line.split()[2]
        syn_trace = np.loadtxt(syn_file)
        data_trace = np.loadtxt(data_file)

        if len(syn_trace) > len(data_trace):
            print "ERROR: synthetic traces have more time samples thatn data traces!"
            sys.exit(1)
        elif len(syn_trace) < len(data_trace):
            # Assuming that synthetics and data are sampled at equivalent intervals
            # Synthetics can be shorter time series than data.  In this case we just read the first <length of synthetic trace> samples of data
            data_trace = data_trace[:len(syn_trace), :]

        pressure_difference = syn_trace[:, 1] - data_trace[:, 1]
        adj_source = np.zeros(len(syn_trace))
        # for acoustic FWI, L2 adjoint source is the second derivative of pressure difference
        # See Peter et al. 2011, GJI, eq. (A8)
        for i in range(1, len(adj_source) - 1):
            # do a simple central finite difference
            adj_source[i] = (pressure_difference[i + 1] -
                             2 * pressure_difference[i] +
                             pressure_difference[i - 1])
        dt = syn_trace[1, 0] - syn_trace[0, 0]
        adj_source *= 1.0 / (dt**2)
        return_data_misfit = 0.5 * sum(adj_source**2) * dt
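        # Scalar misfit contribution of this trace: a discrete L2-style norm (0.5 * sum * dt),
        # accumulated over all traces after the parallel map below.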
        if compute_scalar_only == False:
            ftemp = open(adj_file, 'w')
            for i in range(len(adj_source)):
                ftemp.write('%e %e\n' % (syn_trace[i, 0], adj_source[i]))
            ftemp.close()
            if iteration == 0:  # schiemenz, gotta be a better way to do this
                os.system('ln -s ' + adj_file + ' ' + adj_file[:-5] + 'Y.adj')
                os.system('ln -s ' + adj_file + ' ' + adj_file[:-5] + 'Z.adj')
        return return_data_misfit

    results_misfit = pprocess.Map(limit=input_parameters.total_processors,
                                  reuse=1)
    parfun = results_misfit.manage(pprocess.MakeReusable(process_adj_file))
    f = open('SEM_file_list', 'r')
    SEM_file_list = f.readlines()
    f.close()
    for line in SEM_file_list:
        parfun(line)
    results_misfit.finish()

    total_misfit = 0.0
    for i in range(len(SEM_file_list)):
        total_misfit += results_misfit[i]

    fmisfit = open('L2_misfit', 'a')
    if iteration == 0:
        fmisfit.write("Iteration, L2-misfit\n")
    fmisfit.write('%i, %e\n' % (iteration, total_misfit))
    fmisfit.close()
Exemple #27
0
#NCS[outer,] = NCDef[outer,]
#np.ma.dump(NCS,fname[0:6]+'NC_Ainit')
#NCS = ptet.LaplacMesh(inner,NCS,neighbListTet,100,1)
#np.ma.dump(NCS,fname[0:6]+'NC_AinitLapl')

NCSprev = np.r_[NCS]
DispOuter = (NCouter - NCTnc[outer, ]) / Steps
for inc in range(Steps):
    #NCS[outer,] = NCS[outer,]+DispOuter	#update boundary displacement
    # Deform mesh using Gaussian smoothing as suggested in paper by R.Bryan et al.
    NCp = NCS[outer, ]
    DD = DispOuter
    DS = np.zeros(NCS.shape)
    print 'Do Gaussian Smooth on internal nodes'
    sigma_k2 = np.power(np.power(1.0715, -(inc + 1)) * 10, 2)
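    # The kernel width shrinks geometrically with each increment (factor 1.0715), so the smoothing
    # becomes progressively more local as the boundary displacement is applied step by step.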
    results = pprocess.Map(limit=LIM)
    calc = results.manage(pprocess.MakeParallel(ptet.GaussianSmooth))
    for j in range(0, LIM):
        calc(
            np.array(range(0, NPP1)) + j * NPP1, np.r_[NCS], NCp, DD, sigma_k2,
            2)
    for j in range(0, LIM):
        DS[np.array(range(0, NPP1)) + j * NPP1, :] = results[j]
    DS[range(LIM * NPP1, N1), :] = ptet.GaussianSmooth(
        np.array(range(LIM * NPP1, N1)), np.r_[NCS], NCp, DD, sigma_k2, 2)
    NCS[outer, ] = NCS[outer, ] + DispOuter
    NCS[inner, ] = NCS[inner, ] + DS[inner, ]

    EQ, delt, Sn2, Sig = qu.elemQual_mu(np.array(range(TetT.shape[0])), NCS,
                                        TetT)
    print '					Average Element Quality: 	', np.average(EQ)
            #os.chdir(dirname)
            cmd = root_dir + '/../bin/xcombine_vol_data 0 ' + str(
                procnum -
                1) + ' ' + filetype + ' ' + model_dir + ' ' + model_dir + ' 1'
            print cmd
            os.system(cmd)
            #os.chdir(root_dir)


filenames = ['vp']

# link true model for comparison
os.system('ln -s ../../models/true_model .')

model_dirs = ['true_model/']
for i in range(20):
    model_dirs.append('m' + str(i) + '/')
results = pprocess.Map(limit=len(model_dirs), reuse=1)
parallel_function = results.manage(pprocess.MakeReusable(model_loop))
for model in model_dirs:
    parallel_function(model)
results.finish()

print "Moving vtk files"
vtus = []
for d in model_dirs:
    vtus += glob(d + '*.vtk')
for f in vtus:
    fnew = f[:f.find('/')] + '.vtk'
    os.system('mv ' + f + ' ' + fnew)
Exemple #29
0
def cross_corr(max_ts=5.):
    """
    create a 'cc.txt' file for the waveforms in the first path and 
    the second path by measuring the cross correlation coefficient and
    the time shift.
    'cc.txt' is located in the same folder as compareDMT.py
    """

    global input

    identity_all = input['net'] + '.' + input['sta'] + '.' + \
                    input['loc'] + '.' + input['cha']
    ls_first = glob.glob(os.path.join(input['first_path'], identity_all))
    ls_second = glob.glob(os.path.join(input['second_path'], identity_all))

    if os.path.isfile('./cc.txt'):
        print '----------------------------------------------------'

        usr_input = raw_input(\
                    '"cc.txt" exists in the directory, do you want to:\n\n' + \
                    'A. append to the existing "cc.txt"\n' + \
                    'N. generate a new one\n\n' + \
                    'please enter A or N based on your ' + \
                    'decision:\n').upper()

        if usr_input == 'A':
            print '###################################'
            print 'Continue with appending to "cc.txt"'
            print '###################################'

        elif usr_input == 'N':
            os.remove('./cc.txt')
            print '"cc.txt" is removed'
        print '----------------------------------------------------'

    # open the cc.txt file that exists in the directory OR create a new one
    cc_open = open('./cc.txt', 'a')
    cc_open.writelines(str(len(ls_first)) + ',\n')
    cc_open.close()

    if input['cc_parallel'] == 'Y':
        # Parallel Cross Correlation
        import pprocess

        print "###################"
        print "Parallel Request"
        print "Number of Nodes: " + str(input['cc_np'])
        print "###################"

        # using pprocess.Map to define the parallel job
        parallel_results = pprocess.Map(limit=input['cc_np'], reuse=1)
        parallel_job = parallel_results.manage(pprocess.MakeReusable(cc_core))

        for i in range(0, len(ls_first)):
            parallel_job(ls_first = ls_first[i], ls_second = ls_second, \
                            identity_all = identity_all, max_ts = max_ts,
                            print_sta = str(i+1) + '/' + str(len(ls_first)))

        parallel_results.finish()

    else:
        for i in range(0, len(ls_first)):
            #for i in range(0, 20):
            cc_core(ls_first = ls_first[i], ls_second = ls_second, \
                            identity_all = identity_all, max_ts = max_ts,
                            print_sta = str(i+1) + '/' + str(len(ls_first)))
Exemple #30
0
        'http://www.python.org/getit/', 'http://www.python.org/community/',
        'https://wiki.python.org/moin/', 'http://planet.python.org/',
        'https://wiki.python.org/moin/LocalUserGroups',
        'http://www.python.org/psf/', 'http://docs.python.org/devguide/',
        'http://www.python.org/community/awards/'
        # etc..
    ]

    # Serial computation
    start = time.time()
    serial_results = [takeuptime(args) for args in list_of_args]
    print "%f s for traditional, serial computation." % (time.time() - start)

    # Parallel computation
    nproc = 4  # maximum number of simultaneous processes desired
    results = pprocess.Map(limit=nproc, reuse=1)
    parallel_function = results.manage(pprocess.MakeReusable(takeuptime))
    start = time.time()
    # Start computing things
    for args in list_of_args:
        parallel_function(args)
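    # Slicing the Map object gathers the results, blocking until every queued call has completed;
    # results come back in the order the calls were submitted.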
    parallel_results = results[:]
    print "%f s for parallel computation." % (time.time() - start)

    # Multithreading computation
    nthead = 4  # number of threads
    threads = [
        threading.Thread(target=takeuptime, args=(list_of_args[i], ))
        for i in range(nthead)
    ]
    start = time.time()