Example 1
def gen_movie(paths,
              obs_map,
              init_pos,
              goals,
              file_base,
              conn_8=True,
              increments=1):
    """Parallelized animation code

    saves image files for each frame in the animation

    paths   - joint path of the system
              [ [[x1(0), y1(0)]...], [[x1(1), y1(1)] ]
    obs_map - description of environment.  Matrix with 0 indicating free
              cell, 1 indicating an obstacle
    goal    - [[x1, y1], [x2, y2], ...] joint initial configuration
    goal    - [[x1, y1], [x2, y2], ...] joint goal configuration
    file_base - base name for image files
    conn_8  - whether to use an 8-connected depiction
    increments - How many intermediate frames to generate between step
                 and step + 1
    """
    parmap.parmap(
        lambda x: animate_result_step(x,
                                      paths,
                                      obs_map,
                                      init_pos,
                                      goals,
                                      file_base=file_base,
                                      conn_8=conn_8,
                                      increments=increments),
        range(len(paths)))
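
These examples call a parmap helper that maps a function over an iterable across worker processes. For reference, a minimal sketch of that idea using only the standard library (parmap_sketch and its np keyword are illustrative; the real helpers are project-specific and, unlike multiprocessing.Pool, usually also handle lambdas and closures):

import multiprocessing

def parmap_sketch(f, iterable, np=None):
    # Fan the work out over a process pool; results come back in input
    # order. Pool.map cannot pickle lambdas or closures, which is exactly
    # why hand-rolled parmap implementations exist: they typically spawn
    # Processes and ship arguments over Pipes instead.
    with multiprocessing.Pool(processes=np) as pool:
        return pool.map(f, iterable)

if __name__ == '__main__':
    print(parmap_sketch(abs, [-1, -2, 3]))  # [1, 2, 3]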
Example 2
    def _CAT_transform(self, X):
        # Count features: frequency of a single column or of a hashed
        # column pair.
        if len(self.CAT_encoder[0]) != 0:

            def do_job(job):
                if isinstance(job, int):
                    feature = X[:, job]
                else:
                    feature = self._get_hash_feature(X[:, job[0]],
                                                     X[:, job[1]])
                return np.reshape(self._get_count_feature(feature), [-1, 1])

            X = np.concatenate(
                [X,
                 np.concatenate(parmap(do_job, self.CAT_encoder[0]), axis=1)],
                axis=1)

        # Relative count features: count of the hashed pair divided by the
        # count of the first column alone.
        if len(self.CAT_encoder[1]) != 0:

            def do_job(job):
                feature = self._get_hash_feature(X[:, job[0]], X[:, job[1]])
                feature = self._get_count_feature(feature)
                feature_f = self._get_count_feature(X[:, job[0]])
                feature = feature / (feature_f + 0.01)
                return np.reshape(feature, [-1, 1])

            X = np.concatenate(
                [X,
                 np.concatenate(parmap(do_job, self.CAT_encoder[1]), axis=1)],
                axis=1)

        # Count / nunique features: count of the first column divided by the
        # number of distinct values of the second column seen alongside it.
        if len(self.CAT_encoder[2]) != 0:

            def do_job(job):
                feature = self._get_count_feature(X[:, job[0]])
                feature_n = self._get_nunique_feature(X[:, job[0]],
                                                      X[:, job[1]])
                feature = feature / (feature_n + 0.01)
                return np.reshape(feature, [-1, 1])

            X = np.concatenate(
                [X,
                 np.concatenate(parmap(do_job, self.CAT_encoder[2]), axis=1)],
                axis=1)

        # Nunique features: number of distinct values of the second column
        # per value of the first.
        if len(self.CAT_encoder[3]) != 0:

            def do_job(job):
                feature = self._get_nunique_feature(X[:, job[0]], X[:, job[1]])
                return np.reshape(feature, [-1, 1])

            X = np.concatenate(
                [X,
                 np.concatenate(parmap(do_job, self.CAT_encoder[3]), axis=1)],
                axis=1)
        return X.astype(np.float32)
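
A standalone sketch of the count feature these helpers compute (count_feature below is a hypothetical stand-in for the class-internal _get_count_feature): each categorical value is replaced by its frequency in the column.

import numpy as np

def count_feature(col):
    # Map every element to the number of occurrences of its value in col.
    _, inverse, counts = np.unique(col, return_inverse=True,
                                   return_counts=True)
    return counts[inverse]

print(count_feature(np.array(['a', 'b', 'a', 'a'])))  # [3 1 3 3]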
Example 3
def build_gwts(module_name, *args, **dict_p):
    """
    module_name: the web module which should define list of gwt modules in  its build file
    """
    build(module_name)
    module = modules.mod_name_mapping[module_name]
    if hasattr(module, 'gwt_modules'):
        cores = multiprocessing.cpu_count()
        params = [(module_name, gwt_mod, dict_p)
                  for gwt_mod in module.gwt_modules]
        parmap.parmap(gwt_compile, cores, params)
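
Here parmap.parmap takes the worker, a core count, and the parameter list. Assuming it behaves like a sized process-pool map (an assumption; the helper is project-specific), a standard-library equivalent looks like this, with worker as a hypothetical stand-in for gwt_compile:

from multiprocessing import Pool

def worker(param):
    # Hypothetical stand-in for gwt_compile((module_name, gwt_mod, dict_p)).
    module_name, gwt_mod, dict_p = param
    return '%s:%s' % (module_name, gwt_mod)

if __name__ == '__main__':
    params = [('web', mod, {}) for mod in ('app', 'admin')]
    with Pool(2) as pool:
        print(pool.map(worker, params))  # ['web:app', 'web:admin']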
Example 4
    def getSpectra(self, Quantity):

        self.offsets = (np.arange(self.offsetRange[1] - self.offsetRange[0] + 1)
                        + self.offsetRange[0])

        # We need to read the data once on the master to get the ranges.
        if self.Lx is None or self.Ly is None:
            self.offset = self.offsets[0]
            self.readData()

        self.dataFFT = parmap(Quantity, self.offsets, np=self.np)
        print('========== got dataFFT ==========')
        self.spectra = parmap(self.shellAverage, self.dataFFT, np=self.np)
        self.getk1D()
Example 5
    def getSpectra(self, Quantity):

        self.offsets = (np.arange(self.offsetRange[1] - self.offsetRange[0] + 1)
                        + self.offsetRange[0])

        # We need to read the data once on the master to get the ranges.
        if self.Lx is None or self.Ly is None or self.Lz is None:
            self.offset = self.offsets[0]
            self.readData()

        self.dataFFT = parmap(Quantity, self.offsets, np=self.np)
        print('========== got dataFFT ==========')
        self.spectra = parmap(self.shellAverage, self.dataFFT, np=self.np)
        self.getk1D()
Example 6
def collect(beg, end, filenameout='data', type='pvtu', dumpfile='radii.dat'):

    offsets = np.arange(end - beg + 1) + beg

    radii = []
    radii = parmap(lambda offset, filenameout=filenameout, type=type:
                   getFromOffset(offset, filenameout=filenameout, type=type),
                   offsets,
                   np=4)
    #    for offset in offsets:
    #        data = read.load(offset,file=filenameout,type=type)
    #        myradii=get(data)
    #        myradii['offset'] = offset
    #        radii.append(myradii)

    with open(dumpfile, 'wb') as f:
        pickle.dump(radii, f)

    return radii
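
Note the default-argument trick in the lambda above: filenameout and type are bound as defaults at definition time, so each worker receives them without relying on closure state. A minimal illustration:

# Default arguments are evaluated once when the function is defined, so
# the current value is baked into each function object.
fns = [lambda x, k=k: x + k for k in range(3)]
print([f(10) for f in fns])  # [10, 11, 12]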
Example 7
    def _traverse(self, fn):
        # Flatten a nested PVector into a flat list of its leaves.
        def probe(l):
            if not isinstance(l, pvectorc.PVector):
                return [l]
            else:
                result = []
                for x in l:
                    result += probe(x)
                return result

        # print('stack:', self.stack)
        top = self.stack[-1]
        # print('top:', top)
        queue = probe(top)
        # print('queue:', queue)

        # multicore parallel map
        result = parmap(fn, queue, cpu_count)
        # singlecore map
        # result = list(map(fn, queue))

        # Run the garbage collector, since parmap may leak memory otherwise.
        gc.collect()

        # print('result:', result)
        result_pt = 0

        # Rebuild the original nesting, consuming `result` in leaf order.
        def run(l):
            if not isinstance(l, pvectorc.PVector):
                nonlocal result_pt
                result_pt += 1
                return result[result_pt - 1]
            else:
                return pvector([run(x) for x in l])

        # self.stack is (presumably) a PVector, whose append returns a new
        # vector rather than mutating in place.
        new_stack = self.stack[:-1].append(run(top))
        return new_stack
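
The probe/run pair flattens an arbitrarily nested PVector into a leaf list so parmap can process it flat, then rebuilds the original shape from the results. A self-contained sketch of the same round trip (map_leaves is illustrative, not part of the original class):

from pyrsistent import pvector, PVector

def map_leaves(fn, node):
    # Apply fn to every leaf while preserving the nesting structure.
    if isinstance(node, PVector):
        return pvector([map_leaves(fn, x) for x in node])
    return fn(node)

print(map_leaves(lambda x: x * 2, pvector([1, pvector([2, 3]), 4])))
# pvector([2, pvector([4, 6]), 8])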
Example 8
def climb(points, precision):
    climber = partial(hill_climber, precision=precision)
    summits = parmap(climber, points)
    return summits
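
A toy, serial illustration of the pattern (this hill_climber is an assumption; the original is not shown): each start point climbs a one-dimensional objective in steps of precision until neither direction improves it.

from functools import partial

def hill_climber(point, precision, f=lambda x: -x * x):
    # Step uphill in either direction until no step improves f.
    while f(point + precision) > f(point):
        point += precision
    while f(point - precision) > f(point):
        point -= precision
    return point

climber = partial(hill_climber, precision=0.1)
print([round(climber(p), 3) for p in (-3.0, 2.5)])  # both end near 0.0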
Example 9
    def _CAT_fit_transform(self, X, y):
        X[X == 0] = 1
        feature_num = np.shape(X)[1]
        encoder, scores = [], [
            self._feature_importance(X[:, i], y) for i in range(feature_num)
        ]

        index_unary = [
            i[0] for i in heapq.nlargest(
                self.top_unary, enumerate(scores), key=lambda x: x[1])
        ]
        print('index_unary', index_unary)
        index_binary = index_unary[:self.top_binary]

        thr_index = min(30, len(index_binary))
        print('scores', scores)
        threshold = np.sort(np.array(scores))[::-1][thr_index - 2]
        print('threshold ', threshold)

        #####################################  second-order count start  ##############################################
        def do_job(job):
            if isinstance(job, int):
                feature = X[:, job]
            else:
                feature = self._get_hash_feature(X[:, job[0]], X[:, job[1]])
            feature = self._get_count_feature(feature)
            score = self._feature_importance(feature, y)
            if score > threshold:
                return np.reshape(feature, [-1, 1]), job, score
            else:
                return None, None, None

        print('-' * 10, 'FE starts second-order count')
        res = parmap(do_job, list(combinations(index_binary, 2)) + index_unary)
        X_count = [r[0] for r in res if r[0] is not None]
        encoder_count = [r[1] for r in res if r[1] is not None]
        scores_count = [r[2] for r in res if r[2] is not None]

        ##############################  relative count percent start  #############################################

        def do_job(job):

            feature = self._get_hash_feature(X[:, job[0]], X[:, job[1]])
            feature_f1 = X[:, job[0]]

            feature = self._get_count_feature(feature)
            feature_f1 = self._get_count_feature(feature_f1)
            feature = feature / (feature_f1 + 0.01)
            score = self._feature_importance(feature, y)
            if score > threshold:
                return np.reshape(feature, [-1, 1]), job, score
            else:
                return None, None, None

        print('-' * 10, 'FE starts relative count percent')
        res = parmap(do_job, list(permutations(index_binary, 2)))
        X_count_percent = [r[0] for r in res if r[0] is not None]
        encoder_count_percent = [r[1] for r in res if r[1] is not None]
        scores_count_percent = [r[2] for r in res if r[2] is not None]

        ##############################  relative count / nunique start  #############################################
        def do_job(job):
            feature_f1 = X[:, job[0]]
            feature_f2 = X[:, job[1]]

            feature = self._get_count_feature(feature_f1)
            feature_f1 = self._get_nunique_feature(feature_f1, feature_f2)

            feature = feature / (feature_f1 + 0.01)

            score = self._feature_importance(feature, y)
            if score > threshold:
                return np.reshape(feature, [-1, 1]), job, score
            else:
                return None, None, None

        print('-' * 10, 'FE starts relative count / nunique')
        res = parmap(do_job, list(permutations(index_binary, 2)))
        X_count_nunique_divide = [r[0] for r in res if r[0] is not None]
        encoder_count_nunique_divide = [r[1] for r in res if r[1] is not None]
        scores_count_nunique_divide = [r[2] for r in res if r[2] is not None]

        #####################################  second-order nunique start  ##############################################
        def do_job(job):
            feature = self._get_nunique_feature(X[:, job[0]], X[:, job[1]])
            score = self._feature_importance(feature, y)
            if score > threshold:
                return np.reshape(feature, [-1, 1]), job, score
            else:
                return None, None, None

        print('-' * 10, 'FE starts second-order nunique')
        res = parmap(do_job, list(permutations(index_binary, 2)))
        X_nunique = [r[0] for r in res if r[0] is not None]
        encoder_nunique = [r[1] for r in res if r[1] is not None]
        scores_nunique = [r[2] for r in res if r[2] is not None]

        print('-' * 10, 'FE ends feature generation')

        # Keep the max_keep best new features across all four families.
        index = [
            i[0]
            for i in heapq.nlargest(self.max_keep,
                                    enumerate(scores_count +
                                              scores_count_percent +
                                              scores_count_nunique_divide +
                                              scores_nunique),
                                    key=lambda x: x[1])
        ]
        # Split the kept indices back into the four feature families.
        index_count = [i for i in index if i < len(scores_count)]
        index_count_percent = [
            i - len(scores_count) for i in index if i < len(scores_count) +
            len(scores_count_percent) and i >= len(scores_count)
        ]
        index_count_nunique_divide = [
            i - len(scores_count) - len(scores_count_percent) for i in index
            if i < len(scores_count) + len(scores_count_percent) +
            len(scores_count_nunique_divide) and i >= len(scores_count) +
            len(scores_count_percent)
        ]
        index_nunique = [
            i - len(scores_count) - len(scores_count_percent) -
            len(scores_count_nunique_divide) for i in index
            if i >= len(scores_count) + len(scores_count_percent) +
            len(scores_count_nunique_divide)
        ]

        encoder_count = [encoder_count[i] for i in index_count]
        encoder_count_percent = [
            encoder_count_percent[i] for i in index_count_percent
        ]
        encoder_count_nunique_divide = [
            encoder_count_nunique_divide[i]
            for i in index_count_nunique_divide
        ]
        encoder_nunique = [encoder_nunique[i] for i in index_nunique]
        print('index_count', len(index_count), 'index_count_percent',
              len(index_count_percent), 'index_count_nunique_divide',
              len(index_count_nunique_divide), 'index_nunique',
              len(index_nunique))

        if len(index_count) > 0:
            X = np.concatenate(
                [X, np.concatenate([X_count[i] for i in index_count], axis=1)],
                axis=1)
        if len(index_count_percent) > 0:
            X = np.concatenate(
                [X,
                 np.concatenate([X_count_percent[i]
                                 for i in index_count_percent], axis=1)],
                axis=1)
        if len(index_count_nunique_divide) > 0:
            X = np.concatenate(
                [X,
                 np.concatenate([X_count_nunique_divide[i]
                                 for i in index_count_nunique_divide], axis=1)],
                axis=1)
        if len(index_nunique) > 0:
            X = np.concatenate(
                [X,
                 np.concatenate([X_nunique[i] for i in index_nunique], axis=1)],
                axis=1)

        return X.astype(np.float32), [
            encoder_count, encoder_count_percent, encoder_count_nunique_divide,
            encoder_nunique
        ]
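
For reference, a hypothetical sketch of the pairwise hash feature the blocks above rely on (the real _get_hash_feature is class-internal): two categorical columns are fused into one code per row by hashing each value pair.

import numpy as np

def hash_feature(a, b):
    # Equal (a, b) pairs get equal codes; distinct pairs almost surely differ.
    return np.array([hash((x, y)) for x, y in zip(a, b)])

codes = hash_feature(np.array([1, 1, 2]), np.array([5, 5, 6]))
print(codes[0] == codes[1], codes[0] == codes[2])  # True False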
Example 10
def AverageR(n, getR):
    # Draw n samples from getR in parallel and average them elementwise.
    res = parmap(lambda i: getR(), range(n))
    return np.average(res, axis=0)
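
A usage sketch, with the parallel map done serially since whether this parmap accepts lambdas depends on its implementation: averaging many draws of a uniform random vector tends toward 0.5 in every component.

import numpy as np

def getR():
    return np.random.rand(3)

# Serial equivalent of AverageR(1000, getR).
print(np.average([getR() for _ in range(1000)], axis=0))  # ~[0.5 0.5 0.5]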