Example #1
    def __init__(self, inner, **kwargs):
        super().__init__(**kwargs)
        on = "qid"
        self.inner = inner
        self.disable = False
        if CACHE_DIR is None:
            init()

        # We take the md5 of the pipeline's __repr__ to make a unique identifier
        # for the pipeline. All distinct pipelines should return unique __repr__()
        # values, as these are intended to be unambiguous.
        trepr = repr(self.inner)
        if "object at 0x" in trepr:
            warn(
                "Cannot cache pipeline %s, as a component has not overridden __repr__"
                % trepr)
            self.disable = True

        uid = hashlib.md5(bytes(trepr, "utf-8")).hexdigest()
        destdir = path.join(CACHE_DIR, uid)
        os.makedirs(destdir, exist_ok=True)
        definition_file = path.join(destdir, DEFINITION_FILE)
        if not path.exists(definition_file):
            with open(definition_file, "w") as f:
                f.write(trepr)
        self.chest = Chest(
            path=destdir,
            dump=lambda data, filename: pd.DataFrame.to_pickle(data, filename)
            if isinstance(data, pd.DataFrame) else pickle.dump(
                data, filename, protocol=1),
            load=lambda filehandle: pickle.load(filehandle)
            if ".keys" in filehandle.name else pd.read_pickle(filehandle))
        self.hits = 0
        self.requests = 0
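The comment near the top is the crux of this example: the pipeline's repr() is hashed to name its on-disk cache directory. A minimal sketch of just that key derivation, with a made-up pipeline string standing in for a real repr():

import hashlib

trepr = "BM25() >> RM3()"  # hypothetical stand-in; any stable __repr__ works
uid = hashlib.md5(bytes(trepr, "utf-8")).hexdigest()
print(uid)  # identical across runs, so the same pipeline reuses its cache directory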
Example #2
def post_frame(ans, params, args):
    # Analysis!
    from numpy.linalg import eigh
    from numpy import argsort

    ev, vecs = eigh(ans["overlap"])
    idx = argsort(ev)[::-1]
    ans["ev"] = ev[idx] / params["snapshots"]
    ans["vecs"] = vecs[:, idx]

    print("Sanity check volume: {:f}".format(ans["volume"]))
    print(
        ans["x_min"],
        ans["x_max"],
        ans["y_min"],
        ans["y_max"],
        ans["z_min"],
        ans["z_max"],
    )
    print(ans["overlap"])
    print("Singular values:")
    print(ans["ev"])
    print(ans["vecs"])

    from chest import Chest
    cpath = '{:s}-chest-{:03d}'.format(args.chest_path, ans["frame"])
    c = Chest(path=cpath)
    for key in ans.keys():
        c[ans['time'], key] = ans[key]
    ans.clear()
    c.flush()

    ans["cpath"] = cpath

    return
Example #4
def construire(phrases):
    classes = Chest(path="classes", dump=dump, load=load)
    features = Chest(path="features", dump=dump, load=load)

    for p in phrases:
        vecteur = Chest(path=p, dump=dump, load=load)
        vecteur_log = Chest(path=p, dump=dump, load=load)
Example #5
def outer_process(job):
    """
  Process to be executed in the outer IPython.parallel map
  """

    # Split the arguments
    args, params, frame = job

    # always need these
    from importlib import import_module
    MR = import_module(args.mapreduce)

    # Initialize the MapReduce data with base cases
    # Returns job list to pass to map
    jobs = MR.MR_init(args, params, frame)
    # Copy a base case in which to reduce the results
    from copy import deepcopy
    ans = deepcopy(jobs[0][4])

    # Map!
    import time as time_
    ttime = time_.time()
    if args.thread < 2:
        results = map(inner_process, jobs)
    else:
        from multiprocessing import Pool
        p = Pool(processes=args.thread)
        results = p.imap_unordered(inner_process, jobs, chunksize=1)
    if args.verbose:
        print('  Map took {:f}s on {:d} processes'.format(
            time_.time() - ttime, args.thread))

    # Reduce!
    ttime = time_.time()
    for r in results:
        MR.reduce_(ans, r)
    if args.thread >= 2:
        p.close()
    if args.verbose:
        print('  Reduce took {:f}s on {:d} processes'.format(
            time_.time() - ttime, args.thread))

    ans["frame"] = frame

    # Analysis!
    post = import_module(args.post)
    post.post_frame(ans, params, args)
    post.plot_frame(ans, params, args)

    # Save the results to file!
    from chest import Chest
    cpath = '{:s}-chest-{:03d}'.format(args.name, frame)
    c = Chest(path=cpath)
    for key in ans.keys():
        c[ans['time'], key] = ans[key]
    c.flush()

    return cpath
Example #8
def post_frame(ans, params, args):

  from chest import Chest
  cpath = '{:s}-chest-{:03d}'.format(args.chest_path, ans["frame"])
  c = Chest(path=cpath)
  for key in ans.keys():
    c[ans['time'], key] = ans[key]
  ans.clear()
  c.flush()

  ans["cpath"] = cpath

  return 
Example #9
def main2():
    from nltk.corpus import gutenberg
    from chest import Chest
    from heapdict import heapdict
    struc = Chest()
    texte = gutenberg.raw('carroll-alice.txt')
    phrases = iter(texte.split('\n'))
    # print(list(map(lambda x: list(generate_regex(OptimString(point=Point('.'), seq="".join(x)))), everygrams(next(phrases)))))

    for element in ngrams(next(phrases), 10):
        chaine = OptimString(point=Point('.'), seq="".join(element))
        for x in generate_regex(chaine):
            # print(str(x))
            struc[x] = True
    print(len(list(struc.keys())))
Example #10
    def __init__(self, name, maxLength, location=os.getcwd()):
        assert isinstance(maxLength, int), "\'maxLength\' must be an integer!"
        assert isinstance(location, str) | isinstance(location, PurePath), "\'location\' must be a string or PurePath!"
        assert isinstance(name, str), "\'name\' must be a string!"
        assert maxLength >= 1, "\'maxLength\' must be at-least one!"

        # Private variables
        self.__maxLength = 0
        self.__data = deque([], maxLength)
        self.__name = name
        self.__location = location
        self.__storage = Chest(path=location) # Change dumping/loading method here

        self.maxLength = maxLength
        self.load()
Example #11
    def __init__(self, difficulty, player):

        self.hasMonster = False
        self.hasBoss = False
        self.hasChest = False
        self.isDone = False
        self.difficulty = difficulty
        self.settings = Settings()
        self.goal = self.settings.getGoal()
        self.player = player
        self.chest = Chest(self.player)
        self.monster = Monster()
        self.inspected = False
        self.handler = Stringhandler()
        self.boss = Endboss()
        self.endboss = self.boss.randomBoss(player)
Example #12
    def set_map(self):
        self.player = Player(336, 672, self.block, self.block,
                             self.character_frontpng, 0, 4, 0, 0)
        self.chests = [
            Chest(360, 48, self.block, self.block, self.chestpng, 0),
            Chest(360, 300, self.block, self.block, self.chestpng, 0),
        ]
        self.stool = GameObjects(100, 200, self.block, self.block,
                                 self.stoolpng, 1)
        self.enemies = [
            Enemy(0, 144, self.block, self.block, self.keypng, 1, 2),
            Enemy(0, 144 + self.block, self.block, self.block, self.keypng, 1, 2),
            Enemy(0, 144 - self.block, self.block, self.block, self.keypng, 1, 2),
        ]
Example #13
class DOK:
    def __init__(self, arg1, shape=None, chunksize=(int(1e4), int(1e4)),
                 dtype=None):
        if shape is None:
            raise ValueError('Demand shape for now')
        if not isinstance(arg1, dict):
            raise TypeError('support other things later')

        self.shape = shape
        self.chunksize = chunksize
        self.the_dict = Chest(arg1)

    def __getitem__(self, index):
        """If key=(i,j) is a pair of integers, return the corresponding
        element.  
        
        TODO:
        If either i or j is a slice or sequence, return a new sparse
        matrix with just these elements.

        MUSINGS ON THIS:
        How can we construct a new DOK in a parallel out of core way?

        We need to create a new Chest with elements of another, moving them through memory.
        This sounds like a perfect job for dask!
        """
        if ((isinstance(index, tuple)) and (len(index) == 2) and 
                (all(map(lambda x: isinstance(x, int), index)))):
            return self.the_dict[index]
        if isinstance(index, slice):
            pass

    def __contains__(self, key):
        return self.the_dict.__contains__(key)
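The slice branch of __getitem__ above is left as a stub. Below is a sketch (not from the original source) of one way to fill it in for a row slice, assuming every stored key is an (i, j) pair of non-negative integers and the slice has a positive step; it is in-memory and O(nnz), whereas the docstring's musings point at an out-of-core dask version:

    def _getitem_rows(self, rows):
        # Copy the matching (i, j) entries into a new DOK, re-basing row
        # indices onto the slice. Assumes rows.step > 0.
        start, stop, step = rows.indices(self.shape[0])
        nrows = len(range(start, stop, step))
        selected = {((i - start) // step, j): self.the_dict[(i, j)]
                    for (i, j) in self.the_dict.keys()
                    if start <= i < stop and (i - start) % step == 0}
        return DOK(selected, shape=(nrows, self.shape[1]))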
Example #14
def fft(x, axis=-1, chunksize=2**26, available_memory=(4 * 1024**3), cache=None):
    """Simple wrapper for DAFT FFT function

    This function calls the DAFT function, but also performs the computation of
    the FFT, and returns the result as a numerical array.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    axis : int, optional
        Axis over which to compute the FFT. If not given, the last axis is used.
    chunksize : int, optional
        Chunksize to use when splitting up the input array.  Default is 2**26,
        which is about 64MB -- a reasonable target that reduces memory usage.
    available_memory : int, optional
        Maximum amount of RAM to use for caching during computation.  Defaults
        to 4*1024**3, which is 4GB.

    """
    if cache is None:
        from chest import Chest  # For more flexible caching
        cache = Chest(available_memory=available_memory)
    X_dask = DAFT(x, axis=axis, chunksize=chunksize)
    return X_dask.compute(cache=cache)
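A hypothetical call, assuming DAFT is importable from the same module as this wrapper; the array and chunk size are placeholders:

import numpy as np

x = np.random.rand(2**20)
X = fft(x, chunksize=2**18)  # intermediate chunks spill through the Chest cache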
Example #15
import numpy as np
def post_frame(ans, params, args):
  # Analysis! 
  ans['TAbs'] = max(ans['TMax'], -ans['TMin'])
  ans['PeCell'] = ans['UAbs']*ans['dx_max']/params['conductivity']
  ans['ReCell'] = ans['UAbs']*ans['dx_max']/params['viscosity']

  # Mixing height
  L = params["extent_mesh"][2] - params["root_mesh"][2]
  h = 0.
  t_proj = ans["t_proj_z"].to_array()  # hoist: avoid repeated conversions
  tmax = np.max(t_proj)
  tmin = np.min(t_proj)
  tzero = (tmax + tmin) / 2
  h_cabot = 0.
  for i in range(t_proj.shape[0]):
    if t_proj[i] < tzero:
      h_cabot += (t_proj[i] - tmin)
    else:
      h_cabot += (tmax - t_proj[i])
  ans["h"] = h_cabot

  zs = ans['z_z'].to_array() 
  from utils.my_utils import find_root
  h_visual = ( find_root(zs, ans["t_proj_z"].to_array(), y0 = tmax - (tmax - tmin)*.01)
             - find_root(zs, ans["t_proj_z"].to_array(), y0 = tmin + (tmax - tmin)*0.1)) / 2.

  h_exp = find_root(zs, np.array(ans["t_max_z"].to_array()), y0 = 0.0)

  ans["H"] = h_visual
  ans["H_exp"] = h_exp
  plot_frame(ans, params, args)

  from interfaces.abstract import AbstractSlice

  from chest import Chest
  cpath = '{:s}-chest-{:03d}'.format(args.chest_path, ans["frame"])
  c = Chest(path=cpath)
  for key in ans.keys():
    if isinstance(ans[key], AbstractSlice):
      c[ans['time'], key] = ans[key].to_array()
    else:
      c[ans['time'], key] = ans[key]
  ans.clear()
  c.flush()

  ans["cpath"] = cpath

  return 
Example #16
    def max_memory(self, max_memory):
        # set up a Chest cache so computations spill to disk
        # instead of exhausting RAM
        if max_memory is not None:
            available_memory = max_memory
        else:
            available_memory = psutil.virtual_memory().available
        self.__max_memory = available_memory
        self.cache = Chest(available_memory=available_memory)
Example #17
def main():
    dico = Chest(path='francais_tatoeba_5bis-char-max')
    dico2 = defaultdict(set)
    i = 0
    with open(
            "/Users/korantin/Documents/Projects/Lexiques/francais_col123.txt",
            'r') as ba:
        tmp = ba.read().splitlines()
        for phrase in tmp:
            (ind, ln, phrase) = phrase.strip().split('\t')
            if len(phrase) <= 10:
                print(phrase)
                print(i, len(tmp))
                estime(phrase, tmp, memoire=dico, d=dico2)
            i += 1
        for x, y in dico2.items():
            dico[x] *= (len(tmp) / len(y))
        dico.flush()
    print(*dico.items(), sep='\n')
Example #18
    def check_if_collided(self):
        for enemy in self.enemies:
            if self.detect_collision(self.player, enemy):
                self.set_map()
                return True
        for chest in self.chests:
            if self.detect_collision(self.player, chest):
                # Note: this Chest instance is created but never stored
                Chest(chest.x, chest.y, self.block, self.block,
                      'crossgame/assets/chest_open.png', 0)
                self.game_window.blit(chest.image, (chest.x, chest.y))
        return False
Example #19
def generation_room(x_initial, y_initial, room):
    x = x_initial
    y = y_initial
    for i in range(len(room)):
        for j in range(len(room[0])):
            if room[i][j] == "f":
                pf = Floor(x, y, random.randint(0,5))
                layer_0.add(pf)
            if room[i][j] == "w":
                pf = Wall(x, y, 4)
                layer_1.add(pf)
            if room[i][j] == "d":
                pf = Wall(x, y, 1)
                layer_2.add(pf)
            if room[i][j] == "v":
                pf = Wall(x, y, 2)
                layer_2.add(pf)
            if room[i][j] == "x":
                pf = Wall(x, y, 0)
                layer_0.add(pf)
                wall.append(pf)
            if room[i][j] == "u":
                pf = Wall(x, y, 3)
                layer_1.add(pf)
                r = random.randint(0, 100)
                if r <= 5:
                    torch = Torch(x, y, 3)
                    layer_1.add(torch)
            if room[i][j] == "c":
                r = random.randint(3, 10)
                if r >= 5:
                    pf = Floor(x, y, 6)
                    layer_0.add(pf)
                    pf = Chest(x, y-6, random.randint(1, 2))
                    layer_1.add(pf)
                    chest.append(pf)
                else:
                    pf = Floor(x, y, 6)
                    layer_0.add(pf)
            if room[i][j] == "z":
                pf = Monster_spawn(x, y)
                layer_0.add(pf)
                r = random.randint(2, 5)
                for _ in range(r):
                    x_ = random.randint(pf.rect.x-32, pf.rect.x+64)
                    y_ = random.randint(pf.rect.y-32, pf.rect.y+64)
                    pf = Monster(x_, y_, random.randint(1, 4))
                    layer_1.add(pf)
                    layer_monster.add(pf)
                    layer_all_monster_and_layer.add(pf)

            x += 32
        x = x_initial
        y += 32
Example #20
import dask
from dask.local import get_sync

def test_cache_options():
    try:
        from chest import Chest
    except ImportError:
        return
    cache = Chest()
    def inc2(x):
        assert 'y' in cache
        return x + 1

    with dask.set_options(cache=cache):
        get_sync({'x': (inc2, 'y'), 'y': 1}, 'x')
Example #21
    def read_geno(bfile,
                  freq_thresh,
                  threads,
                  check=False,
                  max_memory=None,
                  usable_snps=None,
                  normalize=False):
        # set Cache to protect memory spilling
        if max_memory is not None:
            available_memory = max_memory
        else:
            available_memory = psutil.virtual_memory().available
        cache = Chest(available_memory=available_memory)
        (bim, fam, g) = read_plink(bfile)  # read the files using pandas_plink
        g_std = g.std(axis=1)
        if check:
            with ProgressBar(), dask.config.set(pool=ThreadPool(threads)):
                print('Removing invariant sites')
                idx = (g_std != 0).compute(cache=cache)
            g = g[idx, :]
            bim = bim[idx].copy().reset_index(drop=True)
            bim.i = bim.index.tolist()
            del idx
            gc.collect()
        if usable_snps is not None:
            idx = bim[bim.snp.isin(usable_snps)].i.tolist()
            g = g[idx, :]
            bim = bim[bim.i.isin(idx)].copy().reset_index(drop=True)
            bim.i = bim.index.tolist()
        n = g.shape[1]  # number of samples; `n` was undefined in the original snippet
        mafs = g.sum(axis=1) / (2 * n) if freq_thresh > 0 else None
        # Filter MAF
        if freq_thresh > 0:
            print('Filtering MAFs smaller than', freq_thresh)
            print('    Genotype matrix shape before', g.shape)
            assert freq_thresh < 0.5
            good = (mafs <
                    (1 - float(freq_thresh))) & (mafs > float(freq_thresh))
            with ProgressBar():
                with dask.config.set(pool=ThreadPool(threads)):
                    good, mafs = dask.compute(good, mafs, cache=cache)
            g = g[good, :]
            print('    Genotype matrix shape after', g.shape)
            bim = bim[good]
            bim['mafs'] = mafs[good]
            del good
            gc.collect()
        if normalize:
            mean = g.mean(axis=1)
            g = (g.T - mean) / g_std
        else:
            g = g.T

        return g, bim, fam
Example #22
    def do_pca(g, n_comp):
        """
        Perform a PCA on the genetic array and return n_comp of it

        :param g: Genotype array
        :param n_comp: Number of components sought
        :return: components array
        """
        cache = Chest(available_memory=available_memory, path=os.getcwd())
        pca = PCA(n_components=n_comp)
        pca = pca.fit_transform(g)
        return pca.compute(cache=cache)
Example #23
    def __init__(self, n, stocha, obs):

        self.gold_mines = []
        self.forests = []
        self.obstacles = []
        self.board = []
        self.quotas = [False for k in range(NB_RESOURCES)]
        self.n = n
        self.time = 0
        self.reward = 0
        self.stocha = stocha

        # Board instantiation
        for i in range(n):
            for j in range(n):
                if (i, j) in OBSTACLES and obs:
                    obstacle = Obstacle([i, j])
                    self.board.append(obstacle)
                    self.obstacles.append(obstacle)
                elif (i,j) in GOLD_MINES:
                    gold_mine = GoldMine([i,j])
                    self.board.append(gold_mine)
                    self.gold_mines.append(gold_mine)
                elif (i,j) in FORESTS:
                    forest = Forest([i,j])
                    self.board.append(forest)
                    self.forests.append(forest)
                elif (i,j) == PLAYER:
                    self.player = Player([i,j], NOTHING)
                    self.board.append(FreeTile([i,j]))
                elif (i,j) == CHEST:
                    self.chest = Chest([i,j])
                    self.chest_next = True
                    self.board.append(self.chest)
                else:
                    self.board.append(FreeTile([i,j]))

        self.gold_mines_next = [False for k in self.gold_mines]
        self.forests_next = [False for k in self.forests]
Example #24
import dask
from dask.local import get_sync

def test_cache_options():
    try:
        from chest import Chest
    except ImportError:
        return
    cache = Chest()

    def inc2(x):
        assert "y" in cache
        return x + 1

    with dask.config.set(cache=cache):
        get_sync({"x": (inc2, "y"), "y": 1}, "x")
Example #25
import dask
import dask.threaded
from chest import Chest
from dask.diagnostics import ProgressBar

def exec_dag(dag,
             num_workers=None,
             cache_size=1e9,
             scheduler=dask.threaded.get,
             show_progress=False):
    cache = Chest(available_memory=cache_size)
    if show_progress:
        with ProgressBar(), dask.set_options(cache=cache,
                                             num_workers=num_workers):
            result = dag.compute(get=scheduler)
    else:
        with cache:
            result = dag.compute(get=scheduler, num_workers=num_workers)
    return result
Example #26
def estime(ba, memoire: Chest):
    """
        L'estimation maximale des paramètre se fait par le calcul du powerset sur un ensemble BA (Base d'Apprentissage).
        On fait donc un tf-idf mais en ayant en vocabulaire les composantes du powerset
    :param BA:
    :return:
    """
    len_ba = sum(pow(2, len(x)) for x in ba)
    from functools import partial
    from multiprocessing import Pool
    with Pool(processes=5) as p:
        # partial of a top-level function is picklable; the original lambda was not
        for element in ba:
            for x in p.map(partial(powerset, element), range(len(element))):
                for y in x:
                    if not memoire.get(y):
                        memoire[y] = frequence_brute(y, element, True)
    for x in memoire:
        memoire[x] /= len(memoire)
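powerset and frequence_brute are not shown in this example. A plausible stand-in for the former, assuming powerset(element, i) yields the length-i sub-sequences of element:

from itertools import combinations

def powerset(element, i):
    # All length-i subsequences of element, rejoined into strings.
    return ["".join(c) for c in combinations(element, i)]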
Example #27
def load_from_archive(names, arch):
    cs = []
    for name in names:
        cs.append(Chest(path      = "{:s}-results".format(name),
                        open      = partial(glopen,      endpoint=arch),
                        open_many = partial(glopen_many, endpoint=arch),
                        available_memory = 1e12))
    scs = [CachedSlict(c) for c in cs]

    ps = []
    for name in names:
        with glopen(
                    "{:s}.json".format(name), mode='r',
                    endpoint = arch,
                    ) as f:
            ps.append(json.load(f))
    if len(names) == 1:
        return cs[0], scs[0], ps[0]
    return cs, scs, ps
Example #28
def generate_regex(seq: OptimString, memo: Chest = None):
    if memo is None:  # memo is dereferenced below, so default to an empty Chest
        memo = Chest()
    file = deque()
    sortie = deque()

    file.appendleft(seq)

    while file:
        current = file.pop()
        if not all(x == '.' for x in str(current)):
            if not memo.get(current):
                yield current
        if not isinstance(current.data_pointe[-1], Point):
            current = current.add_point()
            file.appendleft(current)
            for _ in range(
                    current.control.get(current.get_point)[-1] + 1,
                    len(current.data)):
                current = current.deplace_point()
                file.appendleft(current)
Example #29
def enter():
    global boy, ground, background, spikeList, Hp_Bar, Monster_Bear_List, Monster_Mage_List, ChestList, CHicken,\
        GUI, FlatFormList, FLAG

    background = Background()
    boy = Boy(background)
    Hp_Bar = HP_BAR()
    ground = [Ground(i, background) for i in range(len(Ground.groundList))]
    Monster_Bear_List = [
        Monster_bear(i, background) for i in range(len(Monster_bear.posList))
    ]
    Monster_Mage_List = [
        Monster_mage(i) for i in range(len(Monster_mage.posList))
    ]
    spikeList = [Spike(i, background) for i in range(len(Spike.spikeList))]
    ChestList = [Chest(i, background) for i in range(len(Chest.chestList))]
    GUI = Gui()
    FlatFormList = [
        Flatform(i, background) for i in range(len(Flatform.flatFormList))
    ]
    FLAG = Flag(background)

    game_world.add_object(background, 0)
    for i in range(len(ground)):
        game_world.add_object(ground[i], 0)
    game_world.add_object(boy, 2)
    game_world.add_object(Hp_Bar, 1)
    for i in range(len(Monster_Bear_List)):
        game_world.add_object(Monster_Bear_List[i], 1)
    for i in range(len(Monster_Mage_List)):
        game_world.add_object(Monster_Mage_List[i], 1)
    for i in range(len(ChestList)):
        game_world.add_object(ChestList[i], 1)
    for i in range(len(spikeList)):
        game_world.add_object(spikeList[i], 1)
    game_world.add_object(GUI, 1)
    for i in range(len(FlatFormList)):
        game_world.add_object((FlatFormList[i]), 1)
    game_world.add_object(FLAG, 1)

    background.set_center_object(boy)
    boy.set_background(background)
Example #30
def fft_to_hdf5(x, filename, axis=-1, chunksize=2**26, available_memory=(4 * 1024**3), cache=None):
    """Simple wrapper for DAFT FFT function that writes to HDF5

    This function calls the DAFT function, but also performs the computation of
    the FFT, and outputs the result into the requested HDF5 file

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    filename : string
        Relative or absolute path to HDF5 file.  If this string contains a
        colon, the preceding part is taken as the filename, while the following
        part is taken as the dataset group name.  The default group name is 'X'.
    axis : int, optional
        Axis over which to compute the FFT. If not given, the last axis is used.
    chunksize : int, optional
        Chunksize to use when splitting up the input array.  Default is 2**26,
        which is about 64MB -- a reasonable target that reduces memory usage.
    available_memory : int, optional
        Maximum amount of RAM to use for caching during computation.  Defaults
        to 4*1024**3, which is 4GB.

    """
    from h5py import File
    from dask import set_options
    from dask.array import store
    if cache is None:
        from chest import Chest  # For more flexible caching
        cache = Chest(available_memory=available_memory)
    if ':' in filename:
        filename, groupname = filename.split(':')
    else:
        groupname = 'X'
    X_dask = DAFT(x, axis=axis, chunksize=chunksize)
    with set_options(cache=cache):
        with File(filename, 'w') as f:
            output = f.create_dataset(groupname, shape=X_dask.shape, dtype=X_dask.dtype)
            store(X_dask, output)
    return
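The colon convention in filename is the one non-obvious part of the interface. A hypothetical usage, again assuming DAFT is available; the paths are placeholders:

import numpy as np

x = np.random.rand(2**16)
fft_to_hdf5(x, "spectra.h5:my_fft")   # writes dataset 'my_fft' in spectra.h5
fft_to_hdf5(x, "spectra_default.h5")  # default dataset name is 'X'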
Example #31
        required=True,
        help="directory where to find SQL SELECT statements,"
        " one single statement per file. Can be configured with monthly_check.source"
    )
    parser.add_argument(
        '-o',
        '--output',
        conf_key="monthly_check.output",
        required=True,
        help=
        "Where to write the report. Can be configured with monthly_check.output."
    )
    parser.add_argument(
        '-m',
        '--month',
        conf_key="monthly_check.month",
        default=month_date.previous(),
        type=month_date.from_str,
        help=
        "6 digits, 4 for the year then 2 for the month (ex.: 201901 is Jan 2019)."
        " Default value is last month.")
    CursorProvider.add_arguments_to(parser)

    args = parser.parse_args()
    Chest(vars(args))

    logging.basicConfig(level=config["logging"]["level"],
                        format=config["logging"]["format"])

    monthly_check(args.month, args.source, args.output)
Example #32
# -*- coding: utf-8 -*-
__author__ = 'Bruno Konrad'

import sys
from chest import Chest
from chest.chest_dropbox import DropboxAuthorizator, DropboxStorager


if __name__ == "__main__":
    if len(sys.argv) == 3:
        dropboxStorager = DropboxStorager()
        dropboxAuthorizator = DropboxAuthorizator()

        chest = Chest(sys.argv, dropboxStorager)
        chest.set_authorizator(dropboxAuthorizator)
        chest.start()
    else:
        print("Você precisa dizer o arquivo que deseja salvar e em que pasta no Dropbox.")
Example #33
from chest import Chest
c = Chest()

# Acts like a normal dictionary
c['x'] = [1, 2, 3]
print(c['x'])
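What separates a Chest from a plain dict is that it can spill to disk. Continuing the snippet with the standard chest API:

c.flush()      # persist in-memory items under c.path on disk
print(c['x'])  # [1, 2, 3] -- reloaded transparently from disk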

Example #34
# Set up the frame arguments
from mapcombine import outer_process
jobs = [[args, params, i] for i in range(args.frame, args.frame_end+1)]

# schedule the frames, one IPython process each
# if only one process or parallel not set, use normal map
import time
start_time = time.time()
if len(jobs) > 1 and args.parallel:
  from IPython.parallel import Client
  p = Client(profile='mpi')
  stuff = p.load_balanced_view().map_async(outer_process, jobs)
else:
  stuff = map(outer_process, jobs)

# insert new results into the out-of-core dictionary (Chest)
nelm = params["shape_mesh"][0] * params["shape_mesh"][1] * params["shape_mesh"][2]
from chest import Chest
for i, res in enumerate(stuff):
  c1 = Chest(path=res['cpath'])
  c = Chest(path=args.chest_path)
  c.update(c1)
  c.flush()
  c1.drop()

  # Print a progress update
  run_time = time.time() - start_time
  print("Processed {:d}th frame after {:f}s ({:f} eps)".format(i, run_time, (i+1)*nelm/run_time))