Example 1
import numpy as np


def post_frame(ans, params, args):
  # Analysis: derived scalar diagnostics
  ans['TAbs'] = max(ans['TMax'], -ans['TMin'])
  ans['PeCell'] = ans['UAbs']*ans['dx_max']/params['conductivity']  # cell Peclet number
  ans['ReCell'] = ans['UAbs']*ans['dx_max']/params['viscosity']  # cell Reynolds number

  # Mixing height
  L = params["extent_mesh"][2] - params["root_mesh"][2]
  t_proj = ans["t_proj_z"].to_array()
  tmax = np.max(t_proj)
  tmin = np.min(t_proj)
  tzero = (tmax + tmin) / 2
  # Cabot-style integral mixing height: distance of the profile from the
  # nearer pure-fluid temperature, accumulated over the profile
  h_cabot = np.sum(np.where(t_proj < tzero, t_proj - tmin, tmax - t_proj))
  ans["h"] = h_cabot

  zs = ans['z_z'].to_array()
  from utils.my_utils import find_root
  # Visual mixing height: half the span between near-extreme crossings
  h_visual = (find_root(zs, t_proj, y0=tmax - (tmax - tmin)*0.01)
              - find_root(zs, t_proj, y0=tmin + (tmax - tmin)*0.1)) / 2.

  h_exp = find_root(zs, np.array(ans["t_max_z"].to_array()), y0=0.0)

  ans["H"] = h_visual
  ans["H_exp"] = h_exp
  plot_frame(ans, params, args)

  from interfaces.abstract import AbstractSlice

  from chest import Chest
  cpath = '{:s}-chest-{:03d}'.format(args.chest_path, ans["frame"])
  c = Chest(path=cpath)
  for key in ans.keys():
    if isinstance(ans[key], AbstractSlice):
      c[ans['time'], key] = ans[key].to_array()
    else:
      c[ans['time'], key] = ans[key]
  ans.clear()
  c.flush()

  ans["cpath"] = cpath

  return 
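
# A minimal read-back sketch for one of the per-frame chests written above;
# the path is illustrative, and keys are the (time, field) tuples stored by
# post_frame.
from chest import Chest

c = Chest(path='mycase-chest-000')
for time, key in c.keys():
    value = c[time, key]  # slices were stored via .to_array()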
Example 2
from collections import defaultdict

from chest import Chest


def main():
    # Out-of-core dict on disk; dico2 tracks which sentences touched each key
    dico = Chest(path='francais_tatoeba_5bis-char-max')
    dico2 = defaultdict(set)
    with open(
            "/Users/korantin/Documents/Projects/Lexiques/francais_col123.txt",
            'r') as ba:
        tmp = ba.read().splitlines()
        for i, phrase in enumerate(tmp):
            (ind, ln, phrase) = phrase.strip().split('\t')
            if len(phrase) <= 10:
                print(phrase)
                print(i, len(tmp))  # progress: current index / total
                estime(phrase, tmp, memoire=dico, d=dico2)
        for x, y in dico2.items():
            dico[x] *= (len(tmp) / len(y))
        dico.flush()
    print(*dico.items(), sep='\n')
Example 3
import json
from functools import partial

from chest import Chest
from slict import CachedSlict
# NOTE: glopen/glopen_many are assumed to come from the glopen package
from glopen import glopen, glopen_many


def load_from_archive(names, arch):
    cs = []
    for name in names:
        cs.append(Chest(path      = "{:s}-results".format(name),
                        open      = partial(glopen,      endpoint=arch),
                        open_many = partial(glopen_many, endpoint=arch),
                        available_memory = 1e12))
    scs = [CachedSlict(c) for c in cs]

    ps = []
    for name in names:
        with glopen(
                    "{:s}.json".format(name), mode='r',
                    endpoint = arch,
                    ) as f:
            ps.append(json.load(f))
    if len(names) == 1:
        return cs[0], scs[0], ps[0]
    return cs, scs, ps
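
# Hypothetical calls matching the return convention above: one name returns
# scalars, several names return parallel lists.
c, sc, p = load_from_archive(["run_a"], arch="my-globus-endpoint")
cs, scs, ps = load_from_archive(["run_a", "run_b"], arch="my-globus-endpoint")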
Example 4
def enter():
    global boy, ground, background, spikeList, Hp_Bar, Monster_Bear_List, Monster_Mage_List, ChestList, CHicken,\
        GUI, FlatFormList, FLAG

    background = Background()
    boy = Boy(background)
    Hp_Bar = HP_BAR()
    ground = [Ground(i, background) for i in range(len(Ground.groundList))]
    Monster_Bear_List = [
        Monster_bear(i, background) for i in range(len(Monster_bear.posList))
    ]
    Monster_Mage_List = [
        Monster_mage(i) for i in range(len(Monster_mage.posList))
    ]
    spikeList = [Spike(i, background) for i in range(len(Spike.spikeList))]
    ChestList = [Chest(i, background) for i in range(len(Chest.chestList))]
    GUI = Gui()
    FlatFormList = [
        Flatform(i, background) for i in range(len(Flatform.flatFormList))
    ]
    FLAG = Flag(background)

    game_world.add_object(background, 0)
    for g in ground:
        game_world.add_object(g, 0)
    game_world.add_object(boy, 2)
    game_world.add_object(Hp_Bar, 1)
    # Everything below shares layer 1; add in the original order
    for obj in Monster_Bear_List + Monster_Mage_List + ChestList + spikeList:
        game_world.add_object(obj, 1)
    game_world.add_object(GUI, 1)
    for platform in FlatFormList:
        game_world.add_object(platform, 1)
    game_world.add_object(FLAG, 1)

    background.set_center_object(boy)
    boy.set_background(background)
Example 5
def fft_to_hdf5(x, filename, axis=-1, chunksize=2**26, available_memory=(4 * 1024**3), cache=None):
    """Simple wrapper for DAFT FFT function that writes to HDF5

    This function calls the DAFT function, performs the FFT computation, and
    writes the result into the requested HDF5 file.

    Parameters
    ----------
    x : array_like
        Input array, can be complex.
    filename : string
        Relative or absolute path to HDF5 file.  If this string contains a
        colon, the preceding part is taken as the filename, while the following
        part is taken as the dataset group name.  The default group name is 'X'.
    axis : int, optional
        Axis over which to compute the FFT. If not given, the last axis is used.
    chunksize : int, optional
        Chunk size to use when splitting up the input array.  Default is
        2**26, a reasonable target that reduces memory usage.
    available_memory : int, optional
        Maximum amount of RAM to use for caching during computation.  Defaults
        to 4*1024**3, which is 4GB.
    cache : MutableMapping, optional
        Cache for intermediate results.  Defaults to a `chest.Chest` bounded
        by `available_memory`.

    """
    from h5py import File
    from dask import set_options
    from dask.array import store
    if cache is None:
        from chest import Chest  # For more flexible caching
        cache = Chest(available_memory=available_memory)
    if ':' in filename:
        filename, groupname = filename.split(':')
    else:
        groupname = 'X'
    X_dask = DAFT(x, axis=axis, chunksize=chunksize)
    with set_options(cache=cache):
        with File(filename, 'w') as f:
            output = f.create_dataset(groupname, shape=X_dask.shape, dtype=X_dask.dtype)
            store(X_dask, output)
    return
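
# A minimal usage sketch of the filename:group convention described in the
# docstring; the output name and group 'psi' are illustrative.
import numpy as np

x = np.random.rand(2**16)
fft_to_hdf5(x, 'out.h5:psi')  # FFT of x lands in dataset 'psi' of out.h5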
Example 6
    def __init__(self, n, stocha, obs):

        self.gold_mines = []
        self.forests = []
        self.obstacles = []
        self.board = []
        self.quotas = [False for _ in range(NB_RESOURCES)]
        self.n = n
        self.time = 0
        self.reward = 0
        self.stocha = stocha

        # Board instantiation
        for i in range(n):
            for j in range(n):
                if (i, j) in OBSTACLES and obs:
                    obstacle = Obstacle([i, j])
                    self.board.append(obstacle)
                    self.obstacles.append(obstacle)
                elif (i, j) in GOLD_MINES:
                    gold_mine = GoldMine([i, j])
                    self.board.append(gold_mine)
                    self.gold_mines.append(gold_mine)
                elif (i, j) in FORESTS:
                    forest = Forest([i, j])
                    self.board.append(forest)
                    self.forests.append(forest)
                elif (i, j) == PLAYER:
                    self.player = Player([i, j], NOTHING)
                    self.board.append(FreeTile([i, j]))
                elif (i, j) == CHEST:
                    self.chest = Chest([i, j])
                    self.chest_next = True
                    self.board.append(self.chest)
                else:
                    self.board.append(FreeTile([i, j]))

        self.gold_mines_next = [False for _ in self.gold_mines]
        self.forests_next = [False for _ in self.forests]
Example 7
# Get the current date and today's holiday
# Copyright (c) Akos Polster. All rights reserved.

from datetime import datetime
import holidays
import json
import requests
import sys
import traceback
import urllib.request
from ip2geotools.databases.noncommercial import DbIpCity
from chest import Chest

settings = Chest()


def get_country_code():
    country_last = "US"
    if "country_last" in settings:
        country_last = settings["country_last"]
    country_last_updated = datetime.fromtimestamp(0)
    if "country_last_updated" in settings:
        country_last_updated = settings["country_last_updated"]

    now = datetime.now()
    delta = now - country_last_updated
    if delta.total_seconds() < 86400:
        return country_last

    try:
        myIp = urllib.request.urlopen('http://icanhazip.com/',
Example 8
args = command_line_ui()

# load params from genrun.py input dictionary
import json
#from utils.custom_json import CustomDecoder
with open("{:s}.json".format(args.name), 'r') as f:
    params = json.load(f)

# insert new results into the dictionary
fname = '{:s}-results.dat'.format(args.name)
#with open(fname, 'r') as f:
#  results = json.load(f, cls=CustomDecoder)
from chest import Chest
from slict import CachedSlict

results = CachedSlict(Chest(path="{:s}-results".format(args.name)))

from importlib import import_module

xx = import_module(args.post)
import time as clock

start_time = clock.time()
i = 0
#for time in results[:,"frame"].keys():
#  xx.plot_frame(results[time,:], params, args)
#  i = i + 1
#  print("Processed t={:f} ({:f} fps)".format(time, (clock.time() - start_time) / i))

# Post-post process the contents of the results dictionary
xx.post_series(results, params, args)
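
# For reference, a sketch of the CachedSlict access pattern used by the post
# modules (it appears in the commented-out loop above): results is keyed by
# (time, field) tuples, and slicing selects along either axis.
times = list(results[:, "frame"].keys())  # every time that has a "frame"
if times:
    snapshot = results[times[0], :]       # dict-like view of one time slice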
Example 9
def get_ld(rgeno,
           rbim,
           tgeno,
           tbim,
           kbwindow=1000,
           threads=1,
           max_memory=None,
           justd=False,
           extend=False):
    """
    Get the LD blocks from snp overlap between two populations

    :param rgeno: Genotype array of the reference population
    :param rbim: Mapping of variant info to genotype array positions (reference)
    :param tgeno: Genotype array of the target population
    :param tbim: Mapping of variant info to genotype array positions (target)
    :param kbwindow: Size of the window in kb
    :param threads: Number of threads to use for computation
    :param max_memory: Memory limit
    :param justd: Return only the raw LD matrices instead of tagging/cotagging
    :param extend: 'Circularize' the genome by extending both ends
    :return: A list of tuples (or a dataframe if not justd) with the LD per block
    """
    # # Set CPU limits
    # soft, hard = resource.getrlimit(resource.RLIMIT_NPROC)
    # resource.setrlimit(resource.RLIMIT_NPROC, (threads, hard))
    # soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    # print('Soft limit changed to :', soft)

    # set a cache that spills to disk to protect memory
    rp = 'r.pickle'
    if os.path.isfile(rp):
        with open(rp, 'rb') as pckl:
            r = pickle.load(pckl)
    else:
        if max_memory is not None:
            available_memory = max_memory
        else:
            available_memory = psutil.virtual_memory().available
        cache = Chest(available_memory=available_memory)
        if os.path.isfile('ld.matrix'):
            print('Loading precomputed LD matrix')
            # The parquet file already holds the final concatenated dataframe,
            # so return it directly instead of re-filtering it below
            return dd.read_parquet('ld.matrix')
        else:
            print('Computing LD score per window')
            # Get the overlapping snps and their info
            shared = ['chrom', 'snp', 'pos']
            mbim = rbim.merge(tbim, on=shared, suffixes=['_ref', '_tar'])
            assert mbim.i_ref.values.shape == mbim.i_tar.values.shape
            # Get the number of bins or loci to be computed
            nbins = np.ceil(max(mbim.pos) / (kbwindow * 1000)).astype(int)
            # Get the limits of the loci
            bins = np.linspace(0,
                               max(mbim.pos) + 1,
                               num=nbins,
                               endpoint=True,
                               dtype=int)
            if bins.shape[0] == 1:
                # Fix the special case in which the window is much bigger than
                # the range
                bins = np.append(bins, kbwindow * 1000)
            # Get the proper intervals into the dataframe
            mbim['windows'] = pd.cut(mbim['pos'], bins, include_lowest=True)
            # Compute each locus in parallel
            dask_rgeno = dask.delayed(rgeno)
            dask_tgeno = dask.delayed(tgeno)
            delayed_results = [
                dask.delayed(single_window)(df, rg, tg, threads, max_memory,
                                            justd, extend)
                for rg, tg, ridx, tidx, df in window_yielder(
                    dask_rgeno, dask_tgeno, mbim)
            ]
            opts = dict(num_workers=threads,
                        cache=cache,
                        pool=ThreadPool(threads))
            with ProgressBar(), dask.config.set(**opts), open(rp, 'wb') as pck:
                r = tuple(dask.compute(*delayed_results))
                pickle.dump(r, pck)
    r = tuple(x for x in r if x is not None)
    if justd:
        return r
    r = pd.concat(r)
    dd.to_parquet(r, 'ld.matrix')
    return r
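
# Hypothetical calls for the two return modes described in the docstring,
# given genotype arrays and bim tables loaded elsewhere:
#   raw = get_ld(rgeno, rbim, tgeno, tbim, justd=True)  # (snps, D_ref, D_tar)
#   scores = get_ld(rgeno, rbim, tgeno, tbim)           # tagging/cotagging table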
Example 10
def prune_it(df,
             geno,
             pheno,
             label,
             step=10,
             threads=1,
             beta='slope',
             max_memory=None,
             n=None):
    """
    Prune and score a dataframe of sorted snps

    :param n: Max number of records to prune
    :param max_memory: Maximum available memory
    :param str beta: Column with the effect size
    :param int threads: Number of threads to use
    :param int step: Step of the pruning
    :param str label: Name of the current pruning
    :param pheno: Phenotype array
    :param geno: Genotype array
    :param df: sorted dataframe
    :return: scored dataframe
    """
    # # Set CPU limits
    # soft, hard = resource.getrlimit(resource.RLIMIT_NPROC)
    # resource.setrlimit(resource.RLIMIT_NPROC, (threads, hard))
    # soft, hard = resource.getrlimit(resource.RLIMIT_NPROC)
    # print('Soft limit changed to :', soft)
    # set Cache to protect memory spilling
    if max_memory is not None:
        available_memory = max_memory
    else:
        available_memory = psutil.virtual_memory().available
    cache = Chest(available_memory=available_memory)
    print('Pruning %s...' % label)
    opts = dict(num_workers=threads, cache=cache, pool=ThreadPool(threads))
    if n is not None:
        print('Just pruning', n)
        tup = (df.iloc[:n], geno, pheno, label, beta)
        delayed_results = [dask.delayed(single_score)(*tup)]
        with ProgressBar(), dask.config.set(**opts):
            res = list(dask.compute(*delayed_results))
    else:
        # Process the first 200 snps one at a time, regardless of the step
        # passed. This gives a finer grain in the early part of the pruning,
        # where most of the causal variants should be captured
        print('First 200')
        # Create a generator with the subset and the arguments for the single
        # function
        gen = ((df.iloc[:i], geno, pheno, label, beta)
               for i in range(1, min(201, df.shape[0] + 1), 1))
        # Run the scoring in parallel threads
        delayed_results = [dask.delayed(single_score)(*i) for i in gen]
        with ProgressBar(), dask.config.set(**opts):
            res = list(dask.compute(*delayed_results))
        print('Processing the rest of variants')
        if df.shape[0] > 200:
            # Pass `beta` here too, matching the argument tuples built above
            ngen = ((df.iloc[:i], geno, pheno, label, beta)
                    for i in range(201, df.shape[0] + 1, int(step)))
            delayed_results = [dask.delayed(single_score)(*i) for i in ngen]
            with ProgressBar(), dask.config.set(**opts):
                res += list(dask.compute(*delayed_results))
    return pd.DataFrame(res)
Example 11
# Set up the frame arguments
from mapcombine import outer_process
jobs = [[args, params, i] for i in range(args.frame, args.frame_end + 1)]

# schedule the frames, one IPython process each
# if only one process or parallel not set, use normal map
import time
start_time = time.time()
if len(jobs) > 1 and args.parallel:
    from IPython.parallel import Client  # 'ipyparallel' in newer IPython releases
    p = Client(profile='mpi')
    stuff = p.load_balanced_view().map_async(outer_process, jobs)
else:
    stuff = map(outer_process, jobs)

# insert new results into the out-of-core dictionary (Chest)
nelm = (params["shape_mesh"][0] * params["shape_mesh"][1]
        * params["shape_mesh"][2])
from chest import Chest
for i, res in enumerate(stuff):
    c1 = Chest(path=res['cpath'])
    c = Chest(path=args.chest_path)
    c.update(c1)
    c.flush()
    c1.drop()

    # Print a progress update
    run_time = time.time() - start_time
    print("Processed {:d}th frame after {:f}s ({:f} eps)".format(
        i, run_time, (i + 1) * nelm / run_time))
Example 12
 def __init__(self):
     # Reuse one Chest instance for all of the settings lookups
     settings = Chest()
     self.connection = connector.connect(
         host=settings["host"],
         user=settings["user"],
         passwd=getpass.getpass("Password for {user}@{host}: ".format(
             user=settings["user"], host=settings["host"])))
Example 13
from os import getcwd
from os.path import exists, join

import numpy as np

from chest import Chest
from slict import CachedSlict
workdirs = [join(getcwd(), x["name"]) for x in overrides]
configs = [
    configure(base, override, workdir)
    for override, workdir in zip(overrides, workdirs)
]

data_table = {}

max_index = -1
height = 'H_exp'
for p, wd in zip(configs, workdirs):
    path = join(wd, "{}-results".format(p['name']))
    print(path)
    if exists(path):
        c = Chest(path=path)
        sc = CachedSlict(c)
        times = sc[:, height].keys()[:max_index]
        data_table[p['viscosity'], p['conductivity'], 'time'] = np.array(times)
        data_table[p['viscosity'], p['conductivity'],
                   'height'] = np.array([sc[t, height] for t in times])
        data_table[p['viscosity'], p['conductivity'], 'atwood'] = np.array(
            [4 * np.mean(sc[t, 't_abs_proj_z']) for t in times])
        for k, v in p.items():
            data_table[p['viscosity'], p['conductivity'], k] = v

import pickle
with open("data_table.p", "wb") as f:
    pickle.dump(data_table, f)
print(data_table)
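
# A matching read-back sketch for the pickle written above; entries are keyed
# by (viscosity, conductivity, field) tuples.
import pickle
with open("data_table.p", "rb") as f:
    data_table = pickle.load(f)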
Example 14
 def read_geno(bfile, freq_thresh, threads, check=False, max_memory=None,
               usable_snps=None, normalize=False, prefix='my_geno',
               thinning=None):
     chunks = (10000, 10000)
     # set a cache that spills to disk to protect memory
     if max_memory is not None:
         available_memory = max_memory
     else:
         available_memory = psutil.virtual_memory().available
     cache = Chest(available_memory=available_memory)
     (bim, fam, g) = read_plink(bfile)  # read the files using pandas_plink
     g_std = da.nanstd(g, axis=1)
     if check:
         with ProgressBar():
             print('Removing invariant sites')
             idx = (g_std != 0).compute(cache=cache)
         g = g[idx, :]
         bim = bim[idx].copy().reset_index(drop=True)
         bim.i = bim.index.tolist()
         g_std = g_std[idx]
         del idx
         gc.collect()
     if usable_snps is not None:
         print('Restricting genotype to user specified variants')
         idx = sorted(bim[bim.snp.isin(usable_snps)].i.values)
         g = g[idx, :]
         g_std = g_std[idx]  # keep the per-variant std aligned with g
         bim = bim[bim.i.isin(idx)].copy().reset_index(drop=True)
         bim.i = bim.index.tolist()
     # Allele frequency = allele count / (2 * number of samples); g is
     # (variants x samples) here, so the sample count is g.shape[1]
     mafs = g.sum(axis=1) / (2 * g.shape[1]) if freq_thresh > 0 else None
     # Filter MAF
     if freq_thresh > 0:
         print('Filtering MAFs smaller than', freq_thresh)
         print('    Genotype matrix shape before', g.shape)
         assert freq_thresh < 0.5
         good = (mafs < (1 - float(freq_thresh))) & (mafs > float(
             freq_thresh))
         with ProgressBar():
             with dask.config.set(pool=ThreadPool(threads)):
                 good, mafs = dask.compute(good, mafs, cache=cache)
         g = g[good, :]
         g_std = g_std[good]  # keep the per-variant std aligned with g
         print('    Genotype matrix shape after', g.shape)
         bim = bim[good]
         bim['mafs'] = mafs[good]
         bim.reset_index(drop=True, inplace=True)
         bim.i = bim.index.tolist()
         del good
         gc.collect()
     if not is_transposed(g, bim.shape[0], fam.shape[0]):
         g = g.T
     if normalize:
         print('Normalizing to mean 0 and sd 1')
         mean = da.nanmean(g.T, axis=1)
         g = (g - mean) / g_std
     if thinning is not None:
         print("Thinning genotype to %d variants" % thinning)
         idx = np.linspace(0, g.shape[1], num=thinning, dtype=int,
                           endpoint=False)
         bim = bim.reindex(index=idx)
         g = g[:, idx].rechunk('auto')
         bim['i'] = range(thinning)
     h5 = '%s.hdf5' % prefix
     if not os.path.isfile(h5):
         with ProgressBar(), h5py.File(h5, 'w') as hd5:
             print("Sending processed genotype to HDF5")
             chroms = sorted(bim.chrom.unique().astype(int))
             gr = bim.groupby('chrom')
             for chrom in chroms:
                 df = gr.get_group(str(chrom))
                 ch = g[:, df.i.values]
                 ch = ch.rechunk(estimate_chunks(ch.shape, threads,
                                                 memory=available_memory))
                 print('\tChromosome %s: %d individuals %d  variants' % (
                     chrom, ch.shape[0], ch.shape[1]))
                 hd5.create_dataset('/%s' % chrom,  data=ch.compute())
                 del ch
             del gr
     return g, h5, bim, fam
Example 15
    #                          '5) number of nodes to use, as a comma separated '
    #                          'string (e.g. --SLURM def-account,32,00:30:00,'
    #                          '32GB)')

    args = parser.parse_args()
    # if args.SLURM is not None:
    #     project, cpus, t, mem = args.SLURM.split(',')
    #     cluster = SLURMCluster(cores=cpus, project=project, memory=mem, time=t)
    # else:
    #     cluster = LocalCluster(n_workers=args.threads, processes=False)
    # client = Client(cluster)
    if args.maxmem is not None:
        available_memory = args.maxmem
    else:
        available_memory = psutil.virtual_memory().available
    cache = Chest(available_memory=available_memory, path=os.getcwd())
    # dask's array.chunk-size expects bytes (or a string like '128MiB'), so
    # give each worker thread an equal share of the available memory
    cs = int(available_memory / args.threads)
    assert cs > 0
    arr = dask.config.get('array')
    arr.update({'chunk-size': cs})
    dask.config.set(scheduler='threads', num_workers=args.threads,
                    memory=args.maxmem, cache=cache, array=arr)
    # cluster = LocalCluster()
    # print(cluster)
    # client = Client(cluster)
    main(args.geno, args.pheno, args.prefix, args.pval_range, args.ld_range,
         gwas=args.sumstats, check=args.nocheck, threads=args.threads,
         covs=args.covs, memory=args.maxmem, validate=args.validate,
         freq_thresh=args.f_thr, snp_subset=args.snp_subset,
         thinning=args.thinning)
Example 16

punkts = [(350, 300, u'Play', (11, 0, 77), (250, 250, 30), 0),
          (350, 340, u'Exit', (11, 0, 77), (250, 250, 30), 1)]
game = Menu(punkts)
game.menu()
# Object recognizer 3000!
## And why not 4000? :')
for row in laval:
    for column in row:
        if column == "-":
            block = Block(XX, top)
            blocks.add(block)
            allsprites.add(block)
        if column == "+":
            chest = Chest(XX, top)
            chests.add(chest)
            allsprites.add(chest)
        XX += block_width
    top += block_height
    XX = 0

clock = pygame.time.Clock()

game_over = False
sar = False
exit_program = False


# Screen drawing. Replace/fix
# Blinking is no longer needed (the dots aren't needed)
Example 17
def read_geno(bfile,
              freq_thresh,
              threads,
              flip=False,
              check=False,
              max_memory=None,
              usable_snps=None):
    """
    Read the plink bed fileset, restrict it to a given frequency (optional;
    freq_thresh), flip the sequence to match the MAF (optional; flip), and
    check for constant variants (optional; check)

    :param max_memory: Maximum allowed memory
    :param bfile: Prefix of the bed (plink) fileset
    :param freq_thresh: If greater than 0, limit MAF to at least freq_thresh
    :param threads: Number of threads to use in computation
    :param flip: Whether to check for flips and to fix the genotype file
    :param check: Whether to check for constant sites
    :param usable_snps: If not None, restrict the genotype to these variants
    :return: Dataframes (bim, fam) and array corresponding to the bed fileset
    """
    # set a cache that spills to disk to protect memory
    if max_memory is not None:
        available_memory = max_memory
    else:
        available_memory = psutil.virtual_memory().available
    cache = Chest(available_memory=available_memory)
    (bim, fam, g) = read_plink(bfile)  # read the files using pandas_plink
    m, n = g.shape  # get the dimensions of the genotype
    # remove invariant sites
    if check:
        g_std = g.std(axis=1)
        with ProgressBar():
            print('Removing invariant sites')
            with dask.config.set(pool=ThreadPool(threads)):
                idx = (g_std != 0).compute(cache=cache)
        g = g[idx, :]
        bim = bim[idx].copy().reset_index(drop=True)
        bim.i = bim.index.tolist()
        del g_std, idx
        gc.collect()
    if usable_snps is not None:
        idx = bim[bim.snp.isin(usable_snps)].i.tolist()
        g = g[idx, :]
        bim = bim[bim.i.isin(idx)].copy().reset_index(drop=True)
        bim.i = bim.index.tolist()
    # compute the mafs if required
    mafs = g.sum(axis=1) / (2 * n) if flip or freq_thresh > 0 else None
    if flip:
        # check possible flips
        flips = np.zeros(bim.shape[0], dtype=bool)
        flips[np.where(mafs > 0.5)[0]] = True
        bim['flip'] = flips
        vec = np.zeros(flips.shape[0])
        vec[flips] = 2
        # perform the flipping
        g = abs(g.T - vec)
        del flips
        gc.collect()
    else:
        g = g.T
    # Filter MAF
    if freq_thresh > 0:
        print('Filtering MAFs smaller than', freq_thresh)
        print('    Genotype matrix shape before', g.shape)
        assert freq_thresh < 0.5
        good = (mafs < (1 - float(freq_thresh))) & (mafs > float(freq_thresh))
        with ProgressBar():
            with dask.config.set(pool=ThreadPool(threads)):
                good, mafs = dask.compute(good, mafs, cache=cache)
        g = g[:, good]
        print('    Genotype matrix shape after', g.shape)
        print(bim.shape)
        bim = bim[good]
        bim['mafs'] = mafs[good]
        del good
        gc.collect()
    bim = bim.reset_index(drop=True)  # Get the indices in order
    # Fix the i such that it matches the genotype indices
    bim['i'] = bim.index.tolist()
    # Get chunks appropriate for the number of threads
    g = g.rechunk(estimate_chunks(g.shape, threads, memory=available_memory))
    del mafs
    gc.collect()
    return bim, fam, g
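
# Hypothetical call, given a plink fileset prefix 'mystudy':
#   bim, fam, g = read_geno('mystudy', freq_thresh=0.01, threads=4,
#                           flip=True, check=True)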
Example 18
    def init_tile(self):
        dirs = {
            'N': (0, 1),
            'NW': (-1, 1),
            'NE': (1, 1),
            'W': (-1, 0),
            'E': (1, 0),
            'S': (0, -1),
            'SE': (1, -1),
            'SW': (-1, -1)
        }

        floors = {
            key: self.tile_at(*dirs[key], self.TYPE_FLOOR)
            for key in dirs.keys()
        }
        walls = {key: self.tile_at(*dirs[key]) for key in dirs.keys()}
        surround_count = sum([int(walls[k]) for k in ['N', 'E', 'S', 'W']])
        offset = self.game.dungeon_offset / self.game.TILE_WIDTH

        if self.will_spawn_entity and self.can_spawn_entity:
            # Stronger enemy becomes more likely as the character levels up
            threshold = self.game.world.character.level * 0.08 - 0.05
            enemy_type = (self.enemies[0] if random.random() >= threshold
                          else self.enemies[1])
            self.entities.append(
                enemy_type(self.game, self.tx + offset, self.ty + offset,
                           self.group))

        if self.will_spawn_chest:

            if (surround_count == 3
                    and not walls['E']) or (walls['N'] and walls['W']
                                            and surround_count == 2
                                            and walls['SW']):
                ori = Chest.LEFT
                cx, cy = Chest.OFFSET[ori]
                self.hitboxes.add((cx + 2, cy, cx + 8, cy + 21))
            elif (surround_count == 3
                  and not walls['W']) or (walls['N'] and walls['E']
                                          and surround_count == 2
                                          and walls['NE']):
                ori = Chest.RIGHT
                cx, cy = Chest.OFFSET[ori]
                self.hitboxes.add((cx, cy, cx + 6, cy + 21))
            elif surround_count == 3 and not walls['N']:
                ori = Chest.BOTTOM
                cx, cy = Chest.OFFSET[ori]
                self.hitboxes.add((cx, cy, cx + 17, cy + 7))
            elif walls['N'] or (floors['N'] and floors['S'] and floors['E']
                                and floors['W']):
                ori = Chest.TOP
                cx, cy = Chest.OFFSET[ori]
                self.hitboxes.add((cx, cy, cx + 17, cy + 10))
            elif walls['W']:
                ori = Chest.LEFT
                cx, cy = Chest.OFFSET[ori]
                self.hitboxes.add((cx + 2, cy, cx + 8, cy + 21))
            elif walls['E']:
                ori = Chest.RIGHT
                cx, cy = Chest.OFFSET[ori]
                self.hitboxes.add((cx, cy, cx + 6, cy + 21))
            else:
                ori = Chest.BOTTOM
                cx, cy = Chest.OFFSET[ori]
                self.hitboxes.add((cx, cy, cx + 17, cy + 7))

            self.chest = Chest(self.game, self.tx + offset, self.ty + offset,
                               self.group, ori, self.chest_type)

        if self.type == self.TYPE_WALL:
            empty = {
                key: self.tile_at(*dirs[key], self.TYPE_EMPTY)
                for key in dirs.keys()
            }

            corners = []

            if walls['E'] and walls['S']:
                if empty['SE']:
                    self.hitboxes.add((self.width - 12, 0, self.width, 17))
                    corners.append(self.OUTER_TOP_LEFT)
                elif walls['SE']:
                    self.hitboxes.add((0, 0, self.width, self.height))
                    corners.append(self.TOP_LEFT_FILL)
                else:
                    self.hitboxes.add((0, 0, 12, self.height))
                    self.hitboxes.add(
                        (0, self.height - 17, self.width, self.height))
                    corners.append(self.TOP_LEFT)
            elif walls['W'] and walls['S']:
                if empty['SW']:
                    self.hitboxes.add((0, 0, 12, 17))
                    corners.append(self.OUTER_TOP_RIGHT)
                elif walls['SW']:
                    self.hitboxes.add((0, 0, self.width, self.height))
                    corners.append(self.TOP_RIGHT_FILL)
                else:
                    self.hitboxes.add(
                        (self.width - 12, 0, self.width, self.height))
                    self.hitboxes.add(
                        (0, self.height - 17, self.width, self.height))
                    corners.append(self.TOP_RIGHT)
            elif walls['W'] and walls['N']:
                if empty['NW']:
                    self.hitboxes.add((0, self.height - 16, 12, self.height))
                    corners.append(self.OUTER_BOTTOM_RIGHT)
                elif walls['NW']:
                    self.hitboxes.add((0, 0, self.width, self.height))
                    corners.append(self.BOTTOM_RIGHT_FILL)
                else:
                    self.hitboxes.add(
                        (self.width - 12, 0, self.width, self.height))
                    self.hitboxes.add((0, 0, self.width, 17))
                    corners.append(self.BOTTOM_RIGHT)
            elif walls['E'] and walls['N']:
                if empty['NE']:
                    self.hitboxes.add((self.width - 12, self.height - 16,
                                       self.width, self.height))
                    corners.append(self.OUTER_BOTTOM_LEFT)
                elif walls['NE']:
                    self.hitboxes.add((0, 0, self.width, self.height))
                    corners.append(self.BOTTOM_LEFT_FILL)
                else:
                    self.hitboxes.add((0, 0, 12, self.height))
                    self.hitboxes.add((0, 0, self.width, 17))
                    corners.append(self.BOTTOM_LEFT)

            if surround_count == 1 and floors['S']:
                self.hitboxes.add((0, 0, self.width, self.height))
                return self.tile_sprite(self.CENTER_BOTTOM)
            if surround_count <= 2 and floors['S'] and floors['N']:
                self.hitboxes.add((0, 0, self.width, self.height))
                return self.tile_sprite(self.CENTER_BOTTOM)

            if len(corners) > 0:
                if surround_count >= 3:
                    if not walls['S'] or (not walls['SE'] and not walls['SW']):
                        self.hitboxes.add((0, 0, self.width, self.height))
                        return self.tile_sprite(self.CENTER_BOTTOM)
                    else:
                        self.hitboxes.add((0, 0, self.width, self.height))
                        return self.tile_sprite(self.CENTER)

                for corner in corners:
                    self.tile_sprite(corner)

            elif walls['N'] and walls['S']:
                if not floors['E']:
                    self.hitboxes.add(
                        (self.width - 12, 0, self.width, self.height))
                    self.tile_sprite(self.RIGHT)
                elif not floors['W']:
                    self.hitboxes.add((0, 0, 12, self.height))
                    self.tile_sprite(self.LEFT)
            elif walls['E'] and walls['W']:
                if not floors['N']:
                    self.hitboxes.add(
                        (0, self.height - 17, self.width, self.height))
                    self.tile_sprite(self.TOP)
                elif not floors['S']:
                    self.hitboxes.add((0, 0, self.width, 17))
                    self.tile_sprite(self.BOTTOM)

        if self.type != self.TYPE_EMPTY:
            self.tile_sprite(self.FLOOR)

            if self.spawn_exit:
                self.tile_sprite("exit")
Example 19
def single_window(df,
                  rg,
                  tg,
                  threads=1,
                  max_memory=None,
                  justd=False,
                  extend=False):
    """
    Helper function to compute the correlation between variants from a genotype
    array
    :param df: Merged dataframe mapping of the positions in the genotypes
    :param rg: slice of Genotype array of the reference population
    :param tg: slice of Genotype array of the target population
    :param threads: Number of threads to estimate memory use
    :param max_memory: Memory limit
    :param justd: Return the raw LD matrices instead of their dot products
    :param extend: 'Circularize' the genome by extending both ends
    :return: (snps, D_ref, D_tar) if justd, else a dataframe with the
        tagging/cotagging scores per variant
    """
    if not df.empty:
        # set a cache that spills to disk to protect memory
        if max_memory is not None:
            available_memory = max_memory
        else:
            available_memory = psutil.virtual_memory().available / 2
        cache = Chest(available_memory=available_memory)
        # Make sure chunks make sense
        chunk_opts = dict(threads=threads, memory=available_memory)
        if not isinstance(rg, np.ndarray):
            rg = rg.rechunk(estimate_chunks(shape=rg.shape, **chunk_opts))
            tg = tg.rechunk(estimate_chunks(shape=tg.shape, **chunk_opts))
        # extend the genotype at both end to avoid edge effects
        if extend:
            # get the indices of the subset genotype array
            nidx = np.arange(rg.shape[1])
            # Split the array in half (approximately)
            idx_a, idx_b = np.array_split(nidx, 2)
            # Get the extended indices
            i = np.concatenate([idx_a[::-1][:-1], nidx, idx_b[::-1][1:]])
            # Re-subset the genotype arrays with the extensions
            rg, tg = rg[:, i], tg[:, i]
            assert rg.shape[1] == tg.shape[1]
            # Compute the correlation as X'X/N
            rho_r = da.dot(rg.T, rg) / rg.shape[0]
            rho_t = da.dot(tg.T, tg) / tg.shape[0]
            # remove the extras
            idx = np.arange(i.shape[0])[idx_a.shape[0] - 1:(nidx.shape[0] +
                                                            idx_b.shape[0])]
            rho_r, rho_t = rho_r[idx, :], rho_t[idx, :]
            rho_r, rho_t = rho_r[:, idx], rho_t[:, idx]
            # Make sure the shape match
            assert rho_r.shape[1] == rho_r.shape[0]
            assert rho_t.shape[1] == rho_t.shape[0]
        else:
            # Just compute the correlations
            rho_r = da.dot(rg.T, rg) / rg.shape[0]
            rho_t = da.dot(tg.T, tg) / tg.shape[0]
        if justd:
            # return the raw LD matrices
            return df.snp, rho_r, rho_t
        gc.collect()
        # compute the cotagging/tagging scores
        cot = da.diag(da.dot(rho_r, rho_t))
        ref = da.diag(da.dot(rho_r, rho_r))
        tar = da.diag(da.dot(rho_t, rho_t))
        stacked = da.stack([df.snp, ref, tar, cot], axis=1)
        chunks = estimate_chunks(stacked.shape, threads, max_memory)
        stacked = da.rechunk(stacked, chunks=chunks)
        columns = ['snp', 'ref', 'tar', 'cotag']
        return dd.from_dask_array(stacked,
                                  columns=columns).compute(cache=cache)
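
# Toy illustration of the mirror-extension ("circularize") trick used above:
# reflect the front half onto the start and the back half onto the end.
import numpy as np

nidx = np.arange(6)                      # variant indices 0..5
idx_a, idx_b = np.array_split(nidx, 2)   # [0 1 2] and [3 4 5]
i = np.concatenate([idx_a[::-1][:-1], nidx, idx_b[::-1][1:]])
print(i)                                 # [2 1 0 1 2 3 4 5 4 3]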
Example 20
 # Presumably a property setter (@memory.setter) in the full class
 def memory(self, memory):
     if memory is not None:
         self.__memory = memory
     else:
         # Default to half of the memory currently available on the system
         self.__memory = psutil.virtual_memory().available / 2
     self.cache = Chest(available_memory=self.__memory)
Example 21
from chest import Chest
c = Chest()

# Acts like a normal dictionary
c['x'] = [1, 2, 3]
print(c['x'])

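# Beyond the plain dictionary interface, the Chest features used throughout
# these examples are the on-disk path, the memory bound, and explicit
# flushing; a minimal sketch with an illustrative path:
from chest import Chest

c = Chest(path='demo-chest', available_memory=1e6)  # spill to disk past ~1 MB
c['y'] = list(range(10000))
c.flush()                      # persist in-memory items under the chest path

c2 = Chest(path='demo-chest')  # reopen the same on-disk store
print(c2['y'][:5])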
Example 22
def main():
    from multiprocessing import Queue, Process
    # import argparse

    # parser = argparse.ArgumentParser(prog="perceptron", description="perceptron à execution asynchrone")
    #
    # parser.add_argument(
    #     "corpus",
    #     type=argparse.FileType(
    #         mode='r',
    #         encoding='utf-8'
    #     ),
    #     help="ensemble de couple (objet, classe)"
    # )
    #
    # parser.add_argument(
    #     "iterMax",
    #     type=int,
    #     default=5,
    #     help="nombre de tours que le classifieur va tourner"
    # )
    #
    # parser.add_argument(
    #     "-s",
    #     "--sequential",
    #     action='store_true',
    #     default=False
    # )
    #
    # parser.add_argument(
    #     "-v",
    #     "--verbose",
    #     help="méthode affichant étape par étape ce qui se passe."
    # )

    # args = parser.parse_args()

    # corpus = args.corpus

    corpus = [
        ("carrément", "ADV"),
        ("constitutition", "ADV"),
        ("véritablement", "ADV"),
        ("camarade", "NOUN"),
        ("véritable", "ADJ"),
        ("cartable", "NOUN"),
        ("plage", "NOUN"),
        ("vite", "ADJ")
    ]

    examples = Chest(path='examples')
    weights = Chest(path='weights')
    iterMax = 5
    i = 0

    # counting_queue = Queue()
    # examples_queue = Queue()


    # initialization phase

    # sequence_queue, counting_queue, examples_queue, examples
    sequence_queue = Queue()
    for y in corpus:
        sequence_queue.put(y)
    process_powerset = Process(target=traiter_sequence,
                               args=(sequence_queue, examples,
                                     [x[0] for x in corpus]),
                               name="powerset")
    process_powerset.start()
    process_powerset.join()
Example 23
        required=True,
        help="directory where to find SQL SELECT statements,"
        " one single statement per file. Can be configured with monthly_check.source"
    )
    parser.add_argument(
        '-o',
        '--output',
        conf_key="monthly_check.output",
        required=True,
        help=
        "Where to write the report. Can be configured with monthly_check.output."
    )
    parser.add_argument(
        '-m',
        '--month',
        conf_key="monthly_check.month",
        default=month_date.previous(),
        type=month_date.from_str,
        help=
        "6 digits, 4 for the year then 2 for the month (ex.: 201901 is Jan 2019)."
        " Default value is last month.")
    CursorProvider.add_arguments_to(parser)

    args = parser.parse_args()
    Chest(vars(args))

    logging.basicConfig(level=config["logging"]["level"],
                        format=config["logging"]["format"])

    monthly_check(args.month, args.source, args.output)
Example 24
def main():
    parser = argparse.ArgumentParser(description="P0 Adventure")
    parser.add_argument(
        "--savefile",
        dest="savefile",
        default="game.json",
        help="The save file. default: 'game.json'",
    )
    parser.add_argument(
        "--new-game",
        dest="new_game",
        default=False,
        action="store_true",
        help="Create a new save file.",
    )
    parser.add_argument(
        "-b",
        dest="bonus_tasks",
        default=False,
        action="store_true",
        help="enable bonus tasks",
    )
    parser.add_argument(
        "--print-bonus",
        dest="print_bonus",
        default=False,
        action="store_true",
        help="print bonus task list and exit",
    )
    args = parser.parse_args()

    if args.print_bonus:
        print_bonus_tasks()
        return

    # your code starts here
    save = args.savefile
    if args.new_game:
        user = Player()
        user.createNewCharacter()
        gamedata = GameData(player=user, savefile=save, bonus_tasks=args.bonus_tasks)
        if args.bonus_tasks:
            totengraeber = Gravedigger()
            gamedata.gravedigger = totengraeber
            truhe = Chest()
            gamedata.chest = truhe

    else:
        gamedata = load_gamedata(save)
        user = gamedata.player
        if args.bonus_tasks:
            totengraeber = gamedata.gravedigger
            truhe = gamedata.chest

    schmied = Shopkeeper(name="blacksmith", inventory=blacksmith_items)
    druide = Shopkeeper(name="druid", inventory=druid_items)
    prog0 = Village(
        player=user, bonus_tasks=args.bonus_tasks, blacksmith=schmied, druid=druide
    )

    if args.bonus_tasks:
        prog0.gravedigger = totengraeber
        prog0.chest = truhe

    while True:
        user_choice = village(prog0)

        if user_choice == 5:
            dung0 = Dungeon(player=user, bonus_tasks=args.bonus_tasks)
            if args.bonus_tasks:
                dung0.gravedigger = totengraeber
            dungeon(dung0)
        elif user_choice == 6:
            save_gamedata(gamedata, save)
            print("Game saved to {}".format(save))
        elif user_choice == 0:
            quit(gamedata, save)
            break
        else:
            raise KeyError(
                "main.py Something went wrong with the user choosing what to do!"
            )

    sys.exit(0)