Example #1
def make_rows_for_stats(stats):
    # row 0: one 'room (kind)' label above each group of method columns;
    # row 1: the method names repeated under every group
    _rows = [None] * 2
    _rows[0] = [''] * (1 + len(all_rooms) * len(all_kinds) * len(all_methods))
    for i_roomkind, (room, kind) in enumerate(iterprod(all_rooms, all_kinds)):
        _rows[0][1 + len(all_methods) * i_roomkind] = f'room{room} ({kind})'
    _rows[1] = [''] + all_methods * len(all_rooms) * len(all_kinds)
    for metric in all_metrics:
        _row = [f'Δ{metric}' if method_rev == 'delta' else metric]
        for room, kind, method in iterprod(all_rooms, all_kinds, all_methods):
            _row.append(stats.get(Metadata(method, room, kind, metric), '-'))
        _rows.append(_row)
    return _rows
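Here and throughout these examples, iterprod is the common alias for itertools.product. As a quick illustration of the header layout the function above builds, a minimal self-contained sketch with hypothetical room/kind/method values (not from the original project):

from itertools import product as iterprod

all_rooms = [1, 2]
all_kinds = ['real', 'simulated']
all_methods = ['baseline', 'proposed']

header = [''] * (1 + len(all_rooms) * len(all_kinds) * len(all_methods))
for i, (room, kind) in enumerate(iterprod(all_rooms, all_kinds)):
    # each (room, kind) group owns len(all_methods) consecutive columns
    header[1 + len(all_methods) * i] = f'room{room} ({kind})'
print(header)
# ['', 'room1 (real)', '', 'room1 (simulated)', '', 'room2 (real)', '', 'room2 (simulated)', '']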
Example #2
    def _generate_conformers(self):
        """
        Generate rigid body conformers of a complex by (1) Fixing the first m
        olecule, (2) initialising the second molecule's COM evenly on the points
        of a sphere around the first with a random rotation and (3) iterating
        until all molecules in the complex have been added
        """
        if len(self.molecules) < 2:
            # Single (or zero) molecule complex only has a single *rigid body*
            # conformer
            self.conformers = [get_conformer(name=self.name, species=self)]

            return None

        n_molecules = len(self.molecules)  # Number of molecules in the complex
        self.conformers = []
        n = 0  # Current conformer number

        points_on_sphere = get_points_on_sphere(
            n_points=Config.num_complex_sphere_points)

        for _ in iterprod(range(Config.num_complex_random_rotations),
                          repeat=n_molecules - 1):
            # Generate the rotation thetas and axes
            rotations = [
                np.random.uniform(-np.pi, np.pi, size=4)
                for _ in range(n_molecules - 1)
            ]

            for points in iterprod(points_on_sphere, repeat=n_molecules - 1):

                conformer = get_conformer(species=self,
                                          name=f'{self.name}_conf{n}')
                atoms = get_complex_conformer_atoms(self.molecules, rotations,
                                                    points)
                conformer.set_atoms(atoms)

                self.conformers.append(conformer)
                n += 1

                if n == Config.max_num_complex_conformers:
                    logger.warning(
                        f'Generated the maximum number of complex conformers ({n})'
                    )
                    return None

        logger.info(f'Generated {n} conformers')
        return None
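The enumeration device here is iterprod(..., repeat=n_molecules - 1): one independent choice per molecule beyond the fixed first one. A toy sketch with placeholder points (not the project's real geometry):

from itertools import product as iterprod

points_on_sphere = ['N', 'S', 'E']  # stand-ins for actual 3-D sphere points
n_molecules = 3                     # the first molecule stays fixed

# one sphere point is chosen independently for each added molecule, giving
# len(points_on_sphere) ** (n_molecules - 1) candidate placements
for points in iterprod(points_on_sphere, repeat=n_molecules - 1):
    print(points)  # ('N', 'N'), ('N', 'S'), ..., 9 tuples in total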
Example #3
    def generate(self, ds):
        # check whether the ds is balanced
        unique_super = ds.sa[self.attr].unique
        nunique_subord = []
        for usuper in unique_super:
            mask = ds.sa[self.attr].value == usuper
            nunique_subord.append(len(np.unique(ds[mask].sa[self.partitioner.attr].value)))
        if len(np.unique(nunique_subord)) != 1:
            warnings.warn('One or more superordinate attributes do not have the same '
                          'number of subordinate attributes. This could lead to '
                          'unbalanced partitions.', category=RuntimeWarning)

        # make a fake ds from the first feature to use the attributes
        fakeds = ds[:, 0]
        if self.selection_strategy != 'equidistant':
            raise NotImplementedError("This strategy is not yet implemented")

        attr_value = ds.sa[self.attr].value
        uattr = ds.sa[self.attr].unique
        uattr_masks = [attr_value == u for u in uattr]

        for partitionings in iterprod(*[self.partitioner.generate(fakeds[uattr_mask]) for uattr_mask in uattr_masks]):
            pds = ds.copy(deep=False)
            target_partitioning = np.zeros(len(pds), dtype=int)
            for uattr_mask, partitioning in zip(uattr_masks, partitionings):
                target_partitioning[uattr_mask] = partitioning.sa[self.partitioner.space].value
            pds.sa[self.space] = target_partitioning
            yield pds
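The iterprod(*[...]) call is what composes the result: each inner generator yields candidate partitionings for one superordinate group, and the product pairs every per-group choice with every choice in the other groups. A stripped-down sketch of that composition with made-up fold labels (not the original dataset API):

from itertools import product as iterprod

# one list of candidate fold assignments per superordinate group
group_partitionings = {
    'faces': [[1, 2], [2, 1]],
    'houses': [[1, 2], [2, 1]],
}

# every per-group choice is paired with every choice in the other groups
for combo in iterprod(*group_partitionings.values()):
    print(dict(zip(group_partitionings, combo)))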
Example #4
    def make_wtvsparamval(self, allparams=True, paramlist=None):
        '''
        template usage vs parameter value
        '''
        if allparams:
            paramlist = set(self.test_metadata.colnames) & set(
                pca.metadata.colnames)
            paramlist = [
                n for n in paramlist
                if self.test_metadata[n].shape == (len(self.test_metadata), )
            ]
            paramlist = sorted(paramlist)

        gs, fig = figures_tools.gen_gridspec_fig(len(paramlist),
                                                 border=(1., 1., 0.5, 0.5),
                                                 space=(0.7, 0.4),
                                                 spsize=(2.5, 1.25))
        subplot_inds = iterprod(range(gs._nrows), range(gs._ncols))

        fig_axes = {
            n: fig.add_subplot(gs[ii, jj])
            for (ii, jj), n in zip(subplot_inds, paramlist)
        }

        for n, ax in fig_axes.items():
            ax.hist(self.trn_metadata[n].data,
                    weights=self.trn_usage_cts,
                    histtype='step',
                    density=True)
            ax.set_yscale('log')
            ax.set_ylabel(r'density', size='x-small')
            ax.tick_params(labelsize='x-small')
            ax.set_xlabel(n.replace('_', r'\_'))

        figures_tools.savefig(fig, fname='wtsvsparams.png', fdir=self.workdir)
Example #6
def convert_xticklabels(suffix, selected_rooms, selected_kinds):
    if suffix == 'unseens':
        return [str(i + 1) for i in range(len(selected_rooms))]
    else:
        return [f'room{i // 2 * 2 + 2}\n({kind})'
                for i, kind in iterprod(range(len(selected_rooms)), selected_kinds)
                ]
Example #8
def reduce(field, target):
    # NB: shadows the builtin reduce; name kept from the original project
    outfield = field.copy()
    maxx, maxy = field.shape

    for i in iterprod(arange(0, maxx), arange(0, maxy)):
        if field[i] == target:
            if identical_neighbours(field, i):
                outfield[i] += 1

    return outfield
Example #9
def wordbreak(s):
    s = s.lower()
    if s in arpabet:
        return arpabet[s]
    middle = len(s) / 2
    # try split points nearest the middle of the word first
    partition = sorted(range(len(s)), key=lambda x: (x - middle)**2 - x)
    for i in partition:
        pre, suf = s[:i], s[i:]
        if pre in arpabet:
            suf_breaks = wordbreak(suf)  # recurse once, not twice
            if suf_breaks is not None:
                return [x + y for x, y in iterprod(arpabet[pre], suf_breaks)]
    return None
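A usage sketch, assuming arpabet has been loaded from NLTK's CMU pronouncing dictionary (as Example #14 below does) and a compound word absent from the dictionary:

import nltk

try:
    arpabet = nltk.corpus.cmudict.dict()
except LookupError:
    nltk.download('cmudict')
    arpabet = nltk.corpus.cmudict.dict()

# 'catdog' is (presumably) not a dictionary entry, so it gets split into
# 'cat' + 'dog' and their pronunciations are concatenated pairwise
print(wordbreak('catdog'))  # e.g. [['K', 'AE1', 'T', 'D', 'AO1', 'G'], ...]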
Example #10
def make_rows_for_stats(stats):
    _rows = [None] * 2
    _rows[0] = [''] * (1 + len(all_kinds) * len(all_methods))
    for i_kind, kind in enumerate(all_kinds):
        _rows[0][1 + len(all_methods) * i_kind] = kind
    _rows[1] = [''] + all_methods * len(all_kinds)
    for metric in all_metrics:
        _row = [metric]
        for kind, method in iterprod(all_kinds, all_methods):
            _row.append(stats[MethodKindMetric(method, kind, metric)])
        _rows.append(_row)
    return _rows
Example #11
def buildorder(formation):
    # determine the bounds of the formation
    minx = formation[0][0]
    maxx = formation[0][0]
    miny = formation[0][1]
    maxy = formation[0][1]
    for x, y in formation:
        if minx > x:
            minx = x
        elif maxx < x:
            maxx = x
        if miny > y:
            miny = y
        elif maxy < y:
            maxy = y

    # create the required arrays; both are 2 larger than the formation requires
    field = ones((maxx - minx + 3, maxy - miny + 3))
    form = zeros((maxx - minx + 3, maxy - miny + 3))
    fmaxx, fmaxy = field.shape

    # fill the array with truth values according to the formation
    for x, y in map(lambda p: p - array([minx - 1, miny - 1]), formation):
        form[x, y] = 1

    form = form == 1

    # paint-like fill operation starting at (0, 0); sets those squares to 0
    field = fill(field, form)

    # perform a reduction operation on each square with the value of target:
    # add 1 to the square if all its neighbours also have the value of target.
    # This yields a height / distance-from-border map.
    target = 0
    while target != amax(field):
        target += 1
        field = reduce(field, target)

    m = amax(field)

    outformation = []

    # recreate the formation as tuples of coordinates and the height/distance
    for i in iterprod(arange(0, field.shape[0]), arange(0, field.shape[1])):
        if form[i]:
            outformation.append((m - field[i], array(i)))

    # sort based on lexicographical ordering and height/distance
    outformation = sorted(outformation,
                          key=lambda x: x[1][0] + x[1][1] * fmaxx + x[0] * fmaxx * fmaxy)

    # restore the origin
    return [(x[0], x[1] + array([minx - 1, miny - 1])) for x in outformation]
Example #12
def wordbreak(s):
    s = s.replace("'", "").replace('"', '')
    s = s.lower()
    if s in arpabet:
        return arpabet[s]
    middle = len(s) / 2
    partition = sorted(range(len(s)), key=lambda x: (x - middle)**2 - x)
    for i in partition:
        pre, suf = s[:i], s[i:]
        if pre in arpabet:
            suf_breaks = wordbreak(suf)
            if suf_breaks is not None:
                return [x + y for x, y in iterprod(arpabet[pre], suf_breaks)]
    return None
Example #13
    def requires(self):
        dates = utils.get_valid_dates(self.start, self.end)  # Add Mods Product
        if self.edos:
            munis = pu.get_munis(self.edos)
            for cutoff, cve_muni in iterprod(self.cutoffs, munis):
                yield CreateCluster(dates=dates,
                                    cutoff=cutoff,
                                    cve_muni=cve_muni)
        elif self.cve_muni:
            for cutoff in self.cutoffs:
                yield CreateCluster(dates=dates,
                                    cutoff=cutoff,
                                    cve_muni=self.cve_muni)
        else:
            pass
Example #14
def wordbreak(s):
    try:
        arpabet = nltk.corpus.cmudict.dict()
    except LookupError:
        nltk.download('cmudict')
        arpabet = nltk.corpus.cmudict.dict()
    s = s.lower()
    if s in arpabet:
        return arpabet[s]
    middle = len(s) / 2
    partition = sorted(range(len(s)), key=lambda x: (x - middle)**2 - x)
    for i in partition:
        pre, suf = s[:i], s[i:]
        if pre in arpabet:
            suf_breaks = wordbreak(suf)
            if suf_breaks is not None:
                return [x + y for x, y in iterprod(arpabet[pre], suf_breaks)]
    return None
Example #15
    def __init__(self, fracs):
        self.fracs = fracs
        self.nfracs = len(fracs)

        # load figure and gridspec object
        self.gs, self.fig = gen_gridspec_fig(
            self.nfracs, spsize=(2.5, 1.))
        self.subplot_inds = list(iterprod(range(self.gs._nrows), range(self.gs._ncols)))

        self.fracs_plot_ix = dict(zip(sorted(fracs), self.subplot_inds))
        self.fig_axes = {f: self.fig.add_subplot(self.gs[ix[0], ix[1]])
                         for f, ix in self.fracs_plot_ix.items()}

        for f_, ax_ in self.fig_axes.items():
            ax_.tick_params(labelsize='x-small')
            ax_.set_xscale('log')
            ax_.set_xlim([.1, 200.])
            ax_.text(x=.2, y=.1, s='f = {:.2f}'.format(f_), size='x-small')

        self.fig.suptitle('SNR vs PDF population', size='x-small')
Example #16
def test_xrandom_iterprod():
    # one exhaustive small case first
    all_12ab = [[1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']]
    assert_equal(sorted(xrandom_iterprod(10, [1, 2], 'ab')), all_12ab)

    # Do a few longer, random ones and verify that they come out correctly
    for i in xrange(10):
        ns = random.randint(1, 5)
        seqs = [range(random.randint(1, 5)) for i in range(ns)]
        all_prods = set(map(tuple, iterprod(*seqs)))

        for count in [random.randint(0, 8) for i in range(3)]:
            real_count = min(count, np.prod(map(len, seqs)))
            r_prods = set(map(tuple, xrandom_iterprod(count, *seqs)))
            assert_equal(len(r_prods), real_count)

            # assert that they are all unique
            assert_equal(len(set(r_prods)), real_count)
            # assert that they are all a part of all prods
            assert all_prods.issuperset(r_prods)
Example #18
def rec_int(f, ranges, max_num_evals):
    """Evaluates the integral of the function 'f' over the coordinate ranges
    indicated by 'ranges'. Uses the rectangle (midpoint) rule and no more than
    'max_num_evals' evaluations."""

    from itertools import product as iterprod
    from math import floor, fsum, prod  # prod requires Python 3.8+

    num_dims = len(ranges)
    points_per_dim = floor(max_num_evals ** (1.0 / num_dims))
    num_evals = points_per_dim ** num_dims

    # midpoints of points_per_dim equal subintervals in each dimension
    coord_ranges = [[lo + (hi - lo) / (2 * points_per_dim) +
                     ((hi - lo) / points_per_dim) * i
                     for i in range(points_per_dim)]
                    for lo, hi in ranges]

    accum = fsum(f(x) for x in iterprod(*coord_ranges))
    # the original called an undefined mul(); math.prod assumed equivalent
    volume = prod(hi - lo for lo, hi in ranges)

    return accum * volume / num_evals
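A quick sanity check, assuming the math.prod substitution above: for f(x, y) = x*y on the unit square the exact integral is 0.25, and the midpoint rule reproduces it (up to floating-point rounding) because the integrand is linear in each variable.

approx = rec_int(lambda x: x[0] * x[1], [(0.0, 1.0), (0.0, 1.0)], 100)
print(approx)  # ~0.25 with a 10 x 10 midpoint grid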
Example #20
def get_raw_hjortevillt_df():
    """Get the data from hjorteviltregisteret
    """
    animals = [
        'Elg',
        'Hjort']

    data_sets = [
        'Sett',
        'Felt',
        'Slaktevekt',
    ]
    # download the data and turn each table into an individual frame
    with ThreadPool(6) as pool:
        frames = pool.map(download_from_hjortevilltregisteret,
                          iterprod(animals, data_sets))
    # join the frames into one single large frame
    df = pd.concat([frames[0].iloc[:, :3]]
                   + [i.iloc[:, 3:] for i in frames],
                   axis=1)
    df.to_csv(RAW_FILENAME, index=False)
    return df
Example #21
    def generate_order_formation(self, n):
        n = n + 1
        mindist = maxsize
        for i in range(1, n):
            if abs(floor(n / i + 1) - (i * 2 - 1)) < mindist:
                mindist = abs(floor(n / i + 0.5) - (i * 2 - 1))
            else:
                y = floor(n / (i - 1) + 1)
                x = i - 1
                break

        l = list(array(p) for p in iterprod(arange(-1 * floor(y / 2), floor(y / 2) + 1),
                                            arange(-1 * floor((x * 2 - 1) / 2),
                                                   floor((x * 2 - 1) / 2) + 1, 2)))

        for i in range(0, len(l)):
            if array_equal(l[i], array([0, 0])):
                del l[i]
                break

        n = n - 1

        return l[0:n]
Example #22
    def drawocclusionshading(self, cc, state):
        if self.selectedbee is not None:
            pos = next((x[0] for x in state if x[1] is self.selectedbee), None)
            positions, bees, movement, communication = map(list, zip(*state))

            xmin, ymin = self.fi(array([0, 0]))
            xmax, ymax = self.fi(array([1, 1]))

            for square in iterprod(range(int(xmin) - 2, int(xmax) + 2),
                                   range(int(ymin) - 2, int(ymax) + 2)):
                square = array(square)

                if not lineofsight(pos, square, positions):
                    x, y = self.f(square)

                    cc.save()
                    cc.translate(x, y)
                    cc.scale(1 / self.worldsize[0], 1 / self.worldsize[1])

                    cc.rectangle(-0.5, -0.5, 1, 1)

                    cc.restore()
                    cc.set_source_rgb(0.9, 0.9, 0.9)
                    cc.fill()
Example #23
    def make_rows_for_stats(stats):
        _rows = [None] * 2
        _rows[0] = [''] * (1 + len(all_rooms) * len(all_kinds) * len(all_methods))
        for i_roomkind, (room, kind) in enumerate(iterprod(all_rooms, all_kinds)):
            _rows[0][1 + len(all_methods) * i_roomkind] = f'room{room} ({kind})'
        _rows[1] = [''] + all_methods * len(all_rooms) * len(all_kinds)
        for metric in all_metrics:
            _row = [f'Δ{metric}' if method_rev == 'delta' else metric]
            for room, kind, method in iterprod(all_rooms, all_kinds, all_methods):
                _row.append(stats.get(Metadata(method, room, kind, metric), '-'))
            _rows.append(_row)
        return _rows

    path_csv = (path_root / fname_result).with_suffix('.csv')
    with path_csv.open('w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        rows = []
        for method in all_methods:
            rows.append([method])
            for room, kind, (i, metric) in iterprod(all_rooms, all_kinds, enumerate(all_metrics)):
                values = all_arrays.get(Metadata(method, room, kind, metric), ['-'])
                rows.append([
                    f'room{room} ({kind})' if i == 0 else '',
                    f'Δ{metric}' if method_rev == 'delta' else metric,
                    *values,
                ])

        rows += [['means'], *make_rows_for_stats(all_means)]
        rows += [['stds'], *make_rows_for_stats(all_stds)]
        for r in rows:
            writer.writerow(r)


# %% plotting functions
Example #24
def main(task_info, paths):
    print """
    """

    task = {
        'KIND_RUN': 'TONAME',
        'FILE_INIT': 'GENERATOR_GLOBAL',
        'LENGTH_FS': 3,
        'INITIALIZATION': 'DIABATIC',
        'NUMBER_CONFIG': 1,
        'NUMBER_REPEAT': 1,
        'LIGHT': False,
        'FIRST_MOL_CHAIN': [1, 1, 1],
        'LAST_MOL_CHAIN': [2, 1, 1]
    }
    task_info.update(task)


    cp2k_param = [
        #[ 'PROPAGATION', 'FSSH'],
        ['PROPAGATION', 'FSSH'],
        #['DECO', 'NO_DECO_CORR','INSTANT_COLLAPSE','DAMPING']
        [ 'DECO', 'DAMPING'],
        #['METHOD_RESCALING','NACV','SIMPLE_QSYS']
        [ 'METHOD_RESCALING', 'NACV'],
        #[ 'METHOD_ADIAB_NACV', 'FAST','TOTAL']
        [ 'METHOD_ADIAB_NACV', 'FAST'],
        #['METHOD_REVERSAL', 'NEVER', 'ALWAYS', 'TRUHLAR', 'SUBOTNIK']
        [ 'METHOD_REVERSAL', 'ALWAYS'],
        #[ 'SCALING', 0.05, 0.03, 0.01, 0.008, 0.005, 0.003, 0.001, 0.0005, 0.0001, 0.00005],
        [ 'SCALING', 0.00005],
        #['SCALING', 0.003, 0.00005],
        #[ 'TIMESTEP', 0.01, 0.05, 0.1, 0.5],
        [ 'TIMESTEP', 0.1],
        #['EDC_E0', 0.01, 0.1, 1.0],
        ['EDC_E0', 0.1],
        #['ELECTRONIC_STEPS', 5, 10, 50],
        ['ELECTRONIC_STEPS', 5],
        [ 'TEMPLATE_FILE', 'FSSH_CORE.template'],
        [ 'FORCEFIELD_FILE', 'FSSH_FF.template'],
        ['INITIALIZATION', 'DIABATIC'],
        ['INIT'] + range(1, task_info.get('NUMBER_CONFIG') + 1),
        ['REPEAT'] + range(task_info.get('NUMBER_REPEAT'))
    ]

    # BUILD THE MEGA_LISTS
    second_list = [ sublist[1:] for sublist in cp2k_param]
    total_list  = list(iterprod(*second_list))
    mega_list = []
    for sublist in total_list:
        subdict = {}
        for index in range(len(sublist)):
            subdict.update({
                cp2k_param[index][0] : sublist[index]
            })
        mega_list.append(subdict)


    # SET_UP THE DIRECTORY, CHECK ANY SUBDIR IS PRESENT
    bucket = Bucket(task_info)
    bucket.name()
    paths.update({'bucket': bucket.path})

    task = Dir(task_info.get('INPUT_INFO'))
    paths.update( {'task' : task.path} )

    templates = Dir('templates', paths)
    templates.checkdir()
    templates.clean()

    supinitial = Dir('initial')
    supinitial.checkdir()

    bin = Dir('bin', paths)
    bin.checkdir()


    # PREPARE THE MEGA_LIST FOR POOL
    for ndir in range(len(mega_list)):
        mega_list[ndir].update({ 'NDIR' : ndir,
                                 'PATHS_DICT' : paths,
                                 'INPUTS_DICT' : task_info
                                 })


    # RUN THE CALCULATIONS, SERIE OR PARALLEL ACCORDING TO THE NWORKER VARIABLE
    nworker = task_info['NWORKER']
    if nworker == 0:
        for cp2k_info in mega_list:
            run_fssh(cp2k_info)
    else:
        from multiprocessing import Pool, cpu_count
        if nworker == -1:
            nworker = cpu_count()
        pool = Pool(nworker)
        pool.map( run_fssh, mega_list)
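The MEGA_LIST construction above is a common grid-expansion pattern: take the product of the per-parameter value lists, then zip each combination back to its parameter name. A minimal self-contained sketch of the same pattern with hypothetical parameters:

from itertools import product as iterprod

cp2k_param = [['SCALING', 0.03, 0.01], ['TIMESTEP', 0.1, 0.5]]

keys = [sublist[0] for sublist in cp2k_param]
value_lists = [sublist[1:] for sublist in cp2k_param]
mega_list = [dict(zip(keys, combo)) for combo in iterprod(*value_lists)]
print(mega_list)
# [{'SCALING': 0.03, 'TIMESTEP': 0.1}, {'SCALING': 0.03, 'TIMESTEP': 0.5},
#  {'SCALING': 0.01, 'TIMESTEP': 0.1}, {'SCALING': 0.01, 'TIMESTEP': 0.5}]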
Example #25
def formations():
    formations = []

    formations.append(list(iterprod(arange(0, 3), arange(0, 3))))

    formations.append(list(iterprod(arange(0, 4), arange(0, 4))))

    f = list(iterprod(arange(0, 5), arange(0, 5)))
    for x in [18, 17, 16, 13, 11, 8, 7, 6]:
        del f[x]
    formations.append(f)

    formations.append(list(iterprod(arange(0, 8, 2), arange(0, 8, 2))))

    f = list(iterprod(arange(0, 3), arange(0, 3)))
    f = f + list(iterprod(arange(8, 11), arange(5, 8)))
    formations.append(f)

    f = list(iterprod(arange(0, 3), arange(0, 3)))
    f = f + list(iterprod(arange(1, 4), arange(1, 4)))
    for x in [14, 13, 11, 10]:
        del f[x - 1]
    formations.append(f)

    i = 101
    random.seed(i)
    formations.append(random.sample(
        list(iterprod(arange(0, 5), arange(0, 5))), 10))
    i += 1

    random.seed(i)
    formations.append(random.sample(
        list(iterprod(arange(0, 10), arange(0, 10))), 10))
    i += 1

    random.seed(i)
    formations.append(random.sample(
        list(iterprod(arange(0, 10), arange(0, 10))), 15))
    i += 1

    random.seed(i)
    formations.append(random.sample(
        list(iterprod(arange(0, 15), arange(0, 15))), 15))
    i += 1

    return formations
Example #26
def iter_bin_vec(n):
    for i, z in enumerate(iterprod(*[[0, 1]] * n)):
        yield z
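Note that product([0, 1], repeat=n) expresses the same enumeration directly, and the enumerate index above is unused; a behavior-equivalent sketch:

from itertools import product as iterprod

def iter_bin_vec(n):
    # yields all 2 ** n binary vectors of length n in lexicographic order
    yield from iterprod([0, 1], repeat=n)

print(list(iter_bin_vec(2)))  # [(0, 0), (0, 1), (1, 0), (1, 1)]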
Example #27
    def plot_all_predictions(self, path_stem):
        a_dict = {}
        params_path = path_stem + ".params"
        with open(params_path,'r') as f:
            for step, line in enumerate(f.readlines()):
                a_dict[step] = literal_eval(line)
        #print(a_dict)

        # For the moment, if a list of years is given, we are only looking at the first year.
        year_path = f"{self.vis_target}/{a_dict[0]['YEAR'][0]}"

        def make_month_path(base_path, month, dict0, seed):
            suffix = ""
            if dict0["SUPER"]:
                suffix += "-LvlUp"
            if dict0["BUFFER"]:
                suffix += f"-{dict0['BUFFER']}meter"
            if dict0["USE_PCA"]:
                suffix += "-PCA"
            if dict0["VALIDATE"]:
                suffix += f"-{dict0['VALIDATE'] - 1:.2f}_{seed}"
            return f"{base_path}/month{month}{suffix}"

        month_paths = {}
        for month in a_dict[1]:
            month_paths[month] = make_month_path(year_path, month, a_dict[0], a_dict[1][month]["seed"])
        #print(month_paths)

        region_paths = {}
        for month in month_paths:
            region_paths[month] = {}
            for region in a_dict[0]["REG_LIST"]:
                reg_string = "_".join(region)
                region_paths[month][reg_string] = f"{month_paths[month]}/{reg_string}"
        #print(region_paths)

        method_paths = {}
        for month in a_dict[1]:
            print(f"month: {month}")
            method_paths[month] = {}
            reg_list = f"{path_stem}.{month}reg"
            with open(reg_list, 'r') as regions:
                for line in regions.readlines():  # region in a_dict[0]["REG_LIST"]
                    region = "_".join(line.strip().split(","))
                    #print(f"region: {region}")
                    method_paths[month][region] = {}
                    image_origen = f"{region_paths[month][region]}/{SUB_FIGS}/{SUB_PRED}/origen.png"
                    display(Image(filename=image_origen))
                    for method in a_dict[0]["MODICT"]:
                        #print(f"method: {method}")
                        paramdict = a_dict[0]["MODICT"][method]
                        params = paramdict.keys()
                        argums = paramdict.values()
                        arg_combos = iterprod(*argums)
                        # Don't print the following! It exhausts the generator.
                        ##print(f"The possible combinations of arguments are: {list(arg_combos)}")
                        bash_suffixes = [zip(params, ac) for ac in arg_combos]
                        file_suffixes = ["".join([f"{par}{arg}" for par, arg in bashix]) for bashix in bash_suffixes]
                        for suffix in file_suffixes:
                            image_path = f"{region_paths[month][region]}/{SUB_FIGS}/{SUB_PRED}/{method}{suffix}-plot.png"
                            if os.path.exists(image_path):
                                #print(image_path)
                                display(Image(filename=image_path))
                                with open(f"{region_paths[month][region]}/{SUB_PRED}/{method}{suffix}.log", 'r') as f:
                                    print("Method completion time:", f.readlines()[-1])
Example #28
def main(task_info, paths):
    print """
    """

    task = {
        'KIND_RUN': 'TONAME',
        'FILE_INIT':
        'TASK279-SAMPLE-BO-CORRECT-TEMP-100ps-20dabab-170601-8da556911f813378f0577abfd206e148',
        'FILE_DICT':
        'TASK279-SAMPLE-BO-CORRECT-TEMP-100ps-20dabab-170601-8da556911f813378f0577abfd206e148-1706121812',
        'LENGTH_FS': 1,
        'INITIALIZATION': 'ADIABATIC',
        'NUMBER_ADIABAT': 2,
        'NUMBER_CONFIG': 100,
        'FIRST_CONFIG': 200,
        'FINAL_CONFIG': 500,
        'NUMBER_REPEAT': 10,
        'LIGHT': True
    }
    task_info.update(task)

    seed()
    cp2k_param = [
        ['PROPAGATION', 'FSSH'],
        #['TRIVIAL', 'TRIVIAL_HOP_CORRECT','UNMODIFIED_SURF_HOP']
        ['TRIVIAL', 'TRIVIAL_HOP_CORRECT', 'UNMODIFIED_SURF_HOP'],
        # ['DECO', 'NO_DECO_CORR','INSTANT_COLLAPSE','DAMPING']
        ['DECO', 'DAMPING'],
        #['METHOD_RESCALING','NACV','SIMPLE_QSYS']
        ['METHOD_RESCALING', 'NACV'],
        #[ 'METHOD_ADIAB_NACV', 'FAST','TOTAL']
        ['METHOD_ADIAB_NACV', 'FAST'],
        #['METHOD_REVERSAL', 'NEVER', 'ALWAYS', 'TRUHLAR', 'SUBOTNIK']
        ['METHOD_REVERSAL', 'ALWAYS'],
        #[ 'SCALING', 0.05, 0.03, 0.01, 0.008, 0.005, 0.003, 0.001, 0.0005, 0.0001, 0.00005],
        #[ 'SCALING', 0.03, 0.02, 0.01, 0.008, 0.003, 0.0005, 0.00005],
        ['SCALING', 0.03],
        #['SCALING', 0.003, 0.00005],
        #[ 'TIMESTEP', 0.01, 0.05, 0.1, 0.5],
        ['TIMESTEP', 0.5],
        #['EDC_E0', 0.01, 0.1, 1.0],
        ['EDC_E0', 0.1],
        #['ELECTRONIC_STEPS', 5, 10, 50],
        ['ELECTRONIC_STEPS', 5],
        ['TEMPLATE_FILE', 'FSSH_CORE.template'],
        ['FORCEFIELD_FILE', 'FSSH_FF.template'],
        ['INITIALIZATION', 'ADIABATIC'],
        ['INIT_CONFIG'] +
        [(ind + 1,
          range(task_info['FIRST_CONFIG'], task_info['FINAL_CONFIG'],
                (task_info['FINAL_CONFIG'] - task_info['FIRST_CONFIG']) /
                task_info['NUMBER_CONFIG'])[ind])
         for ind in range(task_info['NUMBER_CONFIG'])],
        ['REPEAT'] + range(task_info.get('NUMBER_REPEAT')),
    ]

    print "How many run?", len(cp2k_param)

    dict_for_equilibrium = {}
    with open(
            'initial/result-thermodynamics-population-extract-scaling-adiabat-%s/Simulation-1.dat'
            % task_info['FILE_DICT']) as file_dict:
        for line in file_dict.readlines()[1:]:
            if '#' in line:
                pass
            else:
                dict_for_equilibrium[float(
                    line.split()[0])] = [float(x) for x in line.split()[1:]]

    print dict_for_equilibrium

    # BUILD THE MEGA_LISTS
    second_list = [sublist[1:] for sublist in cp2k_param]
    total_list = list(iterprod(*second_list))
    mega_list = []
    for sublist in total_list:
        subdict = {}
        for index in range(len(sublist)):
            subdict.update({cp2k_param[index][0]: sublist[index]})
        mega_list.append(subdict)

    # SET_UP THE DIRECTORY, CHECK ANY SUBDIR IS PRESENT
    bucket = Bucket(task_info)
    bucket.name()
    paths.update({'bucket': bucket.path})

    task = Dir(task_info.get('INPUT_INFO'))
    paths.update({'task': task.path})

    templates = Dir('templates', paths)
    templates.checkdir()
    templates.clean()

    supinitial = Dir('initial')
    supinitial.checkdir()

    bin = Dir('bin', paths)
    bin.checkdir()

    # PREPARE THE MEGA_LIST FOR POOL
    for ndir in range(len(mega_list)):
        mega_list[ndir].update({
            'NDIR': ndir,
            'PATHS_DICT': paths,
            'INPUTS_DICT': task_info,
            'DICT_EQUILIBRIUM': dict_for_equilibrium
        })

    # RUN THE CALCULATIONS, SERIE OR PARALLEL ACCORDING TO THE NWORKER VARIABLE
    nworker = task_info['NWORKER']
    if nworker == 0:
        for cp2k_info in mega_list:
            run_fssh(cp2k_info)
    else:
        from multiprocessing import Pool, cpu_count
        if nworker == -1:
            nworker = cpu_count()
        pool = Pool(nworker)
        pool.map(run_fssh, mega_list)
Example #29
    with (path_root / fname).open('rb') as f:
        dict_pickled = pickle.load(f)

    rooms = set()
    kinds = set()
    for key, value in dict_pickled.items():
        _, room, kind, metric = key
        rooms.add(room)
        kinds.add(kind)
        all_metrics.add(metric)

    for metric in all_metrics:
        means = []
        for i_method, method in enumerate(all_methods):
            array = []
            for room, kind in iterprod(rooms, kinds):
                array += dict_pickled[method, room, kind, metric]
            means.append(np.mean(array))

        if metric not in all_means:
            all_means[metric] = dict()
        all_means[metric][phasetype] = np.array(means)

# %% draw bar plots

plt.style.use('default')
plt.rc('font', family='Arial', size=18)

figs = []
for metric, means in all_means.items():
    fig: plt.Figure = None
Example #30
def model(REGION, TRAIN_DIR, EVAL_DIR, OUT_DIR, MODELS, NOTE):

    TRAVIS_MODEL = pathlib.Path("hypppo7.py").resolve()
    MARIO_MODEL = pathlib.Path("2b-kknn.R").resolve()
    RF_MODEL = pathlib.Path("2c-rf.R").resolve()

    # If 0 is given instead of a specified region,
    # create list of all regions in the TRAIN_DIR
    if not REGION:
        REGIONS = listdir(TRAIN_DIR)
    else:
        REGIONS = [REGION]
    #print(REGIONS)

    # Prepare the list of parameter combinations for each model
    suffixes = {}
    for MODEL in MODELS:
        paramdict = MODELS[MODEL]
        params = paramdict.keys()
        argums = paramdict.values()
        arg_combos = iterprod(*argums)
        # Don't print the following! It exhausts the generator.
        ##print(f"The possible combinations of arguments are: {list(arg_combos)}")
        bash_suffixes = [list(zip(params, ac)) for ac in arg_combos]
        file_suffixes = [
            "".join([f"{par}{arg}" for par, arg in bashix])
            for bashix in bash_suffixes
        ]
        suffixes[MODEL] = list(zip(bash_suffixes, file_suffixes))
    for REGION in REGIONS:
        # Specify train and predi files
        TR = TRAIN_DIR.joinpath(REGION)
        EV = EVAL_DIR.joinpath(REGION)

        # Create an output folder for the region, with predictions and logs subfolders
        OUT = OUT_DIR.joinpath(REGION).with_suffix("")
        PRED = OUT.joinpath(SUB_PRED)
        if not PRED.is_dir():
            PRED.mkdir(parents=True)

        for MODEL in MODELS:

            # The following has been extracted outside the REGIONS loop for a one-time creation
            #paramdict = MODELS[MODEL]
            #params = paramdict.keys()
            #argums = paramdict.values()
            #arg_combos = iterprod(*argums)
            ## Don't print the following! It exhausts the generator.
            ###print(f"The possible combinations of arguments are: {list(arg_combos)}")
            #
            #for ac in arg_combos:
            #    file_name = f"{MODEL}"#{NOTE}"
            #    bash_suffix = []
            #    for p, a in zip(params, ac):
            #        file_name += f"{p}{a}"
            #        bash_suffix.extend([p, a])

            for bash_suf, file_suf in suffixes[MODEL]:

                t0 = time()

                # Specify paths of output files
                file_name = MODEL + file_suf
                PRD = PRED.joinpath(f"{file_name}.csv")
                LOG = PRED.joinpath(f"{file_name}.log")

                # Open the log file and start writing
                with open(LOG, "w") as log:
                    log.write(f"t0={t0}\n")
                    log.write(f"Prediction file: {PRD}\n")
                    log.write(f"Log file: {LOG}\n")
                    log.write(f"{REGION} {MODEL}\n")
                    log.write(f"bash_suffix: {bash_suf}\n")

                if (MODEL in ["HYPPO", "KNN", "SBM"]):
                    bash_args = [
                        TRAVIS_MODEL, "-t", TR, "-e", EV, "-m", MODEL, "-o",
                        PRD, "-l", LOG
                    ]
                elif (MODEL == "1NN"):
                    bash_args = [
                        TRAVIS_MODEL, "-t", TR, "-e", EV, "-m", "KNN", "-o",
                        PRD, "-s", 0, "-S", "1,2", "-v", 2, "-k", 1, "-l", LOG
                    ]
                elif (MODEL == "KKNN"):
                    bash_args = [
                        MARIO_MODEL, "-t", TR, "-e", EV, "-o", PRD, "-l", LOG
                    ]
                elif (MODEL == "RF"):
                    bash_args = [
                        RF_MODEL, "-t", TR, "-e", EV, "-o", PRD, "-l", LOG
                    ]
                else:
                    print(f"Model \"{MODEL}\" unknown!")
                    bash_args = ["echo", f"Model \"{MODEL}\" unknown!"]

                for bashix in bash_suf:
                    bash_args.extend(bashix)
                with open(LOG, "a") as log:
                    log.write(f"bash_args: {bash_args}\n")
                bash(bash_args)

                t1 = time()
                with open(LOG, "a") as log:
                    log.write(f"t1={t1}\n")
                    log.write(f"t={t1 - t0}\n")
Example #31
def main(task_info, paths):

    task = {
        'KIND_RUN': 'TASK234-SAMPLE-BO',
        'INITIALIZATION': 'SAMPLE_BO',
        'NUMBER_CONFIG': 1,
        'FILE_INIT': {
            1: 'config-GENERATOR_GLOBAL',
            2: 'config-GENERATOR_GLOBAL',
            3: 'config-GENERATOR_GLOBAL'
        },
        'LENGTH_FS': 100000,
        'OUTPUT_CONFIG': 1
    }
    task_info.update(task)


    cp2k_param = [
        [ 'PROPAGATION', 'BORN_OPPENHEIMER'],
        [ 'INIT'] + range(1, task_info.get('NUMBER_CONFIG') + 1),
        ['SCALING', 0.00005],
        #[ 'SCALING', 0.05, 0.03, 0.02, 0.01, 0.008, 0.005, 0.003, 0.001, 0.0005, 0.0001, 0.00005],
        [ 'TIMESTEP', 0.1],
        [ 'TEMPLATE_FILE', 'FSSH_CORE.template'],
        [ 'FORCEFIELD_FILE', 'FSSH_FF.template'],
        ['FIRST_ADIABAT',1,2,3],
        ['DENSITY', 0.001],
        ['CC_CHARGED', 1.369],
        ['INITIALIZATION', 'ADIABATIC']
    ]


    # BUILD THE MEGA_LISTS
    second_list = [ sublist[1:] for sublist in cp2k_param]
    total_list  = list(iterprod(*second_list))
    mega_list = []
    for sublist in total_list:
        subdict = {}
        for index in range(len(sublist)):
            subdict.update({
                cp2k_param[index][0] : sublist[index]
            })
        mega_list.append(subdict)



    # SET_UP THE DIRECTORY, CHECK ANY SUBDIR IS PRESENT
    bucket = Bucket(task_info)
    bucket.name()
    paths.update({'bucket': bucket.path})

    task = Dir(task_info.get('INPUT_INFO'))
    paths.update( {'task' : task.path} )

    templates = Dir('templates', paths)
    templates.checkdir()
    templates.clean()

    bin = Dir('bin', paths)
    bin.checkdir()

    output_total = Dir('output')
    output_total.rm_mkdir()


    # PREPARE THE MEGA_LIST FOR POOL
    for ndir in range(len(mega_list)):
        mega_list[ndir].update({ 'NDIR' : ndir,
                                 'PATHS_DICT' : paths,
                                 'INPUTS_DICT' : task_info
                                 })


    # RUN THE CALCULATIONS, SERIE OR PARALLEL ACCORDING TO THE NWORKER VARIABLE
    nworker = task_info['NWORKER']
    if nworker == 0:
        for cp2k_info in mega_list:
            run_fssh(cp2k_info)
    else:
        from multiprocessing import Pool, cpu_count
        if nworker == -1:
            nworker = cpu_count()
        pool = Pool(nworker)
        pool.map( run_fssh, mega_list)
Example #32
            'density': [150],
            'coh_mean': [40],
            'coh_stdev': [10],
            'direction': ['left'],
            'num_frames': [24],
            'diameter': [8],
            'cp_time': [.2]
        })

        num_trials = 100

        # edit params so that shorter entries are recycled

        # I found inspiration here: https://stackoverflow.com/a/12913336
        dummy_list = list(params.values())
        combinations = list(iterprod(*dummy_list))
        num_comb = len(combinations)

        import pprint

        for dset_idx in range(num_comb):
            curr_dict = {k: combinations[dset_idx][ix] for ix, k in enumerate(params.keys())}
            S = DotsStimulus(**curr_dict)

            # only create the file at first iteration
            write_stimulus_to_file(S, num_trials, file_name, create_file=file_does_not_exist)

        print("--- {} seconds ---".format(time.time() - start_time))
        
        pprint.pprint(inspect_db(file_name), width=120)
Example #33
for init in info['FILE_INIT']:
    system_info = (InputFile('%s/system.info' % init).dict)
    system_info.update({'AOM_RADIUS': info['AOM_RADIUS']})
    system_info.update({'FILE_INIT': init})
    system_info = add_list_activated(system_info, init)
    list_config_init = range(
        0, system_info['NPROD_INIT'],
        system_info['NPROD_INIT'] / system_info['NCONFIG_INIT'])
    cp2k_param_here = list(cp2k_param)
    cp2k_param_here.append(['INIT'] + [
        list_config_init[x]
        for x in range(0, len(list_config_init),
                       len(list_config_init) / info['NUMBER_CONFIG'])
    ])
    second_list = [sublist[1:] for sublist in cp2k_param_here]
    total_list = list(iterprod(*second_list))
    for sublist in total_list:
        subdict = {}
        for index in range(len(sublist)):
            subdict.update({cp2k_param_here[index][0]: sublist[index]})
        subdict.update(system_info)
        subdict.update({'ARCHER': archer})
        mega_list.append(subdict)
for ndir in range(len(mega_list)):
    mega_list[ndir].update({
        'NDIR': ndir,
        'PATHS_DICT': paths,
        'INPUTS_DICT': info
    })

# RUN THE CALCULATIONS, SERIE OR PARALLEL ACCORDING TO THE NWORKER VARIABLE
Example #34
                                                    'list_fname'),
                                    chars_as_strings=True,
                                    squeeze_me=True)
        flist_speech = metadata_ref['path_all_speech']
        flist_speech = [Path(p.rstrip()) for p in flist_speech]
        n_speech = len(flist_speech)
        list_fname_ref = metadata_ref['list_fname']
        list_feature: List[Tuple] = list_fname_to_feature(list_fname_ref)
        n_feature = len(list_feature)
    else:
        flist_speech = list(path_speech.glob('**/*.WAV')) + list(
            path_speech.glob('**/*.wav'))
        n_speech = len(flist_speech)
        list_feature = [
            (i_speech, hp.room_create, i_loc)
            for i_speech, i_loc in iterprod(range(n_speech), range(n_loc))
        ]

        # uniformly random sample
        if args.kind_data.lower() == 'train':
            n_feature = hp.n_data_per_room
        else:
            n_feature = hp.n_test_per_room
        idx_choice = np.random.choice(len(list_feature),
                                      n_feature,
                                      replace=False)
        idx_choice.sort()
        list_feature: List[Tuple] = [list_feature[i] for i in idx_choice]

    if n_feature < args.from_idx:
        raise ArgumentError
Example #36
    _rows[1] = [''] + all_methods * len(all_kinds)
    for metric in all_metrics:
        _row = [metric]
        for kind, method in iterprod(all_kinds, all_methods):
            _row.append(stats[MethodKindMetric(method, kind, metric)])
        _rows.append(_row)
    return _rows


path_csv = (path_root / fstem_analysis).with_suffix('.csv')
with path_csv.open('w', newline='', encoding='utf-8') as f:
    writer = csv.writer(f)
    rows = []
    for method in all_methods:
        rows.append([method])
        for kind, (i, metric) in iterprod(all_kinds, enumerate(all_metrics)):
            rows.append([
                kind if i == 0 else '',
                metric,
                *all_arrays[MethodKindMetric(method, kind, metric)],
            ])

    rows += [['means'], *make_rows_for_stats(all_means)]
    rows += [['stds'], *make_rows_for_stats(all_stds)]
    for r in rows:
        writer.writerow(r)

# %% plotting functions


def _graph_initialize(means: ndarray, stds: ndarray,