Example #1
File: cli.py Project: coecms/era5
def download(oformat, param, stream, year, month, timestep, back, queue,
             urgent):
    """ 
    Download ERA5 variables, to be preferred 
    if adding a new variable,
    if month argument is not passed 
    then the entire year will be downloaded. 
    By default downloads hourly data in netcdf format.
    \f
    Grid and other stream settings are in the era5_<stream>_<timestep>.json files.
    """
    if back and stream != 'wfde5' and timestep not in ['mon', 'day']:
        print(
            'You can use the backwards option only with monthly and some daily data'
        )
        sys.exit()
    valid_format = list(
        iproduct(['tgz', 'zip'], ['cems_fire', 'agera5', 'wfde5']))
    valid_format.extend(
        list(
            iproduct(['netcdf', 'grib'],
                     ['pressure', 'surface', 'land', 'wave'])))
    if (oformat, stream) not in valid_format:
        print(f'Download format {oformat} not available for {stream} product')
        sys.exit()
    if queue:
        dump_args(oformat, stream, list(param), list(year), list(month),
                  timestep, back, urgent)
    else:
        api_request(oformat, stream, list(param), list(year), list(month),
                    timestep, back)
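
The valid (format, stream) whitelist above is just the cross product of two
short lists. A minimal standalone sketch of the same check (illustrative
values only, not the full era5 CLI):

from itertools import product as iproduct

valid_format = list(iproduct(['tgz', 'zip'], ['cems_fire', 'agera5', 'wfde5']))
valid_format.extend(iproduct(['netcdf', 'grib'],
                             ['pressure', 'surface', 'land', 'wave']))

# Membership test mirrors the check in download()
assert ('netcdf', 'surface') in valid_format
assert ('tgz', 'surface') not in valid_format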
Example #2
        def _base(j, k, c):

            assert k - j == 1
            aajk = subbasis(j, k)
            assert all(a.order() in (1, p) for a in aajk)
            idxs = [i for i, a in enumerate(aajk) if a.order() == p]

            rs = [([0], [0]) for i in range(len(aajk))]
            for i in range(len(idxs)):
                rs[idxs[i]] = (range(p), [0]) if i % 2 else ([0], range(p))
            if len(idxs) % 2:
                m = ceil(sqrt(p))
                rs[idxs[-1]] = range(0, p, m), range(m)

            tab = {}
            for x in iproduct(*(r for r, _ in rs)):
                key = dotprod(x, aajk)
                if hasattr(key, 'set_immutable'):
                    key.set_immutable()
                tab[key] = vector(x)
            for y in iproduct(*(r for _, r in rs)):
                key = c - dotprod(y, aajk)
                if hasattr(key, 'set_immutable'):
                    key.set_immutable()
                if key in tab:
                    return tab[key] + vector(y)

            raise TypeError('Not in group')
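
_base above is a meet-in-the-middle search: it tabulates dotprod(x, aajk) over
half of the exponent ranges, then probes with the complements
c - dotprod(y, aajk). A minimal integer-only sketch of the same pattern, with
toy values in place of the Sage group elements:

from itertools import product as iproduct

def meet_in_the_middle(c, basis, x_ranges, y_ranges):
    """Find x, y with dot(x + y, basis) == c, x[i] in x_ranges[i], etc."""
    dot = lambda v: sum(a * b for a, b in zip(v, basis))
    tab = {dot(x): x for x in iproduct(*x_ranges)}   # baby steps
    for y in iproduct(*y_ranges):                    # giant steps
        x = tab.get(c - dot(y))
        if x is not None:
            return tuple(xi + yi for xi, yi in zip(x, y))
    raise ValueError('Not in group')

# dot((x0, x1), (2, 5)) == 23 has the solution (4, 3): 2*4 + 5*3 == 23
print(meet_in_the_middle(23, (2, 5), [range(5), [0]], [[0], range(5)]))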
Example #3
def main():
    print('case', 'condition', 'temperature')
    for i, data in enumerate(iproduct(range(1, 4),
                                      map(lambda x: 0.5*x,
                                          range(-1, 2)))):
         print(i + 1, *data)
    return 0
Example #4
def find_interfaces_ntypes(landscape):
    '''Determines internal boundaries for landscapes with many habitat types.

    Parameters
    ----------
    landscape : 2-d array

    Returns
    -------
    Bx, By : tuple of two dicts
        each key is a tuple (i,j) corresponding to the numbers of the habitat
        types, and the value corresponds to the indices where a boundary
        between them appears, along either the x or y direction

    '''
    n = np.unique(landscape).astype(int)
    A = 2**(landscape.astype(int))
    Ax = A[1:, :] - A[:-1, :]
    Ay = A[:, 1:] - A[:, :-1]
    Bx = {}
    By = {}
    for i, j in iproduct(n, repeat=2):
        if i != j:
            Bx[(i, j)] = np.where(Ax == 2**j - 2**i)
            By[(i, j)] = np.where(Ay == 2**j - 2**i)

    return Bx, By
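
The power-of-two encoding works because 2**j - 2**i is distinct for every
ordered pair i != j, so a single difference array identifies which two habitat
types meet at each cell edge. A quick check on a toy landscape (assumes only
numpy):

import numpy as np
from itertools import product as iproduct

landscape = np.array([[0, 0, 1],
                      [0, 2, 1],
                      [2, 2, 1]])
A = 2 ** landscape
Ax = A[1:, :] - A[:-1, :]                 # steps between vertical neighbours
n = np.unique(landscape)
Bx = {(i, j): np.where(Ax == 2**j - 2**i)
      for i, j in iproduct(n, repeat=2) if i != j}
print(Bx[(0, 2)])   # (array([0, 1]), array([1, 0])): type 0 just above type 2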
Example #5
def run_landscapes(par,
                   dx,
                   cover,
                   frag,
                   size,
                   runs=1,
                   radius=1,
                   norm='taxicab',
                   processes=7,
                   filename=None):
    if processes is None or processes > 0:
        pool = Pool(processes=processes)
        mymap = pool.map
    else:
        mymap = map

    newpars = []
    for obj in (par, dx, cover, frag, size):
        if not type(obj) is list:
            newpars.append([obj])
        else:
            newpars.append(obj)

    newpars += [[radius], [norm]]

    works = list(iproduct(*newpars))
    # print(works)
    solutions = mymap(analyze_random_landscape, works)

    if filename:
        with open(filename, 'w') as f:
            dump([works, solutions], f)

    return solutions
Example #6
def encode(dic, degree, num_types):
    dic = {k if type(v) in num_types else str(k) + "=" + str(v):
           float(v) if type(v) in num_types else 1
           for k, v in dic.items()}

    dic_keys = list(dic.keys())
    for deg in range(2, degree + 1):
        for term_keys in iproduct(dic_keys, repeat=deg):
            term_names, term_facts = [], []
            for k, n in Counter(term_keys).items():
                v = dic[k]
                if type(v) is int and n > 1:
                    break
                term_names.append(k if n == 1 else str(k) + "^" + str(n))
                term_facts.append(v**n)
            else:  # No dummy feature was included more than once
                dic['*'.join(sorted(term_names))] = product(term_facts)

    return dic
Example #7
    def make_diag_figure(self, xnames, ynames):
        nobj = len(xnames)

        # initialize subplot size
        gs, fig = ftools.gen_gridspec_fig(nobj,
                                          add_row=False,
                                          border=(0.6, 0.6, 0.2, 0.4),
                                          space=(0.6, 0.35))

        # set up subplot interactions
        gs_geo = gs.get_geometry()

        fgrid_r, fgrid_c = tuple(list(range(n)) for n in gs_geo)
        gs_iter = iproduct(fgrid_r, fgrid_c)

        # set up kwargs for matplotlib errorbar
        # prefer to change color, then marker
        colors_ = ['C{}'.format(cn) for cn in range(10)]
        markers_ = ['o', 'v', 's', 'P', 'X', 'D', 'H']

        # iterate through rows & columns (rows change fastest)
        # which correspond to different quantities
        for (i, (ri, ci)) in enumerate(gs_iter):

            if i >= len(xnames):
                continue

            # choose axis
            ax = fig.add_subplot(gs[ri, ci])
            kwarg_cycler = cycler(marker=markers_) * \
                           cycler(c=colors_)

            xqty = xnames[i]
            yqty = ynames[i]

            # now iterate through results hdulists
            for (j, (result,
                     kwargs)) in enumerate(zip(self.results, kwarg_cycler)):

                kwargs['label'] = result[0].header['PLATEIFU']

                ax = self._add_log_offset_plot(j,
                                               xqty=xqty,
                                               yqty=yqty,
                                               ax=ax,
                                               **kwargs)

                ax.tick_params(labelsize=5)

            if i == 0:
                handles_, labels_ = ax.get_legend_handles_labels()
                plt.figlegend(handles=handles_,
                              labels=labels_,
                              loc='upper right',
                              prop={'size': 4.})

        fig.suptitle('PCA fitting diagnostics', size=8.)

        return fig
Example #8
    def _cross_provider_maps(self, lmap, rmap):
        result = {}
        for lspec, rspec in iproduct(lmap, rmap):
            try:
                constrained = lspec.constrained(rspec)
            except spack.spec.UnsatisfiableSpecError:
                continue

            # lp and rp are left and right provider specs.
            for lp_spec, rp_spec in iproduct(lmap[lspec], rmap[rspec]):
                if lp_spec.name == rp_spec.name:
                    try:
                        const = lp_spec.constrained(rp_spec, deps=False)
                        result.setdefault(constrained, set()).add(const)
                    except spack.spec.UnsatisfiableSpecError:
                        continue
        return result
Example #9
    def _cross_provider_maps(self, lmap, rmap):
        result = {}
        for lspec, rspec in iproduct(lmap, rmap):
            try:
                constrained = lspec.constrained(rspec)
            except spack.spec.UnsatisfiableSpecError:
                continue

            # lp and rp are left and right provider specs.
            for lp_spec, rp_spec in iproduct(lmap[lspec], rmap[rspec]):
                if lp_spec.name == rp_spec.name:
                    try:
                        const = lp_spec.constrained(rp_spec, deps=False)
                        result.setdefault(constrained, set()).add(const)
                    except spack.spec.UnsatisfiableSpecError:
                        continue
        return result
Example #10
    def rpoints(self):
        """Array with the points in real space in reduced coordinates."""
        nx, ny, nz = self.nx, self.ny, self.nz
        rpoints = np.empty((self.size,3))

        for ifft, p1_fft in enumerate(iproduct(range(nx), range(ny), range(nz))):
            rpoints[ifft,:] = p1_fft[0]/nx, p1_fft[1]/ny, p1_fft[2]/nz

        return rpoints
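
enumerate(iproduct(range(nx), range(ny), range(nz))) walks the FFT box with
the z index varying fastest, which is exactly numpy's C-order flattening; a
quick sanity sketch:

import numpy as np
from itertools import product as iproduct

nx, ny, nz = 2, 3, 4
order = list(iproduct(range(nx), range(ny), range(nz)))
assert order == list(np.ndindex(nx, ny, nz))   # same as C-order raveling
assert order[1] == (0, 0, 1)                   # z varies fastest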
Example #11
def tracetimes(tracer, s, elements, targets, trlist, quiet=True):
    '''
	For a PathTracer tracer, a 3-D Numpy array s representing a slowness, a
	keymap of source coordinates elements, and a list of targets, trace
	indicated paths to determine arrival times.

	If trlist is True, targets is interpreted as a list of (t, r) indices
	into the keymap elements. Otherwise, targets should be a Numpy array
	(or compatible sequence) of shape (N, 3) such that targets[i] contains
	the world coordinates for the i-th target.

	The return value is a Numpy record array of "timetype" records that
	each indicate transmit and receive indices and the corresponding path
	integral.

	Failure to trace a path will cause its time to be NaN.

	If quiet is not True, a progress bar will be printed to show tracing
	progress.
	'''
    if trlist:
        nrec = len(targets)
        itertr = iter(targets)
        targc = elements
    else:
        M, N = len(elements), len(targets)
        nrec = M * N
        itertr = iproduct(range(M), range(N))
        targc = targets

    times = np.zeros((nrec, ), dtype=timetype)

    tracer.set_slowness(s)

    if not quiet: bar = progressbar.ProgressBar(max_value=nrec)
    else: bar = None

    for i, (t, r) in enumerate(itertr):
        src = elements[t]
        rcv = targc[r]

        record = [t, r]

        try:
            tm = tracer.trace([src, rcv], intonly=True)
        except (ValueError, TraceError):
            tm = float('nan')

        record.append(tm)

        times[i] = tuple(record)

        if not quiet: bar.update(i)

    if not quiet: bar.update(nrec)

    return times
Example #12
    def rpoints(self):
        """Array with the points in real space in reduced coordinates."""
        nx, ny, nz = self.nx, self.ny, self.nz
        rpoints = np.empty((self.size,3))

        for ifft, p1_fft in enumerate(iproduct(range(nx), range(ny), range(nz))):
            rpoints[ifft,:] = p1_fft[0]/nx, p1_fft[1]/ny, p1_fft[2]/nz

        return rpoints
Example #13
    def get_rpoints(self):
        nx, ny, nz = self.nx, self.ny, self.nz
        rpoints = np.empty((self.size,3))

        for ifft, p1_fft in enumerate(iproduct(range(nx), range(ny), range(nz))):
            rpoints[ifft,0] = p1_fft[0] / nx
            rpoints[ifft,1] = p1_fft[1] / ny
            rpoints[ifft,2] = p1_fft[2] / nz

        return rpoints
Example #14
    def make_diag_figure(self, xnames, ynames):
        nobj = len(xnames)

        # initialize subplot size
        gs, fig = ftools.gen_gridspec_fig(
            nobj, add_row=False, border=(0.6, 0.6, 0.2, 0.4),
            space=(0.6, 0.35))

        # set up subplot interactions
        gs_geo = gs.get_geometry()

        fgrid_r, fgrid_c = tuple(list(range(n)) for n in gs_geo)
        gs_iter = iproduct(fgrid_r, fgrid_c)

        # set up kwargs for matplotlib errorbar
        # prefer to change color, then marker
        colors_ = ['C{}'.format(cn) for cn in range(10)]
        markers_ = ['o', 'v', 's', 'P', 'X', 'D', 'H']

        # iterate through rows & columns (rows change fastest)
        # which correspond to different quantities
        for (i, (ri, ci)) in enumerate(gs_iter):

            if i >= len(xnames):
                continue

            # choose axis
            ax = fig.add_subplot(gs[ri, ci])
            kwarg_cycler = cycler(marker=markers_) * \
                           cycler(c=colors_)

            xqty = xnames[i]
            yqty = ynames[i]

            # now iterate through results hdulists
            for (j, (result, kwargs)) in enumerate(
                zip(self.results, kwarg_cycler)):

                kwargs['label'] = result[0].header['PLATEIFU']

                ax = self._add_log_offset_plot(
                    j, xqty=xqty, yqty=yqty, ax=ax, **kwargs)

                ax.tick_params(labelsize=5)

            if i == 0:
                handles_, labels_ = ax.get_legend_handles_labels()
                plt.figlegend(
                    handles=handles_, labels=labels_,
                    loc='upper right', prop={'size': 4.})

        fig.suptitle('PCA fitting diagnostics', size=8.)

        return fig
Example #15
    def match_filenames(self, filenames):
        """Update internal queues with passed filenames. Returns names that match across the head of all queues if
        any are found, or an empty list otherwise.
        """
        # insert
        # we assume that usually we'll just be appending to the end - other options
        # include heapq and bisect, but it probably doesn't really matter
        for filename in filenames:
            qname = self.fname_to_qname_fcn(filename)
            if qname is None:
                global_logger.get().warn(
                    "Could not get queue name for file '%s', skipping" %
                    filename)
                continue
            tpname = self.fname_to_timepoint_fcn(filename)
            if tpname is None:
                global_logger.get().warn(
                    "Could not get timepoint for file '%s', skipping" %
                    filename)
                continue
            self.qname_to_queue[qname].append(tpname)
            self.keys_to_fullnames[(qname, tpname)] = filename

        # maintain sorting and dedup:
        for qname, queue in self.qname_to_queue.iteritems():
            if not is_sorted(queue):
                self.qname_to_queue[qname] = deque(
                    unique_justseen(sorted(list(queue))))

        # all queues are now sorted and unique-ified

        # check for matching first entries across queues
        matching = self.get_matching_first_entry()
        matches = []
        dcs = self.do_check_sequence
        while matching:
            if dcs:
                self.check_sequence(matching)
            matches.append(matching)
            matching = self.get_matching_first_entry()

        # convert matches back to full filenames
        fullnamekeys = list(iproduct(self.qname_to_queue.iterkeys(), matches))
        fullnames = [self.keys_to_fullnames.pop(key) for key in fullnamekeys]
        fullnames.sort()

        # filter out files that are smaller than the first file to be added to the queue, if requested
        # this attempts to check for and work around an error state where some files are incompletely
        # transferred
        if self.qname_to_expected_size is not None:
            fullnames = self.filter_size_mismatch_files(fullnames)

        return fullnames
Example #16
    def __str__(self):
        message = ''
        # header
        for var in self.var:
            message = message + 'v' + str(var) + '\t'
        message = message + 'value\n'

        # iterate each
        cards = [range(i) for i in self.card]
        for comb in iproduct(*cards):
            for item in comb:
                message = message + str(item) + '\t'
            message = message + str(self.val[comb]) + '\n'
        return message
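
iproduct(*cards) enumerates every joint assignment of the discrete variables
in row order, which is what makes this a one-loop table printer. A standalone
sketch with made-up cardinalities:

from itertools import product as iproduct

card = [2, 3]                      # v0 is binary, v1 is ternary
cards = [range(i) for i in card]
for comb in iproduct(*cards):
    print(*comb, sep='\t')         # rows (0,0), (0,1), ..., (1,2)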
Example #17
    def match_filenames(self, filenames):
        """Update internal queues with passed filenames. Returns names that match across the head of all queues if
        any are found, or an empty list otherwise.
        """
        # insert
        # we assume that usually we'll just be appending to the end - other options
        # include heapq and bisect, but it probably doesn't really matter
        for filename in filenames:
            qname = self.fname_to_qname_fcn(filename)
            if qname is None:
                global_logger.get().warn("Could not get queue name for file '%s', skipping" % filename)
                continue
            tpname = self.fname_to_timepoint_fcn(filename)
            if tpname is None:
                global_logger.get().warn("Could not get timepoint for file '%s', skipping" % filename)
                continue
            self.qname_to_queue[qname].append(tpname)
            self.keys_to_fullnames[(qname, tpname)] = filename

        # maintain sorting and dedup:
        for qname, queue in self.qname_to_queue.iteritems():
            if not is_sorted(queue):
                self.qname_to_queue[qname] = deque(unique_justseen(sorted(list(queue))))

        # all queues are now sorted and unique-ified

        # check for matching first entries across queues
        matching = self.get_matching_first_entry()
        matches = []
        dcs = self.do_check_sequence
        while matching:
            if dcs:
                self.check_sequence(matching)
            matches.append(matching)
            matching = self.get_matching_first_entry()

        # convert matches back to full filenames
        fullnamekeys = list(iproduct(self.qname_to_queue.iterkeys(), matches))
        fullnames = [self.keys_to_fullnames.pop(key) for key in fullnamekeys]
        fullnames.sort()

        # filter out files that are smaller than the first file to be added to the queue, if requested
        # this attempts to check for and work around an error state where some files are incompletely
        # transferred
        if self.qname_to_expected_size is not None:
            fullnames = self.filter_size_mismatch_files(fullnames)

        return fullnames
Example #18
    def product(self, other: 'Relation') -> 'Relation':
        '''
        Cartesian product. Attributes of the relations must differ.
        '''

        if (not isinstance(other, Relation)):
            raise Exception('Operand must be a relation')
        if self.header.sharedAttributes(other.header) != 0:
            raise Exception(
                _('Unable to perform product on relations with colliding attributes'
                  ))
        header = Header(self.header + other.header)

        content = frozenset(i + j
                            for i, j in iproduct(self.content, other.content))
        return Relation(header, content)
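
Since a relation's content is a set of tuples, the Cartesian product reduces
to tuple concatenation over iproduct. The core step in isolation, outside the
Relation/Header classes (toy single-attribute contents):

from itertools import product as iproduct

r = frozenset({('alice',), ('bob',)})
s = frozenset({(1,), (2,)})
content = frozenset(i + j for i, j in iproduct(r, s))
print(sorted(content))  # [('alice', 1), ('alice', 2), ('bob', 1), ('bob', 2)]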
Example #19
def getFields(src, nm, NL):
    """getFields( src, nm, NL ) -- parse C source src to find 
     definition of struct nm (all fields must be doubles) and
     list the fields. 
     
     The legacy form of this function handles arrays only with the 
     form double foo [NUM_LEGS]. In this case, NL is an integer and
     the entry is expanded to a list of fields foo_0, foo_1, ..., foo_(NL-1)
     
     In its newer form, NL is a dictionary mapping names to tuples of
     integers. The field double bar [BAR], with NL = {'BAR':(2,3)} 
     gives fields bar_0_0, bar_0_1, bar_0_2, bar_1_0, bar_1_1, bar_1_2
     In addition, integers in the brackets are converted to vector
     lengths, i.e. double foo[2] becomes foo_0, foo_1.     
  """
    # Backward compatible NL format
    if type(NL) != dict:
        NL = {'NUM_LEGS': (NL, )}
    sl = getSlice(src, re.compile('\s*struct\s+%s\s*[{].*' % nm),
                  re.compile('\s*[}]\s*[;][^E]*ENDS:\s+struct\s+%s.*' % nm))
    if not sl:
        return None
    sl.pop(0)
    res = []
    rex = re.compile('\s*double\s+(\w+)\s*(?:\[\s*(\w+)\s*\])?.*')
    for item in sl:
        m = rex.match(item)
        if m is None:
            raise ValueError, "Line '%s' cannot be parsed" % item
        var = m.group(1)
        if m.group(2):
            # Try to resolve from NL dictionary
            sz = NL.get(m.group(2), None)
            # If failed --> resolve as integer
            if sz is not None:
                idx = [
                    var + "".join(["_%d" % k for k in x])
                    for x in iproduct(*map(xrange, sz))
                ]
            else:
                sz = int(m.group(2))
                idx = ["%s_%d" % (var, idx) for idx in xrange(sz)]
            res.extend(idx)
        else:
            res.append(var)
    return res
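
iproduct(*map(xrange, sz)) turns an array shape into every multi-index, each
of which becomes a _i_j suffix on the scalar field name. The same expansion in
Python 3 (range in place of xrange):

from itertools import product as iproduct

var, sz = 'bar', (2, 3)
idx = [var + ''.join('_%d' % k for k in x) for x in iproduct(*map(range, sz))]
print(idx)  # ['bar_0_0', 'bar_0_1', 'bar_0_2', 'bar_1_0', 'bar_1_1', 'bar_1_2']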
Example #20
def getFields( src, nm, NL ):
  """getFields( src, nm, NL ) -- parse C source src to find 
     definition of struct nm (all fields must be doubles) and
     list the fields. 
     
     The legacy form of this function handles arrays only with the 
     form double foo [NUM_LEGS]. In this case, NL is an integer and
     the entry is expanded to a list of fields foo_0, foo_1, ..., foo_(NL-1)
     
     In its newer form, NL is a dictionary mapping names to tuples of
     integers. The field double bar [BAR], with NL = {'BAR':(2,3)} 
     gives fields bar_0_0, bar_0_1, bar_0_2, bar_1_0, bar_1_1, bar_1_2
     In addition, integers in the brackets are converted to vector
     lengths, i.e. double foo[2] becomes foo_0, foo_1.     
  """
  # Backward compatible NL format
  if type(NL) != dict:
    NL = {'NUM_LEGS' : (NL,) }
  sl = getSlice( src, 
    re.compile('\s*struct\s+%s\s*[{].*' % nm),
    re.compile('\s*[}]\s*[;][^E]*ENDS:\s+struct\s+%s.*' % nm) )
  if not sl:
    return None
  sl.pop(0)
  res = []
  rex = re.compile('\s*double\s+(\w+)\s*(?:\[\s*(\w+)\s*\])?.*')
  for item in sl:
    m = rex.match(item)
    if m is None:
      raise ValueError,"Line '%s' cannot be parsed" % item
    var = m.group(1)
    if m.group(2):
      # Try to resolve from NL dictionary
      sz = NL.get(m.group(2),None)
      # If failed --> resolve as integer
      if sz is not None:
        idx = [ var+"".join(["_%d" % k for k in x]) 
            for x in iproduct(*map(xrange,sz))]
      else:
        sz = int(m.group(2))
        idx = [ "%s_%d" % (var,idx) for idx in xrange(sz) ]
      res.extend(idx)
    else:
      res.append(var)
  return res
Example #21
def encode(dic, degree, num_types):
    dic = {
        k if type(v) in num_types else f'{k}={v}':
        float(v) if type(v) in num_types else 1
        for k, v in dic.items()
    }
    dic_keys = list(dic.keys())
    for deg in range(2, degree + 1):
        for term_keys in iproduct(dic_keys, repeat=deg):
            term_names, term_facts = [], []
            for k, n in Counter(term_keys).items():
                v = dic[k]
                if type(v) is int and n > 1:
                    break
                term_names.append(k if n == 1 else f'{k}^{n}')
                term_facts.append(v**n)
            else:  # No dummy feature was included more than once
                dic['*'.join(sorted(term_names))] = product(term_facts)
    return dic
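
A usage sketch for the function above; math.prod is assumed here as a
stand-in for the product() helper that the snippet does not show:

from collections import Counter
from itertools import product as iproduct
from math import prod as product   # assumption: stand-in for product()

features = encode({'age': 30, 'city': 'NY'}, degree=2, num_types=(int, float))
print(sorted(features))
# ['age', 'age*city=NY', 'age^2', 'city=NY'] -- no 'city=NY^2', since the
# for/else skips any term that reuses a dummy feature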
Example #22
def _distance_2(misorientation, verbose):
    if misorientation.size > 1e4:  # pragma: no cover
        confirm = input('Large datasets may crash your RAM.\nAre you sure? (y/n) ')
        if confirm != 'y':
            raise InterruptedError('Aborted')
    from itertools import product as iproduct
    S_1, S_2 = misorientation._symmetry
    mis2orientation = (~misorientation).outer(S_1).outer(S_1).outer(misorientation)
    distance = np.full(misorientation.shape + misorientation.shape, np.infty)
    symmetry_pairs = iproduct(S_2, S_2)
    if verbose:
        from tqdm import tqdm
        symmetry_pairs = tqdm(symmetry_pairs, total=S_2.size ** 2)
    for s_1, s_2 in symmetry_pairs:
        m = s_1 * mis2orientation * s_2
        axis = (len(misorientation.shape), len(misorientation.shape) + 1)
        angle = m.angle.data.min(axis=axis)
        distance = np.minimum(distance, angle)
    return distance
Example #23
def srcint(k, src, obs, cell, ifunc, n = 4, wts = None):
	'''
	Evaluate source integration, of order n, of the pairwise Green's
	function for wave number k from source location src to observer
	location obs. The list cell specifies the dimensions of the cell.

	The pairwise Green's function function ifunc takes arguments (k, s, o),
	where k is the wave number, s is the source location and o is the
	observer location. The source position s varies throughout the cell
	centered at src according to Gauss-Legendre quadrature rules.

	If specified, wts should be an n-by-2 array (or list of lists) in which
	the first column contains the quadrature points and the second column
	contains the corresponding quadrature weights.
	'''

	dim = len(src)

	if len(obs) != dim: raise ValueError('Dimension of src and obs must agree.')
	if len(cell) != dim: raise ValueError('Dimension of src and cell must agree.')

	# Compute the node scaling factor
	sc = [0.5 * c for c in cell]

	# Grab the roots and weights of the Legendre polynomial of order n
	if wts is None: wts = spec.legendre(n).weights

	# Compute a sparse coordinate grid for sampling within the cell
	coords = np.ogrid[[slice(n) for i in range(dim)]]
	# Create an iterator factory to loop through coordinate pairs
	enum = lambda c: iproduct(*[cv.flat for cv in c])

	# Compute the cell-relative quadrature points
	qpts = ([o + s * wts[i][0] for i, o, s in zip(c, src, sc)] for c in enum(coords))

	# Compute the corresponding quadrature weights
	qwts = (cutil.prod(wts[i][1] for i in c) for c in enum(coords))

	# Sum all contributions to the integral
	ival = np.sum(w * ifunc(k, p, obs) for w, p in zip(qwts, qpts))

	return ival * cutil.prod(cell) / 2.**dim
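
srcint builds a tensor-product Gauss-Legendre rule by looping iproduct over
per-axis node indices. A self-contained 2-D version of that idea over
[-1, 1]^2, using numpy's leggauss in place of the project's spec and cutil
helpers:

import numpy as np
from itertools import product as iproduct

n = 4
pts, wts = np.polynomial.legendre.leggauss(n)   # 1-D nodes and weights

# Tensor-product rule: integrate f over [-1, 1]^2
f = lambda x, y: x**2 * y**2
total = sum(wts[i] * wts[j] * f(pts[i], pts[j])
            for i, j in iproduct(range(n), range(n)))
print(total)   # ~ 4/9, exact for this polynomial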
Example #24
    def irottable(self, symmops):
        nsym = len(symmops)
        nx, ny, nz = self.nx, self.ny, self.nz

        red2fft = np.diag([nx, ny, nz])
        fft2red = np.diag([1 / nx, 1 / ny, 1 / nz])

        # For a fully compatible mesh, each mat in rotsm1_fft should be integer
        rotsm1_fft, tnons_fft = np.empty((nsym, 3, 3)), np.empty((nsym, 3))

        for isym, symmop in enumerate(symmops):
            rotm1_r, tau = symmop.rotm1_r, symmop.tau
            rotsm1_fft[isym] = np.dot(np.dot(red2fft, rotm1_r), fft2red)
            tnons_fft[isym] = np.dot(red2fft, tau)

        # Indices of $R^{-1}(r-\tau)$ in the FFT box.
        irottable = np.empty((nsym, nx * ny * nz), dtype=np.int)

        #max_err = 0.0
        nxyz = np.array((nx, ny, nz), np.int)
        for isym in range(nsym):
            rm1_fft = rotsm1_fft[isym]
            tau_fft = tnons_fft[isym]
            for ifft, p1_fft in enumerate(
                    iproduct(range(nx), range(ny), range(nz))):
                # Form R^-1 (r-\tau) in the FFT basis.
                p1_fft = np.array(p1_fft)
                prot_fft = np.dot(rm1_fft, p1_fft - tau_fft)
                #err = ABS(prot_fft - (ix, iy, iz)) / (nx, ny, nz)
                prot_fft = np.round(prot_fft)
                jx, jy, jz = prot_fft % nxyz
                irottable[isym, ifft] = jz + (jy * nz) + (jx * nx * ny)

        # Test
        #for isym in range(nsym):
        #    irottable[isym, ifft]
        #    rm1_fft = rotsm1_fft[isym]
        #    tau_fft = tnons_fft[isym]
        #    for ifft, p1_fft in enumerate(itertools.product(range(nx), range(ny), range(nz))):
        #        irot_fft == irottable[isym, ifft]

        return irottable
Example #25
    def set_symmetry(self, Gl, Gr, verbose=False):
        """Assign symmetries to this misorientation.

        Computes equivalent transformations which have the smallest angle of
        rotation and assigns these in-place.

        Parameters
        ----------
        Gl, Gr : Symmetry

        Returns
        -------
        Misorientation
            A new misorientation object with the assigned symmetry.

        Examples
        --------
        >>> from orix.quaternion.symmetry import C4, C2
        >>> data = np.array([[0.5, 0.5, 0.5, 0.5], [0, 1, 0, 0]])
        >>> m = Misorientation(data).set_symmetry(C4, C2)
        >>> m
        Misorientation (2,) 4, 2
        [[-0.7071  0.     -0.7071  0.    ]
         [ 0.      0.7071 -0.7071  0.    ]]

        """
        symmetry_pairs = iproduct(Gl, Gr)
        if verbose:
            import tqdm

            symmetry_pairs = tqdm.tqdm(symmetry_pairs, total=Gl.size * Gr.size)
        orientation_region = OrientationRegion.from_symmetry(Gl, Gr)
        o_inside = self.__class__.identity(self.shape)
        outside = np.ones(self.shape, dtype=bool)
        for gl, gr in symmetry_pairs:
            o_transformed = gl * self[outside] * gr
            o_inside[outside] = o_transformed
            outside = ~(o_inside < orientation_region)
            if not np.any(outside):
                break
        o_inside._symmetry = (Gl, Gr)
        return o_inside
Example #26
def _run(n, query, stream):
    iters = range(n)
    hypothesis_sizes = [2, 4, 8, 16, 32]

    data = dict(np.load('data/50animals.npz'))

    trials = list(iproduct(iters, range(len(hypothesis_sizes))))
    np.random.shuffle(trials)

    trial_data = np.empty((len(iters), len(hypothesis_sizes)), dtype=int)
    for (i, j) in trials:
        hyp_size = hypothesis_sizes[j]
        animal_choices = np.arange(len(data['animal_names']))
        np.random.shuffle(animal_choices)
        animal_choices = animal_choices[:hyp_size]

        animal_index = animal_choices[np.random.randint(hyp_size)]
        animal_name = data['animal_names'][animal_index]
        feature_indices = np.nonzero(data['animal_features'][animal_index])[0]
        np.random.shuffle(feature_indices)

        features = list(data['feature_names'][feature_indices])
        animals = list(data['animal_names'][animal_choices])

        guessed = False
        num_guesses = 0
        stream.write("-" * 70 + "\n")
        while not guessed:
            animal_guess = query(features[:2 + num_guesses], animals)
            if animal_guess == animal_name:
                stream.write("Correct!\n")
                guessed = True
            else:
                stream.write("Sorry, try again.\n")

            stream.write("\n")
            num_guesses += 1

        trial_data[i, j] = num_guesses

    return trial_data
Example #27
def _run(n, query, stream):
    iters = range(n)
    hypothesis_sizes = [2, 4, 8, 16, 32]

    data = dict(np.load('data/50animals.npz'))

    trials = list(iproduct(iters, range(len(hypothesis_sizes))))
    np.random.shuffle(trials)

    trial_data = np.empty((len(iters), len(hypothesis_sizes)), dtype=int)
    for (i, j) in trials:
        hyp_size = hypothesis_sizes[j]
        animal_choices = np.arange(len(data['animal_names']))
        np.random.shuffle(animal_choices)
        animal_choices = animal_choices[:hyp_size]

        animal_index = animal_choices[np.random.randint(hyp_size)]
        animal_name = data['animal_names'][animal_index]
        feature_indices = np.nonzero(data['animal_features'][animal_index])[0]
        np.random.shuffle(feature_indices)

        features = list(data['feature_names'][feature_indices])
        animals = list(data['animal_names'][animal_choices])

        guessed = False
        num_guesses = 0
        stream.write("-" * 70 + "\n")
        while not guessed:
            animal_guess = query(features[:2 + num_guesses], animals)
            if animal_guess == animal_name:
                stream.write("Correct!\n")
                guessed = True
            else:
                stream.write("Sorry, try again.\n")

            stream.write("\n")
            num_guesses += 1

        trial_data[i, j] = num_guesses

    return trial_data
Example #28
    def irottable(self, symmops):
        nsym = len(symmops)
        nx, ny, nz = self.nx, self.ny, self.nz

        red2fft = np.diag([nx, ny, nz])
        fft2red = np.diag([1/nx, 1/ny, 1/nz])

        # For a fully compatible mesh, each mat in rotsm1_fft should be integer
        rotsm1_fft, tnons_fft = np.empty((nsym,3,3)), np.empty((nsym,3))

        for isym, symmop in enumerate(symmops):
            rotm1_r, tau = symmop.rotm1_r, symmop.tau
            rotsm1_fft[isym] = np.dot(np.dot(red2fft, rotm1_r), fft2red)
            tnons_fft[isym] = np.dot(red2fft, tau)

        # Indices of $R^{-1}(r-\tau)$ in the FFT box.
        irottable = np.empty((nsym, nx*ny*nz), dtype=np.int)

        #max_err = 0.0
        nxyz = np.array((nx, ny, nz), np.int)
        for isym in range(nsym):
            rm1_fft = rotsm1_fft[isym]
            tau_fft = tnons_fft[isym]
            for ifft, p1_fft in enumerate(iproduct(range(nx), range(ny), range(nz))):
                # Form R^-1 (r-\tau) in the FFT basis.
                p1_fft = np.array(p1_fft)
                prot_fft = np.dot(rm1_fft, p1_fft - tau_fft)
                #err = ABS(prot_fft - (ix, iy, iz)) / (nx, ny, nz)
                prot_fft = np.round(prot_fft)
                jx, jy, jz = prot_fft % nxyz
                irottable[isym, ifft] = jz + (jy * nz) + (jx * nx * ny)

        # Test
        #for isym in range(nsym):
        #    irottable[isym, ifft]
        #    rm1_fft = rotsm1_fft[isym]
        #    tau_fft = tnons_fft[isym]
        #    for ifft, p1_fft in enumerate(itertools.product(range(nx), range(ny), range(nz))):
        #        irot_fft == irottable[isym, ifft]

        return irottable
Example #29
def make_grid(scale, minx, maxx, miny, maxy, mult=True):
    gridx = arange(minx, maxx, scale)
    gridy = arange(miny, maxy, scale)

    if mult:
        pool = multiprocessing.Pool(None)
        tasks = list(iproduct(gridx, gridy))
        results = []
    
        r = pool.map_async(fmult, tasks, callback=results.append)
        r.wait() # Wait on the results
        
        rr = zeros((len(gridx), len(gridy)), dtype='object')
        def insert(x):
            rr[int((x[0] - minx)/scale)][int((x[1] - miny)/scale)] = x[2]
    
        for x in results[0]:
            insert(x)

        return rr
    else:
        return array([ [ F(i + j*1j) for j in gridy ] for i in gridx ])
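
The pattern above is a parallel map over the grid's Cartesian product. A
minimal sketch with a picklable top-level worker standing in for the
project-specific fmult and F:

import multiprocessing
from itertools import product as iproduct

def work(pt):                      # must be top-level to be picklable
    x, y = pt
    return x, y, x * x + y * y

if __name__ == '__main__':
    tasks = list(iproduct(range(3), range(3)))
    with multiprocessing.Pool() as pool:
        results = pool.map(work, tasks)
    print(results[:3])             # [(0, 0, 0), (0, 1, 1), (0, 2, 4)]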
Example #30
    def get_stable_states(self):
        if self.weight is None:
            raise Exception("Weight matrix is empty!")
        self.text_output.append(
            u"<h2>Sinhrono pridobivanje stabilnih stanj</h2>")
        if self.function_type == 0:
            self.text_output.append(r"$$y =  \left\{\begin{matrix} 0 & \mbox {if } v < 0, \\ 1 & \mbox{if } v > 0, \\ \text{enako kot prej} & \mbox{if } v=0\end{matrix}\right.$$")
        elif self.function_type == 1:
            self.text_output.append(r"$$y =  \left\{\begin{matrix} -1 & \mbox {if } v < 0, \\ +1 & \mbox{if } v > 0, \\ \text{enako kot prej} & \mbox{if } v=0\end{matrix}\right.$$")
            self.text_output.append(u'<span class="label label-warning">Opozorilo</span>Ta funkcija je še v beta stanju. Nevem namreč kako računat oznake</p>')
        self.number_of_weights = len(self.weight)
        weights_label = ["\(y_%d\)" % i for i in range(1,
                                                       self.number_of_weights + 1)]
        header = weights_label + ["Oznaka"] + weights_label + ["Oznaka"]
        self.table.append(header)
        truth_table = list(
            iproduct([0, 1], repeat=int(self.number_of_weights)))
        for index, input_row in enumerate(truth_table):
            self.text_output.append("<h3>Vrstica %d</h3>" % (index + 1, ))
            table_row = []
            table_row.extend(input_row)
            table_row.append(index)
            for i, input_y in enumerate(input_row):
                v_i = self.v_i(i, index + 1, input_row)
                y_i = self.y_func(v_i, input_y)
                table_row.append(y_i)
            if self.function_type == 0:
                oznaka_bin = "".join(
                    map(str, table_row[-self.number_of_weights:]))
                oznaka = int(oznaka_bin, 2)
                if oznaka == index:
                    oznaka = {'val': oznaka}
            else:
                oznaka = "??"
            table_row.append(oznaka)

            self.table.append(table_row)
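
iproduct([0, 1], repeat=n) enumerates all 2**n truth-table rows in ascending
binary order, which is why the joined output bits can double as a row label.
A tiny standalone check:

from itertools import product as iproduct

n = 3
for index, row in enumerate(iproduct([0, 1], repeat=n)):
    assert int(''.join(map(str, row)), 2) == index   # binary counting order
print('all', 2**n, 'rows enumerated')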
Example #31
def gen_mappings(assignments):
    from itertools import product as iproduct

    I, J = assignments
    m = len(unique(I))
    M = I.max() + 1

    if m == len(I):
        mappings = [(I, J)]
    else:
        pool = [[] for _ in range(M)]
        for i, j in zip(I, J):
            pool[i].append((i, j))

        mappings = []
        for mapping in iproduct(*pool):
            r = zeros(len(mapping), dtype=int)
            c = zeros(len(mapping), dtype=int)
            for i, pair in enumerate(mapping):
                r[i] = pair[0]
                c[i] = pair[1]
            mappings.append((r, c))
        
    return mappings
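
iproduct(*pool) picks one candidate pair from each bucket, enumerating every
way to resolve the ambiguous assignments. The core step on toy buckets:

from itertools import product as iproduct

pool = [[(0, 'a'), (0, 'b')],      # index 0 could map to 'a' or 'b'
        [(1, 'c')]]                # index 1 is unambiguous
for mapping in iproduct(*pool):
    print(mapping)
# ((0, 'a'), (1, 'c'))
# ((0, 'b'), (1, 'c'))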
Example #32
    def create(cls, params):
        """Create the tasks dictionary from the parameters."""

        sim_root = path(params["sim_root"])
        if not sim_root.exists():
            sim_root.makedirs_p()

        floor_path = CPO_PATH.joinpath(params['floor_path'])
        cpo_paths = [CPO_PATH.joinpath(x) for x in params['cpo_paths']]

        index_names = params['index_names']
        index_levels = params['index_levels']
        cpos_rec_names = index_levels['object']
        record_intervals = list(np.diff(index_levels['timestep'][1:]))

        cond_names = [
            'sigma',
            'phi',
            'kappa',
            'stimulus',
            'sample',
        ]

        conditions = list(
            iproduct(*[
                enumerate(index_levels[x]) for x in cond_names
                if x != 'stimulus'
            ]))
        n_conditions = len(conditions)
        n_chunks = int(np.ceil(n_conditions / float(params['max_chunk_size'])))
        chunks = np.array_split(np.arange(n_conditions), n_chunks, axis=0)
        base_shape = [
            len(index_levels[x]) for x in index_names if x not in cond_names
        ]

        tasks = cls()
        completed = cls()
        for icpo, cp in enumerate(cpo_paths):
            for ichunk, chunk_idx in enumerate(chunks):
                sim_name = "%s_%s_%02d" % (cp.namebase, params["tag"], ichunk)
                data_path = sim_root.joinpath("%s.npy" % sim_name)
                chunk = [conditions[i] for i in chunk_idx]
                shape = [len(chunk)] + base_shape

                # Make the task dicts for this sample.
                tasks[sim_name] = {
                    "icpo": icpo,
                    "floor_path": str(floor_path),
                    "cpo_path": str(cp),
                    "data_path": str(data_path),
                    "script_root": params["script_root"],
                    "task_name": sim_name,
                    "bodies": cpos_rec_names,
                    "seed": abs(hash(sim_name)),
                    "conditions": chunk,
                    "record_intervals": record_intervals,
                    "shape": shape,
                    "num_tries": 0,
                }

                completed[sim_name] = False

        return tasks, completed
Example #33
HINT: Some products can be obtained in more than one way so be sure to only include it once in your sum.
"""

# Observations:
# The product cannot have three or fewer digits: the two operands would then
# contain at least six digits between them, and the smallest product of
# operands with six digits between them is 11111 x 1 = 11111, which is greater
# than any three-digit number.
# Likewise, the product cannot have five or more digits: the operands would
# then contain at most four digits between them, and the largest such product
# is 99 x 99 = 9801, which is smaller than any five-digit number.
# Thus, the product must have four digits.

from itertools import permutations, combinations, product as iproduct, imap

digits = set([1,2,3,4,5,6,7,8,9])

def digits2num(digits):
    return reduce(lambda x, d: x*10 + d, digits)

def num2digits(num):
    return set(map(int, str(num)))

pandigital_products = set()
for operand_digits in imap(set, combinations(digits, 5)):
    for num_multiplicand_digits in range(3,4+1):
        for multiplicand_digits in imap(set, combinations(operand_digits, num_multiplicand_digits)):
            multiplier_digits = operand_digits - multiplicand_digits
            for multiplicand, multiplier in iproduct(imap(digits2num, permutations(multiplicand_digits)), imap(digits2num, permutations(multiplier_digits))):
                product = multiplicand * multiplier
                product_digits = num2digits(product)
                if len(str(product)) + len(operand_digits) == 9 and 0 not in product_digits and len(product_digits | operand_digits) == 9:
                    pandigital_products.add(product)

print sum(pandigital_products)
Example #34
from euler import *
from itertools import product as iproduct


st_digs = [2, 3, 5, 7]
md_digs = [1, 3, 5, 7, 9]
trunc_rl = set(st_digs)
trunc_lr = set(st_digs)
trunc = set()

while len(trunc) < 11:
  rl = trunc_rl.copy()
  trunc_rl.clear()

  for x, d in iproduct(rl, md_digs):
    n = x*10 + d
    if is_prime(n):
      trunc_rl.add(n)

  lr = trunc_lr.copy()
  trunc_lr.clear()

  for x, d in iproduct(lr, xrange(1, 10)):
    nd = num_digits(x)
    n = (d * 10**nd) + x
    if is_prime(n):
      trunc_lr.add(n)
      if n in trunc_rl:
        trunc.add(n)

print sum(trunc)
Example #35
def solve_landscape_ntypes(landscape,
                           par,
                           dx,
                           f_tol=None,
                           force_positive=False,
                           verbose=True,
                           debug=False):
    r"""Find the stationary solution for a landscape with many types of habitat.

    Uses a Newton-Krylov solver with LGMRES sparse inverse method to find a
    stationary solution (or the solution to the elliptical problem) to the
    system of equations in 2 dimensions (x is a 2-d vector):

    .. math:: 
        \frac{\partial u_i}{\partial t} = D_i \nabla^2 u_i + 
        r_i u_i\left(1-\frac{u}{K_i}\right) = 0

    Parameters
    ----------
    landscape : 2-d array of ints
        describe the landscape, with any number of habitat types
    par : dict
        parameters (dict keys):

        - r : growth rates (can be negative)
        - K : carrying capacities (can be np.Inf)
        - mu : mortality rate in the matrix
        - D : diffusivities
        - g : dict of habitat discontinuities $\gamma_{ij}$ - see interface
          conditions below. The keys are tuples (i,j) of the habitat
          types indices (optional)
        - alpha : dict of habitat preferences, only taken into account if g is
          not present. In that case, $\gamma_{ij}$ is calculated as
          $\gamma_{ij} = D_j \alpha_{ij} / (D_i*(1-\alpha_{ij}))$ (optional)
        - left : (a, b, c): external boundary conditions at left border
        - right : (a, b, c): external boundary conditions at right border
        - top : (a, b, c): external boundary conditions at top border
        - bottom : (a, b, c): external boundary conditions at bottom border
    dx : float
        length of each edge
    f_tol : float
        tolerance for the residue, passed on to the solver routine.  Default is
        6e-6
    force_positive : bool
        make sure the solution is always non-negative - in a hacky way. Default
        False
    verbose : bool
        print residue of the solution and its maximum and minimum values

    Returns
    -------
    solution : 2-d array of the same shape as the landscape input
        the solution


    .. rubric:: Boundary and interface conditions

    External boundaries are of the form

    .. math:: a \nabla u \cdot \hat{n} + b u + c = 0

    and may be different for left, right, top, bottom.  The derivative of u is
    taken along the normal to the boundary.

    The interfaces between patches and matrix are given by

    .. math::
        u_i(x) &= \gamma_{ij} u_j(x) \\
        D_i \nabla u_i(x) \cdot \hat{n} &= D_j \nabla u_j(x) \cdot \hat{n}

    Usually the discontinuity $\gamma_{ij}$ is a result of different
    diffusivities and preference at the border (see Ovaskainen and Cornell
    2003). In that case, given a preference $\alpha_{ij}$ (between 0 and 1,
    exclusive) towards $i$, this parameter should be:

    .. math:: \gamma_{ij} = \frac{D_j}{D_i} \frac{\alpha_{ij}}{1-\alpha_{ij}}

    This last condition is used in case $\gamma$ is not set. If $\alpha$ isn't
    set either, it's assumed $\alpha = 1/2$. Notice that $\alpha_{ij} +
    \alpha_{ji} = 1$, and so $\gamma_{ij} = \gamma_{ji}^{-1}$. To ensure this
    condition, the key (i,j) is always taken with $i>j$.

    These conditions are handled using an asymmetric finite difference scheme
    for the 2nd derivative:

    .. math:: u_{xx}(x) = \frac{4}{3h^2} (u(x-h) - 3 u(x) + 2 u(x+h/2))

    At the interface, $u(x+h/2)$ and $v(x+h/2)$ must obey:

    .. math::
        u(x+h/2) &= \gamma v(x+h/2) \\
        D_p (u(x+h/2) - u(x))  &= D_m (v(x+h) - v(x+h/2))

    Solving this system, we arrive at the approximation at the interface:

    .. math:: u(x+h/2) = \frac{D_m v(x+h)+D_p u(x)}{D_p+D_m / \gamma}

    if u(x) is in a patch and v(x+h) is in the matrix, or

    .. math:: v(x+h/2) = \frac{D_m v(x)+D_p u(x+h)}{D_p \gamma +D_m}

    if v(x) is in the matrix and u(x+h) is in a patch.

    Example
    -------
    >>> # simple patch/matrix
    >>> from landscape import *
    >>> parn = OrderedDict([
        ('r', [-0.03, 0.1]),
        ('K', [np.Inf, 1.0]),
        ('D', [0.001, 0.0001]),
        ('left', [1.0, 0.0, 0.0]),
        ('right', [1.0, 0.0, 0.0]),
        ('top', [1.0, 0.0, 0.0]),
        ('bottom', [1.0, 0.0, 0.0])
        ])
    >>> l = np.zeros((100,100), dtype=int)
    >>> l[40:60, 40:60] = 1
    >>> sol = solve_landscape_ntypes(l, parn, dx)

    """
    from scipy.optimize import newton_krylov

    n = np.unique(landscape).astype(int)

    p = par.copy()
    if 'g' not in p.keys():
        p['g'] = {}
        if 'alpha' not in p.keys():
            p['alpha'] = {}
            for i, j in iproduct(n, repeat=2):
                if i > j:
                    p['alpha'][(i, j)] = 0.5
        for i, j in iproduct(n, repeat=2):
            if i > j:
                p['g'][(i,j)] = p['D'][j]/p['D'][i] * \
                                  p['alpha'][(i,j)] / (1-p['alpha'][(i,j)])

    # this ensures the consistency of the interface discontinuities
    # it ignores the values of g_ij with i < j, replacing it by 1/g_ji
    for i, j in iproduct(n, repeat=2):
        if i < j:
            p['g'][(i, j)] = 1 / p['g'][(j, i)]

    (al, bl, cl) = p['left']
    (ar, br, cr) = p['right']
    (at, bt, ct) = p['top']
    (ab, bb, cb) = p['bottom']

    D = np.zeros_like(landscape, dtype=np.float_)
    r = np.zeros_like(landscape, dtype=np.float_)
    c = np.zeros_like(landscape, dtype=np.float_)
    for i in n:
        li = np.where(landscape == i)
        D[li] = p['D'][i]
        r[li] = p['r'][i]
        c[li] = -p['r'][i] / p['K'][i]

    Bx, By = find_interfaces_ntypes(landscape)
    factor = {}
    for i, j in iproduct(n, repeat=2):
        if i != j:
            factor[(i, j)] = (
                # coefficient of term u(x) in u_xx(x)
                -2. + 8. / 3 * p['D'][i] /
                (p['D'][i] + p['D'][j] / p['g'][(i, j)]),
                # coefficient of term u(x+h) in u_xx(x)
                -1. + 8. / 3 * p['D'][j] /
                (p['D'][i] + p['D'][j] / p['g'][(i, j)]))

    shapewb = (landscape.shape[0] + 2, landscape.shape[1] + 2)
    Bxleft = np.zeros(shapewb, dtype=np.float_)
    Bxcenter = np.zeros(shapewb, dtype=np.float_)
    Bxright = np.zeros(shapewb, dtype=np.float_)
    Byleft = np.zeros(shapewb, dtype=np.float_)
    Bycenter = np.zeros(shapewb, dtype=np.float_)
    Byright = np.zeros(shapewb, dtype=np.float_)
    for i, j in factor.keys():
        ## direction x
        # patch type i
        Bxcenter[:, 1:-1][shifted_index(Bx[i, j], 0, 1)] += factor[i, j][0]
        Bxleft[:, 1:-1][shifted_index(Bx[i, j], 0, 0)] += 1. / 3
        Bxright[:, 1:-1][shifted_index(Bx[i, j], 0, 2)] += factor[i, j][1]
        # patch type j
        Bxcenter[:, 1:-1][shifted_index(Bx[i, j], 0, 2)] += factor[j, i][0]
        Bxleft[:, 1:-1][shifted_index(Bx[i, j], 0, 1)] += factor[j, i][1]
        Bxright[:, 1:-1][shifted_index(Bx[i, j], 0, 3)] += 1. / 3
        ## direction y
        # patch type i
        Bycenter[1:-1, :][shifted_index(By[i, j], 1, 1)] += factor[i, j][0]
        Byleft[1:-1, :][shifted_index(By[i, j], 1, 0)] += 1. / 3
        Byright[1:-1, :][shifted_index(By[i, j], 1, 2)] += factor[i, j][1]
        # patch type j
        Bycenter[1:-1, :][shifted_index(By[i, j], 1, 2)] += factor[j, i][0]
        Byleft[1:-1, :][shifted_index(By[i, j], 1, 1)] += factor[j, i][1]
        Byright[1:-1, :][shifted_index(By[i, j], 1, 3)] += 1. / 3

    def residual(P):
        if force_positive:
            P = np.abs(P)

        P = np.pad(P, 1, 'constant')
        # external boundaries
        P[0, :] = (-cl - al / dx * P[0, :]) / (bl - al / dx)
        P[-1, :] = (-cr + ar / dx * P[-1, :]) / (br + ar / dx)
        P[:, 0] = (-cb - ab / dx * P[:, 0]) / (bb - ab / dx)
        P[:, -1] = (-ct + at / dx * P[:, -1]) / (bt + at / dx)

        d2x = np.zeros_like(P)
        d2x[1:-1, :] = P[2:, :] - 2 * P[1:-1, :] + P[:-2, :]
        # interface conditions
        d2x[1:-1, :] += (Bxcenter[1:-1, :] * P[1:-1, :] +
                         Bxleft[:-2, :] * P[:-2, :] +
                         Bxright[2:, :] * P[2:, :])

        d2y = np.zeros_like(P)
        d2y[:, 1:-1] = P[:, 2:] - 2 * P[:, 1:-1] + P[:, :-2]
        # interface conditions
        d2y[:, 1:-1] += (Bycenter[:, 1:-1] * P[:, 1:-1] +
                         Byleft[:, :-2] * P[:, :-2] +
                         Byright[:, 2:] * P[:, 2:])

        return D*(d2x[1:-1,1:-1] + d2y[1:-1,1:-1])/dx/dx + r*P[1:-1,1:-1] \
                + c*P[1:-1,1:-1]**2

    # solve
    guess = r.copy()
    guess[guess > 0] = 1 / ((-c / r)[guess > 0])
    guess[guess <= 0] = 1e-6

    if debug:
        return guess, residual

    sol = newton_krylov(residual, guess, method='lgmres', f_tol=f_tol)

    if force_positive:
        sol = np.abs(sol)
    if verbose:
        print('Residual: %e' % abs(residual(sol)).max())
        print('max. pop.: %f' % sol.max())
        print('min. pop.: %f' % sol.min())

    return sol
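
The half-step interface value derived in the docstring can be checked
numerically: for arbitrary D_p, D_m and gamma, the closed form
u(x+h/2) = (D_m v + D_p u) / (D_p + D_m / gamma) must satisfy both interface
conditions. A quick sketch using nothing beyond the docstring's formulas:

Dp, Dm, gamma = 0.001, 0.0001, 2.5   # arbitrary positive values
u, v = 0.8, 0.3                      # u(x) in the patch, v(x+h) in the matrix

u_half = (Dm * v + Dp * u) / (Dp + Dm / gamma)
v_half = u_half / gamma              # interface condition u = gamma * v

# flux continuity: D_p (u(x+h/2) - u(x)) == D_m (v(x+h) - v(x+h/2))
assert abs(Dp * (u_half - u) - Dm * (v - v_half)) < 1e-15
print('interface formula consistent')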
Example #36
def go(n):
    # order to calculate up to
    #n = 4

    # dimension of the states base
    d = n + 2

    # number of time scales
    T = 1 + n//2
    
    z = S.Symbol('z', real=True) # perturbation \epsilon << 1
    s = S.Symbol('s', real=True) # parameter \sigma ~ ord(1)
    g = S.Symbol('g', real=True) # forcing oscillation parameter
    # time variables
    ts = [ S.Symbol('t%i' % i, real=True) for i in range(T) ]
    
    # coefficients array
    # not including the ones we know are zero
    c = N.array([S.Symbol("c_%i%i" % (i, j), complex=True) if j <= i+1 and j % 2 == (i+1) % 2 else S.sympify(0) for (i, j) in iproduct(range(n+1), range(d))]).reshape(n+1, d)

    # the amplitude at order zero is a "free" parameter, depending on t1, t2 etc. (but *not* t0)
    A = S.Function('A')(*ts[1:])
    c[0][1] = A

    # the solution ansatz
    u = N.sum([ z**i * c[i,:] for i in range(n+1) ], axis = 0)

    one = N.zeros_like(u)
    one[0] = 1/2

    cosine = N.zeros_like(u)
    cosine[1] = 1/2

    # finally the equation
    E = N.vectorize(S.simplify)( D(u, ts, z, 1) + prod(delay_taylor(u, ts, z, param=s*z**2, order=n, tau=S.pi/2), one + z * u + g * z**2 * prod(u, cosine)) )
    E = N.vectorize(lambda x: S.Poly(x, z))(E)

    # cross your fingers
    sols = {}
    diffs = {}
    M = S.sympify(0)
    for o in range(1, n+1):
        eq1 = N.vectorize(lambda x: x.coeff(o))(E)
        eq = N.vectorize(S.simplify) (subs(subs(eq1, sols), diffs))
        # keep first position out
        coeffs = [ c[o][i] for i in range(d) if c[o][i] ]
        # as well as the equation for it
        solution = apply(S.solvers.solve, [[eq[0]] + eq[2:].tolist()] + coeffs)
        if solution:
            sols.update(solution)
            # zero frequency coefficients can be taken to be real
            sols[c[o][0]] = S.simplify(sols[c[o][0]].as_real_imag()[0])
            if o is not 0:
                # homogeneous solution appears only in order zero
                sols[c[o][1]] = S.sympify(0)
            if o % 2 == 0:
                ss = S.solve(E[1].subs(sols).coeff(o).subs(diffs), A.diff(ts[o//2]))
                if ss:
                    diffs[A.diff(ts[o//2])] = ss[0]
                    M += z ** (o) * ss[0]
        else:
            print 'Solution not found at order %i.' % o
            return { 'success': False, 'eq': eq }
    
    x, y = S.symbols('xy', real=True)
    rmsubs = {S.re(A): x, S.im(A): y}
    Q = list((M.subs(diffs)/z**2).expand().as_real_imag())
    Q[0] = S.collect(S.simplify(Q[0].subs(rmsubs)), z)
    Q[1] = S.collect(S.simplify(Q[1].subs(rmsubs)), z)
    return { 'success': True,
            'M': M,
            'Q': Q,
            'E': E,
            'diffs': diffs,
            'sols': sols,
            'ts': ts, 'c': c, 'A': A, 'z': z, 'g': g, 's': s, 'x': x, 'y': y
           }
Example #37
    def test_meters_of_seawater(self):
        dbars = [5e2, 1e3, 2e3, 3e3, 4e3, 5e3, 6e3, 7e3, 8e3, 9e3, 10e3]
        lats = [0, 30, 45, 60, 90]
        expected = [
            496.65,
            496.00,
            495.34,
            494.69,
            494.03,
            992.12,
            990.81,
            989.50,
            988.19,
            986.88,
            1979.55,
            1976.94,
            1974.33,
            1971.72,
            1969.11,
            2962.43,
            2958.52,
            2954.61,
            2950.71,
            2946.81,
            3940.88,
            3935.68,
            3930.49,
            3925.30,
            3920.10,
            4915.04,
            4908.56,
            4902.08,
            4895.60,
            4889.13,
            5885.03,
            5877.27,
            5869.51,
            5861.76,
            5854.01,
            6850.95,
            6841.92,
            6832.89,
            6823.86,
            6814.84,
            7812.93,
            7802.63,
            7792.33,
            7782.04,
            7771.76,
            8771.07,
            8759.51,
            8747.95,
            8736.40,
            8724.85,
            9725.47,
            9712.65,
            9699.84,
            9687.03,
            9674.23,
        ]
        for inputs, output in zip(iproduct(dbars, lats), expected):
            kw = dict(dbar=inputs[0], latitude=inputs[1])
            p = Pressure(**kw)
            kw['expected_output'] = output
            with self.subTest(**kw):
                self.assertAlmostEqual(p.m_seawater, output, 2)

        self.assertRaises(NotImplementedError, meters_of_seawater_to_dbar, 1,
                          0)
        self.assertRaises(AttributeError, Pressure, **dict(m_seawater=100))
Example #38
    def _build(self, **kwargs):
        """Finds all the direct parents for the clusters in partitions. This is the first 
        step of weave(). Subclasses can override this function to achieve different results.

        Parameters
        ----------
        cutoff : keyword argument (0.5 ~ 1.0, default=0.8)
            containment index cutoff for claiming parenthood. 

        Returns
        -------
        G : networkx.DiGraph
            
        """

        cutoff = kwargs.pop('cutoff', 0.8)

        assume_levels = self.assume_levels
        terminals = self.terminals
        n_nodes = self.n_terminals
        L = self._assignment
        labels = self._labels
        levels = self._levels

        n_sets = len(L)

        rng = range(n_sets)
        if assume_levels:
            gen = ((i, j) for i, j in iproduct(rng, rng)
                   if levels[i] > levels[j])
        else:
            gen = ((i, j) for i, j in iproduct(rng, rng) if i != j)

        # find all potential parents
        LOGGER.timeit('_init')
        LOGGER.info('initializing the graph...')
        # calculate containment indices
        CI = containment_indices_boolean(L, L)
        G = nx.DiGraph()
        for i, j in gen:
            C = CI[i, j]
            na = (i, 0)
            nb = (j, 0)

            if not G.has_node(na):
                G.add_node(na, index=i, level=levels[i], label=labels[i])

            if not G.has_node(nb):
                G.add_node(nb, index=j, level=levels[j], label=labels[j])

            if C >= cutoff:
                if G.has_edge(na, nb):
                    C0 = G[na][nb]['weight']
                    if C > C0:
                        G.remove_edge(na, nb)
                    else:
                        continue

                #if not nx.has_path(G, nb, na):
                G.add_edge(nb, na, weight=C)

        LOGGER.report('graph initialized in %.2fs', '_init')

        # add a root node to the graph
        roots = []
        for node, indeg in G.in_degree():
            if indeg == 0:
                roots.append(node)

        if len(roots) > 1:
            root = (-1, 0)  # (-1, 0) will be changed to (0, 0) later
            G.add_node(root, index=-1, level=-1, label=True)
            for node in roots:
                G.add_edge(root, node, weight=1.)
        else:
            root = roots[0]

        # remove grandparents (redundant edges)
        LOGGER.timeit('_redundancy')
        LOGGER.info('removing redundant edges...')
        redundant = []

        for node in G.nodes():
            parents = [_ for _ in G.predecessors(node)]
            ancestors = [_ for _ in nx.ancestors(G, node)]

            for a in parents:
                for b in ancestors:
                    if neq(a, b) and G.has_edge(a, b):
                        # a is a grandparent
                        redundant.append((a, node))
                        break

        # rcl = getrecursionlimit()
        # if rcl < RECURSION_MAX_DEPTH:
        #     setrecursionlimit(RECURSION_MAX_DEPTH)

        # for u, v in G.edges():
        #     if n_simple_paths(G, u, v) > 1:
        #         redundant.append((u, v))

        # setrecursionlimit(rcl)

        #nx.write_edgelist(G, 'sample_granny_network.txt')
        G.remove_edges_from(redundant)
        LOGGER.report('redundant edges removed in %.2fs', '_redundancy')

        # attach graph nodes to nodes in G
        # for efficiency purposes, this is done after redundant edges are
        # removed. So we need to make sure we don't introduce new redundancy
        LOGGER.timeit('_attach')
        LOGGER.info('attaching terminal nodes to the graph...')
        X = np.arange(n_nodes)
        nodes = [node for node in G.nodes]
        attached_record = defaultdict(list)

        for node in nodes:
            n = node[0]
            if n == -1:
                continue

            x = X[L[n]]

            for i in x:
                ter = denumpize(terminals[i])
                attached = attached_record[ter]

                skip = False
                if attached:
                    for other in reversed(attached):
                        if nx.has_path(G, node, other):
                            # other is a descendant of node, skip
                            skip = True
                            break
                        elif nx.has_path(G, other, node):
                            # node is a descendant of other, remove other
                            attached.remove(other)
                            G.remove_edge(other, ter)

                if not skip:
                    G.add_edge(node, ter, weight=1.)
                    attached.append(node)

        LOGGER.report('terminal nodes attached in %.2fs', '_attach')

        self._full = G

        # find secondary edges
        def node_size(node):
            if istuple(node):
                i = node[0]
                return np.count_nonzero(L[i])
            else:
                return 1

        LOGGER.timeit('_sec')
        LOGGER.info('finding secondary edges...')
        secondary_edges = []
        secondary_terminal_edges = []

        for node in G.nodes():
            parents = [_ for _ in G.predecessors(node)]
            if len(parents) > 1:
                nsize = node_size(node)
                if istuple(node):
                    pref = []
                    for p in parents:
                        w = G.edges()[p, node]['weight']
                        psize = node_size(p)
                        usize = w * nsize
                        j = usize / (nsize + psize - usize)
                        pref.append(j)

                    # weight (CI) * node_size gives the size of the union between the node and the parent
                    ranked_edges = [((x[0], node), x[1]) for x in sorted(
                        zip(parents, pref), key=lambda x: x[1], reverse=True)]
                    secondary_edges.extend(ranked_edges[1:])
                else:
                    edges = []
                    for p in parents:
                        #psize = node_size(p)
                        n_steps = nx.shortest_path_length(G, root, p)
                        edges.append(((p, node), n_steps))

                    edges = sorted(edges, key=lambda x: x[1], reverse=True)
                    secondary_terminal_edges.extend(edges[1:])

        secondary_edges.sort(key=lambda x: x[1], reverse=True)
        secondary_terminal_edges.sort(key=lambda x: x[1], reverse=True)

        self._secondary_edges = secondary_edges
        self._secondary_terminal_edges = secondary_terminal_edges
        LOGGER.report('secondary edges found in %.2fs', '_sec')

        return G
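
The method above calls containment_indices_boolean(L, L) without showing it. A sketch of what it is assumed to compute (CI[i, j], the fraction of cluster i's members that also belong to cluster j), not the library's actual implementation:

import numpy as np

def containment_indices_boolean_sketch(A, B):
    # A, B: boolean assignment matrices, one row per cluster
    A = np.asarray(A, dtype=bool)
    B = np.asarray(B, dtype=bool)
    inter = A.astype(int) @ B.astype(int).T   # pairwise intersection sizes
    sizes = A.sum(axis=1, keepdims=True)      # |A_i|
    return inter / np.maximum(sizes, 1)

L = np.array([[1, 1, 1, 0],
              [1, 1, 0, 0]], dtype=bool)
print(containment_indices_boolean_sketch(L, L))  # CI[1, 0] == 1: cluster 1 lies inside cluster 0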
Example No. 39
    def _build(self, **kwargs):
        """Finds all the direct parents for the clusters in partitions. This is the first 
        step of weave(). Subclasses can override this function to achieve different results.

        Parameters
        ----------
        cutoff : keyword argument (0.5 ~ 1.0, default=0.75)
            containment index cutoff for claiming parenthood.
        merge : bool

        Returns
        -------
        G : networkx.DiGraph
            
        """

        cutoff = kwargs.pop('cutoff', 0.75)
        merge = kwargs.pop('merge', False)

        assume_levels = self.assume_levels
        terminals = self.terminals
        n_nodes = self.n_terminals
        L = self._assignment
        labels = self._labels
        levels = self._levels

        n_sets = len(L)

        rng = range(n_sets)
        if assume_levels:
            gen = ((i, j) for i, j in iproduct(rng, rng)
                   if levels[i] > levels[j])
        else:
            gen = ((i, j) for i, j in iproduct(rng, rng) if i != j)

        # find all potential parents
        LOGGER.timeit('_init')
        LOGGER.info('initializing the graph...')
        # calculate containment indices
        CI = containment_indices_boolean(L, L)
        G = nx.DiGraph()
        for i, j in gen:
            C = CI[i, j]
            na = (i, 0)
            nb = (j, 0)

            if not G.has_node(na):
                G.add_node(na, index=i, level=levels[i], label=labels[i])

            if not G.has_node(nb):
                G.add_node(nb, index=j, level=levels[j], label=labels[j])

            if C >= cutoff:
                if not merge:
                    if G.has_edge(na, nb):
                        C0 = G[na][nb]['weight']
                        if C > C0:
                            G.remove_edge(na, nb)
                        else:
                            continue
                G.add_edge(nb, na, weight=C)

        LOGGER.report('graph initialized in %.2fs', '_init')

        # remove loops
        if merge:

            def _collapse_nodes(G, vs):
                all_in_nodes, all_out_nodes = [], []
                vs = list(vs)
                vs = sorted(vs, key=lambda x: G.nodes[x]['index'])

                # add merge record
                new_index = []
                for v in vs:
                    new_index.append(G.nodes[v]['index'])
                    all_in_nodes.extend([w for w in G.predecessors(v)])
                    all_out_nodes.extend([w for w in G.successors(v)])
                all_in_nodes = list(set(all_in_nodes).difference(vs))
                all_out_nodes = list(set(all_out_nodes).difference(vs))
                dict_in_weights = {u: 0 for u in all_in_nodes}
                dict_out_weights = {u: 0 for u in all_out_nodes}
                for v in vs:
                    for w in G.predecessors(v):
                        if w not in all_in_nodes:
                            continue
                        if G[w][v]['weight'] > dict_in_weights[w]:
                            dict_in_weights[w] = G[w][v]['weight']
                for v in vs:
                    for w in G.successors(v):
                        if w not in all_out_nodes:
                            continue
                        if G[v][w]['weight'] > dict_out_weights[w]:
                            dict_out_weights[w] = G[v][w]['weight']

                G.remove_nodes_from(vs[1:])
                G.nodes[vs[0]]['index'] = tuple(
                    new_index)  # TODO: why does this have to be a tuple?
                for u in all_in_nodes:
                    if not G.has_predecessor(vs[0], u):
                        G.add_edge(u, vs[0], weight=dict_in_weights[u])
                for u in all_out_nodes:
                    if not G.has_successor(vs[0], u):
                        G.add_edge(vs[0], u, weight=dict_out_weights[u])

                return G

            LOGGER.timeit('_cluster_redundancy')
            LOGGER.info(
                '"merge" parameter set to true, so merging redundant clusters...'
            )

            try:
                cycles = list(nx.simple_cycles(G))
                # LOGGER.info('Merge {} redundant groups ...'.format(len(cycles)))
            except Exception:
                LOGGER.info('No redundant groups have been found ...')
                cycles = []
            if len(cycles) > 0:
                Gcyc = nx.Graph()
                for i in range(len(cycles)):
                    for v, w in itertools.combinations(cycles[i], 2):
                        Gcyc.add_edge(v, w)
                components = list(nx.connected_components(Gcyc))
                LOGGER.info('Merge {} redundant groups ...'.format(
                    len(components)))
                for vs in components:
                    G = _collapse_nodes(
                        G,
                        vs,
                    )

            LOGGER.report('redundant nodes removed in %.2fs',
                          '_cluster_redundancy')

        # add a root node to the graph
        roots = []
        for node, indeg in G.in_degree():
            if indeg == 0:
                roots.append(node)

        # TODO: right now needs a cluster full of 1, otherwise report an error; figure out why and fix it
        if len(roots) > 1:
            root = (-1, 0)  # (-1, 0) will be changed to (0, 0) later
            G.add_node(root, index=-1, level=-1, label=True)
            for node in roots:
                G.add_edge(root, node, weight=1.)
        else:
            root = roots[0]

        # remove grandparents (redundant edges)
        LOGGER.timeit('_redundancy')
        LOGGER.info('removing redundant edges...')
        redundant = []

        for node in G.nodes():
            parents = [_ for _ in G.predecessors(node)]
            ancestors = [_ for _ in nx.ancestors(G, node)]

            for a in parents:
                for b in ancestors:
                    if neq(a, b) and G.has_edge(a, b):
                        # a is a grandparent
                        redundant.append((a, node))
                        break

        G.remove_edges_from(redundant)
        LOGGER.report('redundant edges removed in %.2fs', '_redundancy')

        # attach graph nodes to nodes in G (this part can be skipped)

        LOGGER.timeit('_attach')
        LOGGER.info('attaching terminal nodes to the graph...')
        X = np.arange(n_nodes)
        nodes = [node for node in G.nodes]
        attached_record = defaultdict(list)

        for node in nodes:
            n = node[0]
            if n == -1:
                continue

            x = X[L[n]]

            for i in x:
                ter = denumpize(terminals[i])
                attached = attached_record[ter]

                skip = False
                if attached:
                    for other in reversed(attached):
                        if nx.has_path(G, node, other):
                            # other is a descendant of node, skip
                            skip = True
                            break
                        elif nx.has_path(G, other, node):
                            # node is a descendant of other, remove other
                            attached.remove(other)
                            G.remove_edge(other, ter)

                if not skip:
                    G.add_edge(node, ter, weight=1.)
                    attached.append(node)

        LOGGER.report('terminal nodes attached in %.2fs', '_attach')

        # update node assignments
        LOGGER.timeit('_update')
        LOGGER.info(
            'propagate terminal node assignments upward in the hierarchy'
        )  # TODO: this can be iterated until there's no change
        L_sp = sp.sparse.csr_matrix(L.T)

        ## construct a community connectivity matrix
        row, col = [], []
        for v in nodes:
            row.append(v[0])
            col.append(v[0])
        for v, w in itertools.combinations(nodes, 2):
            if nx.has_path(G, v, w):  # w is a descendant of v
                row.append(w[0])
                col.append(v[0])
        data = np.ones_like(row, dtype=int)
        cc_mat = sp.sparse.coo_matrix((data, (row, col)),
                                      shape=(L.shape[0], L.shape[0]))
        cc_mat = cc_mat.tocsr()
        L_sp_new = L_sp.dot(cc_mat) > 0
        self._assignment = L_sp_new.toarray().T
        LOGGER.report('terminal nodes propagated in %.2fs', '_update')

        in_degrees = np.array([deg for (_, deg) in G.in_degree()])
        if len(np.where(in_degrees == 0)[0]) > 1:
            G.add_node('root')  # add root
            # print('root added')
            for node in G.nodes():
                if G.in_degree(node) == 0:
                    G.add_edge('root', node)

        self._full = G

        # find secondary edges
        def node_size(node):
            if istuple(node):
                i = node[0]
                return np.count_nonzero(L[i])
            else:
                return 1

        LOGGER.timeit('_sec')
        LOGGER.info('finding secondary edges...')
        secondary = []
        for node in G.nodes():
            parents = [_ for _ in G.predecessors(node)]
            if len(parents) > 1:
                nsize = node_size(node)
                pref = []
                for p in parents:
                    w = G.edges()[p, node]['weight']
                    psize = node_size(p)
                    usize = w * nsize
                    j = usize / (nsize + psize - usize)
                    pref.append(j)

                # weight (CI) * node_size gives the size of the union between the node and the parent
                ranked_edges = [((x[0], node), x[1]) for x in sorted(
                    zip(parents, pref), key=lambda x: x[1], reverse=True)]
                secondary.extend(ranked_edges[1:])

        secondary.sort(key=lambda x: x[1], reverse=True)

        self._secondary = secondary
        LOGGER.report('secondary edges found in %.2fs', '_sec')

        return G
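
A minimal sketch of the cycle-merging idea in the merge branch above: clusters that mutually contain each other form directed cycles; overlapping cycles are grouped through connected components, and each group is collapsed into a single node:

import itertools
import networkx as nx

G = nx.DiGraph([(1, 2), (2, 1), (2, 3), (3, 2)])   # 1<->2 and 2<->3
cycles = list(nx.simple_cycles(G))
Gcyc = nx.Graph()
for cyc in cycles:
    for v, w in itertools.combinations(cyc, 2):
        Gcyc.add_edge(v, w)
print(list(nx.connected_components(Gcyc)))         # [{1, 2, 3}]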
Example No. 40
    def __init__(self, voxmap, s):
        '''
		Initialize a PiecewiseSlowness instance. The map voxmap should
		map each key to a 3-D array of shape (L, M, N) such that, if

		  keys = sorted(voxmap.keys()),

		a slowness image for a perturbation x is given by

		slowness = s + sum(voxmap[keys[i]] * x[i]
					for i in range(len(x))).

		A special class of keys, starting with 'unconstrained', will
		behave differently. Each nonzero voxel in the image at
		voxmap['unconstrained'] will effectively get its own key,
		allowing each nonzero pixel in the 'unconstrained'
		voxmap to take a distinct value.

		Additional 'unconstrained' keys are allowed to take the form
		'unconstrained_<d>x', where '<d>' is an arbitrary base-10
		integer. Each nonzero voxel in I = voxmap['unconstrained_Dx']
		for an integer D represents a unique value corresponding to a
		cluster of DxDxD ordinary voxels; i.e., each voxel (i,j,k) in I
		corresponds to an effective map M_ijk with

		  M_ijk[D*i:D*(i+1),D*j:D*(j+1),D*k:D*(k+1)] = I[i,j,k]

		and M_ijk = 0 everywhere else.

		The 'unconstrained' class of keys is case sensitive.
		'''
        # Build a sparse matrix representation from the voxmap
        self._shape = None
        data, rows, cols = [], [], []
        M, N = 0, 0
        # Match unconstrained keys with optional scales
        unre = re.compile('^unconstrained(_([0-9]+)x)?')
        # Note that voxmap may not be a proper dictionary (e.g., NpzFile)
        for key in sorted(voxmap.keys()):
            # Check if the key is a scaled or unconstrained key
            m = unre.match(key)
            if m: scale = int(m.groups(1)[1])
            else: scale = 1

            v = np.asarray(voxmap[key]).astype(np.float64)
            if v.ndim != 3:
                raise ValueError('All voxmaps must have three dimensions')

            # Scale the shape of the grid if appropriate
            shape = v.shape
            if scale != 1: shape = tuple(scale * sv for sv in shape)

            if not self._shape:
                self._shape = shape
                M = np.prod(self._shape)
            elif self._shape != shape:
                raise ValueError('All voxmap shapes must be compatible')

            if not m or scale == 1:
                # Process constrained or scale-1 unconstrained maps
                v = v.ravel('C')
                ri = np.nonzero(v)[0]
                data.extend(v[ri])
                rows.extend(ri)

                if not m:
                    # Constrained voxels share a column
                    cols.extend(N for sv in range(len(ri)))
                    N += 1
                else:
                    # Unconstrained voxels get their own columns
                    cols.extend(range(N, N + len(ri)))
                    N += len(ri)
                continue

            # List of neighbor offsets for scaled voxels
            nbrs = list(iproduct(*(range(scale) for sv in range(v.ndim))))

            # Process each scaled voxel as a separate column
            for i, j, k in np.transpose(np.nonzero(v)):
                # Explode each voxel in a scaled unconstrained image
                # np.ravel_multi_index requires the transposed form
                vxi = np.transpose(
                    [[scale * i + ii, scale * j + jj, scale * k + kk]
                     for ii, jj, kk in nbrs])
                # Map the exploded voxels to C-raveled indices
                ri = np.ravel_multi_index(vxi, shape, order='C')
                # Build the next block of the matrix
                # All voxels in the block get the same weight
                rows.extend(ri)
                cols.extend(N for sv in range(len(ri)))
                data.extend(v[i, j, k] for sv in range(len(ri)))
                N += 1

        # Confirm that the background has the right form
        self._s = np.array(s, dtype=np.float64)
        if self._s.shape and self._s.shape != self._shape:
            raise ValueError(
                'Background slowness s must be scalar or match voxmap shapes')

        # Convert the representation to CSR for efficiency
        self._voxmap = coo_matrix((data, (rows, cols)), shape=(M, N)).tocsr()
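
A hypothetical usage sketch of the matrix built above: with two constrained keys on a 2x2x2 grid, the perturbed slowness image is assumed to be s + voxmap @ x reshaped back to the grid. All names and values here are invented for illustration:

import numpy as np
from scipy.sparse import coo_matrix

shape = (2, 2, 2)
vox_a = np.zeros(shape); vox_a[0] = 1.0    # key 'a' covers the first slab
vox_b = np.zeros(shape); vox_b[1] = 1.0    # key 'b' covers the second slab
V = coo_matrix(np.stack([vox_a.ravel(), vox_b.ravel()], axis=1)).tocsr()
x = np.array([0.1, -0.2])                  # one perturbation value per key
slowness = 1.5 + (V @ x).reshape(shape)    # background s = 1.5
print(slowness[:, 0, 0])                   # [1.6 1.3]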
Example No. 41
def solve_landscape_ntypes_nspecies(landscape,
                                    par,
                                    dx,
                                    f_tol=None,
                                    force_positive=False,
                                    skip_refine=False,
                                    return_residual=False,
                                    verbose=True):
    '''Find the stationary solution for a given landscape and set of parameters.

    Uses a Newton-Krylov solver with LGMRES sparse inverse method to find a
    stationary solution (or the solution to the elliptical problem) to the
    system of 2n equations in 2 dimensions (x is a 2-d vector):

    .. math::
        \\frac{\partial u_{ik}}{\partial t} = D_k \\nabla^2 u_{ik} + r_{ik} u_{ik} (1-\sum_{j=1}^n \\alpha_j u_{jk}) = 0

    where i runs over the n species and k over the patch types.

    Parameters
    ----------
    landscape : 2-d array of ints
        describe the landscape, with any number of habitat types
    par : list
        the first element is the matrix (or dict with tuple keys) of
        competition coefficients, including the inverse of carrying capacities,
        and the following elements are dicts with parameters as in the
        `solve_landscape_ntypes()` function, except for the carrying capacity.
    dx : float
        length of each edge
    f_tol : float
        tolerance for the residue, passed on to the solver routine.  Default is
        6e-6
    force_positive : bool
        make sure the solution is always non-negative - in a hacky way. Default
        False
    skip_refine : bool
        do not refine the grid to calculate the residual. This can greatly
        improve speed, but will generate errors (even silent wrong results) if
        the landscape has contiguous interfaces
    return_residual : bool
        returns only the residual function, without calculating the solution
    verbose : bool
        print residue of the solution and its maximum and minimum values

    Returns
    -------
    solution : 3-d array of shape (n species,) + landscape.shape, containing
        the solution for each species

    Boundary and interface conditions
    ---------------------------------
    External boundaries are of the form

    .. math::
        a \\nabla u \cdot \hat{n} + b u + c = 0

    and may be different for left, right, top, bottom.  The derivative of u is
    taken along the normal to the boundary.

    The interfaces between patches and matrix are given by

    .. math::
        u(x) &= \gamma v(x) \\\\
        D_p \\nabla u(x) \cdot \hat{n} &= D_m \\nabla v(x) \cdot \hat{n}

    where u is a patch and v is the solution in the matrix.

    Example
    -------
    >>> from landscape import *
    >>> lA = loadtxt('landA.txt')
    >>> par = [
            [
                [[0, 0],
                 [0.1, 0.1]],
                [[0.1, 0],
                 [0.1, 0.1]]
            ],
            {'r': [-0.03, 0.1],
             'D': [0.001, 0.0001],
             'left': [1.0, 0.0, 0.0],
             'right': [1.0, 0.0, 0.0],
             'top': [1.0, 0.0, 0.0],
             'bottom': [1.0, 0.0, 0.0]},
            {'r': [0.05, 0.05],
             'D': [0.001, 0.001],
             'left': [1.0, 0.0, 0.0],
             'right': [1.0, 0.0, 0.0],
             'top': [1.0, 0.0, 0.0],
             'bottom': [1.0, 0.0, 0.0]}
            ]
    >>> sol = solve_landscape_ntypes_nspecies(lA, par, dx)

    '''
    from scipy.optimize import newton_krylov

    if not skip_refine:
        # refine the grid to avoid contiguous interfaces
        landscape = refine_grid(landscape)
        dx /= 2

    N = len(par) - 1
    n = np.unique(landscape).astype(int)

    resi = [
        solve_landscape_ntypes(landscape,
                               dict(p, K=np.inf * np.ones(len(n))),
                               dx,
                               skip_refine=True,
                               return_residual=True) for p in par[1:]
    ]

    sec_term = np.zeros((N, N, landscape.shape[0], landscape.shape[1]))
    for i, j in iproduct(range(N), range(N)):
        for k in n:
            lk = np.where(landscape == k)
            sec_term[i, j][lk] = par[0][k][i][j]

    def residual(P):
        if force_positive:
            P = np.abs(P)
        res = np.zeros_like(P)
        # loops are for lazy people
        for i, Pi in enumerate(P):
            res[i, :, :] = resi[i](Pi) - Pi * (sec_term[i, :, :] *
                                               P).sum(axis=0)
        return res

    if return_residual:
        return residual

    # build guess based on where growth is positive
    guess = np.zeros((N, landscape.shape[0], landscape.shape[1]))
    for i in range(N):
        for k in n:
            lk = np.where(landscape == k)
            rik = par[i + 1]['r'][k]
            if rik <= 0:
                guess[i, :, :][lk] = 1e-6
            else:
                guess[i, :, :][lk] = rik / par[0][k][i][i]

    # solve
    sol = newton_krylov(residual, guess, method='lgmres', f_tol=f_tol)
    if force_positive:
        sol = np.abs(sol)
    if verbose:
        print('Residuals:',
              *['%e' % i for i in np.abs(residual(sol)).max(axis=(1, 2))])
        print('max. pops.:', *['%f' % i for i in sol.max(axis=(1, 2))])
        print('min. pops.:', *['%f' % i for i in sol.min(axis=(1, 2))])

    if not skip_refine:
        sol = coarse_grid(sol)

    return sol
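
A minimal sketch of how the competition tensor sec_term above is assembled: for each habitat type k, sec_term[i, j] takes the value par[0][k][i][j] on the cells of that type, so the residual can evaluate the reaction term with a single vectorized sum over species:

import numpy as np
from itertools import product as iproduct

landscape = np.array([[0, 0],
                      [1, 1]])
alpha = [[[0.0, 0.0], [0.1, 0.1]],   # habitat type 0
         [[0.1, 0.0], [0.1, 0.1]]]   # habitat type 1
N = 2
sec_term = np.zeros((N, N) + landscape.shape)
for i, j in iproduct(range(N), range(N)):
    for k in np.unique(landscape):
        sec_term[i, j][landscape == k] = alpha[k][i][j]
print(sec_term[1, 0])   # 0.1 everywhere: species 1 is limited by species 0 in both habitats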
Example No. 42
def main():
    print('case', 'condition', 'temperature')
    for i, data in enumerate(
            iproduct(range(1, 4), map(lambda x: 0.5 * x, range(-1, 2)))):
        print(i + 1, *data)
    return 0
Example No. 43
def main(argv):
    try:
        opts, args = getopt.getopt(argv, "f:i:r:s:otv", [
            "field=", "species=", "infile=", "radius=", "output_csv", "total",
            "ion_velocity"
        ])
    except getopt.GetoptError as err:
        print(err)
        print('error')
        return

    out_csv, total, ion_velocity = False, False, False
    for opt, arg in opts:
        if opt in ("-f", "--field"):
            if arg == "all":
                fields_suffix = ['flux', 'number_density']
            elif arg == 'mag':
                fields_suffix = [
                    'magnetic_field_normal', 'magnetic_field_total',
                    'magnetic_field_x', 'magnetic_field_y', 'magnetic_field_z'
                ]
            else:
                fields_suffix = [arg]
        elif opt in ("-s", "--species"):
            if arg == 'all': ions = ['O2_p1_', 'O_p1_']  #'CO2_p1',
            elif arg == 'None': ions = ['']
            else: ions = [arg + "_"]
        elif opt in ("-i", "--infile"):
            dsk = arg.split('/')[-1].split('.')[0]
            ds_names = {dsk: arg}
            if 'batsrus' in dsk: ds_types = {'batsrus': [dsk]}
            else: ds_types = {'rhybrid': [dsk]}
        elif opt in ("-r", "--radius"):
            if arg == 'all': radii = np.arange(1.0, 3.0, 0.2)
            else: radii = ast.literal_eval(arg)
            if type(radii) == float: radii = [radii]
        elif opt in ("-o", "--output_csv"):
            out_csv = True
        elif opt in ("-t", "--total"):
            total = True
        elif opt in ("-v", "--ion_velocity"):
            ion_velocity = True

    fields = [ion + suff for ion, suff in iproduct(ions, fields_suffix)]
    if total: fields.append('total_flux')

    if out_csv: df = pd.DataFrame(columns=ions, index=radii)

    for ds_type in ds_names.keys():
        for r in radii:
            field_dat = run_sphere_flux(ds_names,
                                        ds_types,
                                        r,
                                        fields,
                                        ion_velocity=ion_velocity)

            if out_csv:
                for ion in ions:
                    total_ions = np.sum(np.nan_to_num(field_dat['area']*\
                                field_dat[ion+'flux'][ds_type]))
                    df.loc[r, ion] = total_ions

        if out_csv: df.to_csv('Output/sphere_flux_{0}.csv'.format(ds_type))
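
A small sketch of the field-name construction used above: each ion prefix is crossed with each field suffix via iproduct:

from itertools import product as iproduct

ions = ['O2_p1_', 'O_p1_']
fields_suffix = ['flux', 'number_density']
fields = [ion + suff for ion, suff in iproduct(ions, fields_suffix)]
print(fields)
# ['O2_p1_flux', 'O2_p1_number_density', 'O_p1_flux', 'O_p1_number_density']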
Example No. 44
def group_cavity_solve(
        S=(100, 100),
        mu=1,
        sigma=.0,
        gamma=0,
        mean_k=1.,
        sigma_k=0,
        sigma_d=0,
        tau=1,
        x0=None,
        NTRIALS=3,
        EPS=10**-15,
        logmode=0,
        ftol=0.003,
        SIGMA_THRESH=0.01,  #Threshold for recursing
        varmode=1,
        sigrow=None,
        **kwargs):
    USE_RECURS = kwargs.get('USE_RECURS', 1)
    USE_ROOT_HELPER = kwargs.get('USE_ROOT_HELPER', 1)

    #tau = ratio of typical timescales (e.g. for metabolic)

    levels = len(S)
    S, mean_k, sigma_k = [
        np.array(x).astype('float')
        if np.array(x).shape else np.array([float(x) for nb in range(levels)])
        for x in (S, mean_k, sigma_k)
    ]
    s = S / np.mean(S)

    #Making matrices
    # promote scalar mu/sigma/gamma to arrays so the .shape checks below work
    mu, sigma, gamma = (np.asarray(v, dtype=float) for v in (mu, sigma, gamma))
    if len(mu.shape) < 2:
        MU = (np.ones((levels, levels)) * mu).T
    else:
        MU = mu
    if len(sigma.shape) < 2:
        SIGMA2 = (np.ones((levels, levels)) * sigma**2).T
    else:
        SIGMA2 = sigma**2
    if len(gamma.shape) < 2:
        GAMMA = (np.ones((levels, levels)) * gamma).T
    else:
        GAMMA = gamma

    if sigrow is not None:
        SIGROW2 = sigrow**2
    else:
        SIGROW2 = 0

    if 0 and (MU < 0).any():
        test = -(s * MU.T)
        import numpy.linalg as la
        if np.max(np.abs(la.eigvals(test))) > 1:
            return np.zeros((4, levels)) + EPS

    CALC_INTEGRAL = kwargs.get('CALC_INTEGRAL', 0)
    traceback = []

    def eqs(vec, calc=CALC_INTEGRAL, logmode=logmode):
        if logmode:
            logvec = vec
            vec = np.exp(np.clip(vec, None, 30))
        N1, N2, V, Phi = vec.reshape((4, levels))
        if varmode:
            N2 = N2 * N1**2
        if logmode:
            # Phi=np.clip(Phi,0,1)
            pass
        else:
            zerocond = np.min(
                np.concatenate([N1.ravel(),
                                Phi.ravel(),
                                N2.ravel()
                                ]))  # Things that should be positive!

        if 0:
            traceback.append(vec)
        #N2=np.clip(N2,0,None)
        #N1=np.clip(N1,0,None)
        #Phi=np.clip(Phi,0,None)

        mean0, var0, Veq = group_cavity_calc(S,
                                             MU,
                                             SIGMA2,
                                             GAMMA,
                                             mean_k,
                                             sigma_k,
                                             sigma_d,
                                             N1,
                                             N2,
                                             V,
                                             Phi,
                                             tau,
                                             SIGROW2=SIGROW2)
        #print N1,mean0,var0,erfmom(1,mean0,var0)

        eq4 = 1 - Veq
        if calc:
            raise Exception(
                "TODO: explicit calculation in group_cavity_solve not done yet"
            )
        else:
            #Equivalent with directly computed moments of erf

            if logmode:
                flor = EPS
            else:
                flor = 0
            roof = np.max(np.sqrt(mean_k**2 + sigma_k**2)) * 10**7
            theophi = np.clip(erfmom(0, mean0, var0), flor, 1)
            theoN1 = np.clip(erfmom(1, mean0, var0), flor, roof)
            theoN2 = np.clip(erfmom(2, mean0, var0), flor, roof**2)

            #ref[theophi<10**-5]=0

            def refscale(x):
                if 0:
                    return 1
                else:
                    return np.mean(x)

            if not logmode:
                #Putting all the equations on the same scale, while regularizing zero denominators
                # tmp=refscale(Phi)
                # eq1=1 -(Phi+tmp)/(theophi+tmp)
                eq1 = Phi - theophi
                phi = np.clip(theophi, 0.001, None)
                tmp = refscale(theoN1)
                # eq2=1-(np.abs(Phi)*N1  +tmp)/(theoN1+tmp)
                eq2 = (phi * N1 - theoN1) / tmp
                tmp = refscale(theoN2)
                # eq3=1-(np.abs(Phi)*N2 +tmp)/(theoN2+tmp)
                eq3 = (phi * N2 - theoN2) / tmp
            else:
                if 0:
                    eq1 = (Phi - theophi) / max(theophi)
                    eq2 = (Phi * N1 - theoN1) / max(theoN1)
                    eq3 = (Phi * N2 - theoN2) / max(theoN2)
                else:
                    logN1, logN2, logV, logPhi = logvec.reshape((4, levels))
                    tmp = refscale(theophi)
                    eq1 = 1 - (Phi + tmp) / (theophi + tmp)
                    tmp = refscale(np.log(theoN1))
                    eq2 = 1 - (logN1 + tmp) / (np.log(theoN1) -
                                               np.log(theophi) + tmp)
                    tmp = refscale(np.log(theoN2))
                    eq3 = 1 - (logN2 + tmp) / (np.log(theoN2) -
                                               np.log(theophi) + tmp)

        res = np.concatenate((eq2, eq3, eq4, eq1))
        # if logmode:
        # res*=max(1,1+np.max(vec)/20.)

        return res

    #root=sopt.newton_krylov
    #root=sopt.anderson

    tries = 0

    class Tmp():
        success = False
        fun = 1000

    res = Tmp()
    trials_left = kwargs.pop('trials_left', NTRIALS)
    recurs_depth = kwargs.pop('recurs_depth', 0)
    X0 = None
    XMF = None
    while not res.success and np.max(np.abs(
            res.fun)) > ftol and trials_left - tries > 0:
        newx0 = 0
        if (recurs_depth > 5
                or tries > 1) and np.max(sigma) > SIGMA_THRESH and USE_RECURS:
            kw = {}
            kw.update(kwargs)
            #print 'SIGMA', sigma
            #print kwargs.get('trials_left')
            factor = max(1.03, np.exp(np.log(np.max(sigma) / 0.01) / 10.))
            print('====> RECURSE FROM', np.max(sigma**2), 'TRIES LEFT',
                  trials_left - tries, 'DEPTH', recurs_depth)
            x0 = np.concatenate(
                group_cavity_solve(S=S,
                                   mu=mu,
                                   sigma=sigma / factor,
                                   mean_k=mean_k,
                                   gamma=gamma,
                                   sigma_k=sigma_k,
                                   sigma_d=sigma_d,
                                   x0=None,
                                   logmode=False,
                                   NTRIALS=2,
                                   recurs_depth=recurs_depth + 1,
                                   **kwargs)[:4])
            tries = trials_left
            newx0 = 1

        elif x0 is None:
            #Starting from Mean Field solution

            XMF = np.ones((4, levels))
            Phi = erfmom(0, mean_k, sigma_k**2)
            Phi[Phi == 0] = 1
            if kwargs.get('N0', None) is not None:
                print('INITIALIZING WITH EMPIRICAL Ns')
            XMF[0], XMF[3] = group_meanfield(s,
                                             MU,
                                             mean_k,
                                             sigma_k=sigma_k,
                                             Ninit=kwargs.get('N0', None))
            if ((XMF[0], XMF[3]) <= 10**-14 + np.zeros((2, levels))).all():
                print("ZEROS FROM MEANFIELD")
                return np.zeros((4, levels)) + EPS
            XMF[1] = XMF[0]**2
            XMF[2] = 1
            #XMF[3]=np.clip(Phi,0.01,1)
            mean0, var0, Veq = group_cavity_calc(S,
                                                 MU,
                                                 SIGMA2,
                                                 GAMMA,
                                                 mean_k,
                                                 sigma_k,
                                                 sigma_d,
                                                 XMF[0],
                                                 XMF[1],
                                                 XMF[2],
                                                 Phi,
                                                 tau,
                                                 SIGROW2=SIGROW2)
            XMF[3] = np.clip(np.minimum(XMF[3], erfmom(0, mean0, var0)), 0.01,
                             1)

            x0 = XMF
            x0 = x0.ravel()
            newx0 = 1

        if varmode and newx0:
            x0[levels:2 * levels] = np.clip(
                x0[levels:2 * levels] / x0[:levels]**2, 0, 100)
            x0[levels:2 * levels][x0[:levels] == 0] = 1

        if logmode:
            X0 = np.log(x0 + EPS)
        else:
            X0 = x0
        funinit = np.abs(eqs(X0))
        if np.max(np.abs(funinit)) < ftol / 10:
            res.x = x0
            res.success = True
            res.fun = funinit
        if XMF is None:
            print("    x0:", np.sum(funinit), "maxerr", np.max(funinit))

        root = sopt.root
        if logmode:
            bounds = None
        else:
            bounds = [(0, None)
                      for i in range(3 * levels)] + [(0, 1)
                                                     for i in range(levels)]

        methods = ['hybr']
        if np.max(sigma) < SIGMA_THRESH:
            #Last chance, no recursion, so better try methods
            if logmode:
                methods = ['Krylov', 'df-sane', 'hybr']
            else:
                methods = [
                    'hybr', 'Krylov', 'df-sane'
                ]  #df-sane never seems to work better than hybr; Krylov rarely (but very slow)

        if (tries > 0 or recurs_depth > 0) and USE_ROOT_HELPER:
            print('    Using root helper')
            X1 = root_helper(eqs, X0, bounds=bounds, tol=ftol)
            import numpy.linalg as la
            nm = la.norm(X1 - X0)
            mx = np.argmax(np.abs(X1 - X0))
            labels = [
                '{}_{}'.format(i, j) for i, j in iproduct(
                    ['N1', 'N2', 'V', 'Phi'], [str(z) for z in range(levels)])
            ]
            print('    Root helper pushed', nm, labels[mx], X0[mx], '->',
                  X1[mx])
            X0 = X1
        while not res.success and methods:
            method = methods.pop(0)
            try:
                res = root(eqs, X0, method=method, tol=ftol)
            except Exception as e:
                print(e)
            if not res.success:
                print('    Root finding failure:', method)
            # if not methods and not logmode:
            #     methods=['Krylov']
            #     logmode=1
        tries += 1

    print(MU, SIGMA2, GAMMA, SIGROW2)
    if not res.success:
        code_debugger()

    result = np.clip(res.x, EPS, None)
    if logmode:
        result = np.exp(np.clip(result, np.log(EPS), 42))
    #print eqs(XMF.ravel()),res.fun
    success = 1
    error = np.max(np.abs(res.fun))

    if res.success and error > ftol:
        res.success = False
        print('     WARNING: groups.py claims success but error={}>ftol, '
              'REJECTED!'.format(error))

    if not res.success and error > ftol:
        success = 0
        DEBUG = kwargs.get('DEBUG', False)
        if DEBUG:
            print('\nERROR: {}'.format(res.message))  #,list(kwargs.get('best_fun',res.fun).reshape((4,levels))) )
            x0error = eqs(X0)
            x0better = (np.abs(res.fun) > np.abs(x0error))
            labels = [
                '{}_{}'.format(i, j) for i, j in iproduct(
                    ['N1', 'N2', 'V', 'Phi'], [str(z) for z in range(levels)])
            ]
            ERRORS = [
                '{}: {} ({}) -- prev {} ({})'.format(*i)
                for i in zip(labels, result, res.fun, x0, x0error)
                if np.abs(i[2]) > ftol
            ]
            nberr = 15
            print(ERRORS[:nberr], end=' ')
            if len(ERRORS) > nberr:
                print('and {} others'.format(len(ERRORS) - nberr))
            else:
                print('')
            print('RECURSION DEPTH', recurs_depth, 'NTRIALS', trials_left,
                  "SIGMA2", np.max(sigma**2))
            result[x0better] = x0[x0better]
            error = min(error, np.max(np.abs(x0error)))

        handled = not DEBUG
        while not handled:
            N1, N2, V, Phi = result.reshape((4, levels))
            if varmode:
                N2 = N2 * N1**2
            handled = 1
            # code_debugger()
    else:
        if tries > 1:
            print('    Root finding: finally, success')

        #mean0,var0,Veq=group_cavity_calc(s,MU,SIGMA2,GAMMA,mean_k,sigma_k,
        #sigma_d,XMF[0],XMF[1],XMF[2],XMF[3],tau)
        #print mean0
        #print np.sqrt(var0)
        #print np.clip(erfmom(0,mean0,var0),0,1)
        #print Veq
        #Veff=np.dot(np.sqrt(SIGMA2*SIGMA2.T),s*Phi*V)
        #print 1-Veff
    #else:
    #print res.fun

    N1, N2, V, Phi = result.reshape((4, levels))
    N1[Phi < EPS] = EPS
    N2[Phi < EPS] = EPS
    if varmode:
        N2 = N2 * N1**2
    return N1, N2, V, Phi, success / error
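
The solver above leans on erfmom(k, mean, var), which is not shown. A sketch of what it is assumed to compute (the k-th moment of a Gaussian restricted to the positive half-line, so that moment 0 gives the surviving fraction Phi and moment 1 the mean abundance N1), not the project's actual implementation:

import numpy as np
from scipy.stats import norm

def erfmom_sketch(k, mean, var):
    # assumed semantics: E[x**k * 1(x > 0)] for x ~ Normal(mean, var)
    sd = np.sqrt(var)
    z = mean / sd
    phi0 = norm.cdf(z)                  # P(x > 0)
    if k == 0:
        return phi0
    if k == 1:
        return mean * phi0 + sd * norm.pdf(z)
    if k == 2:
        return (mean**2 + var) * phi0 + mean * sd * norm.pdf(z)
    raise NotImplementedError(k)

print(erfmom_sketch(0, 1.0, 1.0), erfmom_sketch(1, 1.0, 1.0))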