Example #1
    def __init__(self, capacity=100, cost=100, number=None):

        Vehicle = namedtuple("Vehicle", ["index", "capacity", "cost"])

        if number is None:
            self.number = np.size(capacity)
        else:
            self.number = number
        idxs = np.array(range(0, self.number))

        if np.isscalar(capacity):
            capacities = capacity * np.ones_like(idxs)
        elif np.size(capacity) != self.number:
            print("capacity is neither scalar, nor the same size as num!")
        else:
            capacities = capacity

        if np.isscalar(cost):
            costs = cost * np.ones_like(idxs)
        elif np.size(cost) != self.number:
            print(np.size(cost))
            print("cost is neither scalar, nor the same size as num!")
        else:
            costs = cost

        self.vehicles = [Vehicle(idx, capacity, cost) for idx, capacity, cost in zip(idxs, capacities, costs)]
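The scalar-or-array pattern used above can be exercised on its own. A minimal sketch with illustrative values, assuming only NumPy:

import numpy as np

# capacity may be a scalar or a per-vehicle array; a scalar is broadcast
capacity = 100
number = 3
idxs = np.arange(number)
if np.isscalar(capacity):
    capacities = capacity * np.ones_like(idxs)
else:
    capacities = np.asarray(capacity)
print(capacities)  # [100 100 100]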
Example #2
def lqmn(m,n,z):
    """Associated Legendre functions of the second kind, Qmn(z) and its
    derivative, ``Qmn'(z)`` of order m and degree n.  Returns two
    arrays of size ``(m+1, n+1)`` containing ``Qmn(z)`` and ``Qmn'(z)`` for
    all orders from ``0..m`` and degrees from ``0..n``.

    z can be complex.
    """
    if not isscalar(m) or (m<0):
        raise ValueError("m must be a non-negative integer.")
    if not isscalar(n) or (n<0):
        raise ValueError("n must be a non-negative integer.")
    if not isscalar(z):
        raise ValueError("z must be scalar.")
    m = int(m)
    n = int(n)

    # Ensure neither m nor n == 0
    mm = max(1,m)
    nn = max(1,n)

    if iscomplex(z):
        q,qd = specfun.clqmn(mm,nn,z)
    else:
        q,qd = specfun.lqmn(mm,nn,z)
    return q[:(m+1),:(n+1)],qd[:(m+1),:(n+1)]
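Assuming SciPy is installed, a quick call to the public wrapper scipy.special.lqmn illustrates the scalar-z requirement and the (m+1, n+1) output shape:

import numpy as np
from scipy.special import lqmn

q, qd = lqmn(2, 3, 1.5)    # orders 0..2, degrees 0..3, real scalar z
print(q.shape, qd.shape)   # (3, 4) (3, 4)
print(np.isscalar(1.5))    # True; a non-scalar z raises ValueError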
Example #3
    def get_equations(self, substitutions):
        # scale up if necessary
        val = substitutions[self.As]
        if np.isscalar(val):
            substitutions[self.As] = val * np.ones((self.N,self.N))
        else:
            assert np.asarray(val).shape == (self.N, self.N), 'Invalid dimension of A: {}'.format(np.asarray(val).shape)

        val = substitutions[self.Bs]
        if np.isscalar(val):
            substitutions[self.Bs] = val * np.ones(self.N)
        else:
            assert np.asarray(val).shape == (self.N,), 'Invalid dimension of B: {}'.format(np.asarray(val).shape)

        # apply substitutions (in an ugly way)
        eqs = []
        for eq in self._gen_system():
            for sym, val in substitutions.items():
                if isinstance(sym, tuple):
                    for ss, vv in zip(sym, val):
                        if isinstance(ss, tuple):
                            for s, v in zip(ss, vv):
                                eq = eq.subs(s, v)
                        else:
                            eq = eq.subs(ss, vv)
                else:
                    eq = eq.subs(sym, val)
            eqs.append(eq)
        return eqs
Example #4
def reflectivity_amplitude(Q,
                           depth,
                           rho,
                           mu=0,
                           sigma=None,
                           wavelength=1,
                           ):
    """
    Returns the complex reflectivity waveform.

    See reflectivity for details.
    """
    Q = _dense(Q,'d')
    R = numpy.empty(Q.shape,'D')

    n = len(depth)
    if numpy.isscalar(wavelength):
        wavelength=wavelength*numpy.ones(Q.shape, 'd')
    if numpy.isscalar(mu):
        mu = mu*numpy.ones(n, 'd')
    if numpy.isscalar(sigma):
        sigma = sigma*numpy.ones(n-1, 'd')

    wavelength, depth, rho, mu = [_dense(v, 'd')
                                  for v in (wavelength, depth, rho, mu)]

    rho, mu = [v * 1e-6 for v in (rho, mu)]
    if sigma is not None:
        sigma = _dense(sigma, 'd')
        reflmodule._reflectivity_amplitude_rough(rho, mu, depth, sigma, wavelength, Q, R)
    else:
        reflmodule._reflectivity_amplitude (rho, mu, depth, wavelength, Q, R)
    return R
Example #5
    def _find_bounding_points(self, x, y):
        """
        Find the indices of the grid points that bound the input
        ``(x, y)`` position.

        Parameters
        ----------
        x, y : float
            The ``(x, y)`` position where the PSF is to be evaluated.

        Returns
        -------
        indices : list of int
            A list of indices of the bounding grid points.
        """

        if not np.isscalar(x) or not np.isscalar(y):  # pragma: no cover
            raise TypeError('x and y must be scalars')

        if (x < self._xgrid_min or x > self._xgrid_max or
                y < self._ygrid_min or y > self._ygrid_max):  # pragma: no cover
            raise ValueError('(x, y) position is outside of the region '
                             'defined by grid of PSF positions')

        x0 = self._find_bounds_1d(self._xgrid, x)
        y0 = self._find_bounds_1d(self._ygrid, y)
        points = list(itertools.product(self._xgrid[x0:x0 + 2],
                                        self._ygrid[y0:y0 + 2]))

        indices = []
        for xx, yy in points:
            indices.append(np.argsort(np.hypot(self._grid_xpos - xx,
                                               self._grid_ypos - yy))[0])

        return indices
Example #6
  def _test_acgan_helper(self, create_gan_model_fn):
    model = create_gan_model_fn()
    loss = train.gan_loss(model)
    loss_ac_gen = train.gan_loss(model, aux_cond_generator_weight=1.0)
    loss_ac_dis = train.gan_loss(model, aux_cond_discriminator_weight=1.0)
    self.assertTrue(isinstance(loss, namedtuples.GANLoss))
    self.assertTrue(isinstance(loss_ac_gen, namedtuples.GANLoss))
    self.assertTrue(isinstance(loss_ac_dis, namedtuples.GANLoss))

    # Check values.
    with self.test_session(use_gpu=True) as sess:
      variables.global_variables_initializer().run()
      loss_gen_np, loss_ac_gen_gen_np, loss_ac_dis_gen_np = sess.run(
          [loss.generator_loss,
           loss_ac_gen.generator_loss,
           loss_ac_dis.generator_loss])
      loss_dis_np, loss_ac_gen_dis_np, loss_ac_dis_dis_np = sess.run(
          [loss.discriminator_loss,
           loss_ac_gen.discriminator_loss,
           loss_ac_dis.discriminator_loss])

    self.assertTrue(loss_gen_np < loss_dis_np)
    self.assertTrue(np.isscalar(loss_ac_gen_gen_np))
    self.assertTrue(np.isscalar(loss_ac_dis_gen_np))
    self.assertTrue(np.isscalar(loss_ac_gen_dis_np))
    self.assertTrue(np.isscalar(loss_ac_dis_dis_np))
Example #7
def allclose_with_out(x, y, atol=0.0, rtol=1.0e-5):
    # run the np.allclose on x and y
    # if it fails print some stats
    # before returning
    ac = np.allclose(x, y, rtol=rtol, atol=atol)
    if not ac:
        dd = np.abs(x - y)
        neon_logger.display('abs errors: %e [%e, %e] Abs Thresh = %e'
                            % (np.median(dd), np.min(dd), np.max(dd), atol))
        amax = np.argmax(dd)

        if np.isscalar(x):
            neon_logger.display('worst case: %e %e' % (x, y.flat[amax]))
        elif np.isscalar(y):
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y))
        else:
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y.flat[amax]))

        dd = np.abs(dd - atol) / np.abs(y)
        neon_logger.display('rel errors: %e [%e, %e] Rel Thresh = %e'
                            % (np.median(dd), np.min(dd), np.max(dd), rtol))
        amax = np.argmax(dd)
        if np.isscalar(x):
            neon_logger.display('worst case: %e %e' % (x, y.flat[amax]))
        elif np.isscalar(y):
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y))
        else:
            neon_logger.display('worst case: %e %e' % (x.flat[amax], y.flat[amax]))
    return ac
Example #8
    def __getitem__(self, index):
        """Return the element(s) index=(i, j), where j may be a slice.
        This always returns a copy for consistency, since slices into
        Python lists return copies.
        """
        try:
            i, j = index
        except (AssertionError, TypeError):
            raise IndexError('invalid index')

        if not np.isscalar(i) and np.isscalar(j):
            warn('Indexing into a lil_matrix with multiple indices is slow. '
                 'Pre-converting to CSC or CSR beforehand is more efficient.',
                 SparseEfficiencyWarning)

        if np.isscalar(i):
            if np.isscalar(j):
                return self._get1(i, j)
            if isinstance(j, slice):
                j = self._slicetoseq(j, self.shape[1])
            if issequence(j):
                return self.__class__([[self._get1(i, jj) for jj in j]])
        elif issequence(i) and issequence(j):
            return self.__class__([[self._get1(ii, jj) for (ii, jj) in zip(i, j)]])
        elif issequence(i) or isinstance(i, slice):
            if isinstance(i, slice):
                i = self._slicetoseq(i, self.shape[0])
            if np.isscalar(j):
                return self.__class__([[self._get1(ii, j)] for ii in i])
            if isinstance(j, slice):
                j = self._slicetoseq(j, self.shape[1])
            if issequence(j):
                return self.__class__([[self._get1(ii, jj) for jj in j] for ii in i])
        else:
            raise IndexError
Example #9
    def test_choice_return_shape(self):
        p = [0.1, 0.9]
        # Check scalar
        assert_(np.isscalar(np.random.choice(2, replace=True)))
        assert_(np.isscalar(np.random.choice(2, replace=False)))
        assert_(np.isscalar(np.random.choice(2, replace=True, p=p)))
        assert_(np.isscalar(np.random.choice(2, replace=False, p=p)))
        assert_(np.isscalar(np.random.choice([1, 2], replace=True)))
        assert_(np.random.choice([None], replace=True) is None)
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(np.random.choice(arr, replace=True) is a)

        # Check 0-d array
        s = tuple()
        assert_(not np.isscalar(np.random.choice(2, s, replace=True)))
        assert_(not np.isscalar(np.random.choice(2, s, replace=False)))
        assert_(not np.isscalar(np.random.choice(2, s, replace=True, p=p)))
        assert_(not np.isscalar(np.random.choice(2, s, replace=False, p=p)))
        assert_(not np.isscalar(np.random.choice([1, 2], s, replace=True)))
        assert_(np.random.choice([None], s, replace=True).ndim == 0)
        a = np.array([1, 2])
        arr = np.empty(1, dtype=object)
        arr[0] = a
        assert_(np.random.choice(arr, s, replace=True).item() is a)

        # Check multi dimensional array
        s = (2, 3)
        p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
        assert_equal(np.random.choice(6, s, replace=True).shape, s)
        assert_equal(np.random.choice(6, s, replace=False).shape, s)
        assert_equal(np.random.choice(6, s, replace=True, p=p).shape, s)
        assert_equal(np.random.choice(6, s, replace=False, p=p).shape, s)
        assert_equal(np.random.choice(np.arange(6), s, replace=True).shape, s)
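The scalar-versus-0-d distinction these assertions rely on can be seen directly: with no size argument np.random.choice returns a scalar, while size=() returns a 0-d array.

import numpy as np

print(np.isscalar(np.random.choice(2)))      # True  -- plain scalar
print(np.isscalar(np.random.choice(2, ())))  # False -- 0-d ndarray
print(np.random.choice(2, ()).ndim)          # 0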
Example #10
    def dt(self, it, ir=None):
        """Get the post-processor time step.

        :param it: Time index (may be ignored if implosions have a constant time step)
        :param ir: Optional since dt is the same for all spatial zones. However, this gives the option of getting a
            2-D array as the return value, which is convenient for some calculations.
        :returns: The delta in time between it and it+1 [s]
        """
        # Case where no ir is specified:
        if ir is None:
            assert np.isscalar(it) or (isinstance(it, tuple) and len(it)==2)
            if np.isscalar(it):
                return self.dtRaw[it,0]
            else:
                return self.dtRaw[it[0]:it[1], 0]

        # Both it and ir are specified:
        it, ir = self.__internalIndex__(it, ir)

        # Handle scalar it:
        if np.isscalar(it) and np.isscalar(ir):
            return self.dtRaw[it, ir]

        # Handle array:
        return self.dtRaw[it[0]:it[1], ir[0]:ir[1]]
Example #11
    def f(self, it, ir, A, Z):
        """Get fraction of a specified ion in a zone.

        :param it: The temporal index
        :param ir: The spatial index
        :param A: The ion mass you're interested in (scalar)
        :param Z: The ion atomic number you're interested in (scalar)
        :returns: Fraction of that ion in a zone
        """
        it, ir = self.__internalIndex__(it, ir)

        # scalars:
        if np.isscalar(it) and np.isscalar(ir):
            Azone = self.IonA(it, ir)
            Zzone = self.IonZ(it, ir)
            for i in range(len(Azone)):
                if A == Azone[i] and Z == Zzone[i]:
                    return self.IonF(it,ir)[i]
            return 0.

        # Truth values, for if a given entry corresponds to the ion we want
        shape = (it[1]-it[0], ir[1]-ir[0], len(self.IonARaw[0][0]))
        testA = np.ones(shape, dtype=float) * A
        testZ = np.ones(shape, dtype=float) * Z
        truthA = np.equal(testA, self.IonARaw)
        truthZ = np.equal(testZ, self.IonZRaw)
        return np.sum(self.IonFRaw*truthA*truthZ, axis=2)
Example #12
    def proxy_func(a, b, *args, **kwargs):
        context = determine_context(a, b)
        is_a_dap = isinstance(a, DistArray)
        is_b_dap = isinstance(b, DistArray)
        if is_a_dap and is_b_dap:
            a_key = a.key
            b_key = b.key
            distribution = a.distribution
        elif is_a_dap and numpy.isscalar(b):
            a_key = a.key
            b_key = context._key_and_push(b)[0]
            distribution = a.distribution
        elif is_b_dap and numpy.isscalar(a):
            a_key = context._key_and_push(a)[0]
            b_key = b.key
            distribution = b.distribution
        else:
            raise TypeError('only DistArray or scalars are accepted')
        new_key = context._generate_key()

        if 'casting' in kwargs:
            exec_str = "%s = distarray.local.%s(%s,%s, casting='%s')"
            exec_str %= (new_key, name, a_key, b_key, kwargs['casting'])
        else:
            exec_str = '%s = distarray.local.%s(%s,%s)'
            exec_str %= (new_key, name, a_key, b_key)

        context._execute(exec_str)
        return DistArray.from_localarrays(new_key, distribution=distribution)
Example #13
    def __call__(self,EGeV,**kwargs):
	"""
	Compute optical depth for one combination of line energy and column density. 
	
	Parameters
	----------
	EGeV:	m-dim array, photon energies, in GeV

	kwargs
	------
	Elines: n-dim array, line energy, in eV
	Nlines: n-dim array,  log 10 column density, in cm^-2
	z:	float, redshift

	Returns
	-------
	Optical depth as (n x m)-dim array
	"""

	if np.isscalar(EGeV):
	    EGeV = np.array([EGeV])
	kwargs.setdefault('Elines',self.Elines)
	kwargs.setdefault('Nlines',self.Nlines)
	if np.isscalar(kwargs['Elines']):
	    kwargs['Elines'] = np.array(kwargs['Elines'])
	if np.isscalar(kwargs['Nlines']):
	    kwargs['Nlines'] = np.array(kwargs['Nlines'])
	if not len(kwargs['Nlines']) == len(kwargs['Elines']):
	    raise ValueError("Nlines and Elines do not match. Shapes: {0}, {1}".format(
		kwargs['Nlines'].shape, kwargs['Elines'].shape
		))
	return self.__calctau(EGeV, **kwargs).T
Example #14
def mathieu_odd_coef(m,q):
    """Compute expansion coefficients for even mathieu functions and
    modified mathieu functions.
    """
    if not (isscalar(m) and isscalar(q)):
        raise ValueError("m and q must be scalars.")
    if (q < 0):
        raise ValueError("q >=0")
    if (m != floor(m)) or (m<=0):
        raise ValueError("m must be an integer > 0")

    if (q <= 1):
        qm = 7.5+56.1*sqrt(q)-134.7*q+90.7*sqrt(q)*q
    else:
        qm=17.0+3.1*sqrt(q)-.126*q+.0037*sqrt(q)*q
    km = int(qm+0.5*m)
    if km > 251:
        print "Warning, too many predicted coefficients."
    kd = 4
    m = int(floor(m))
    if m % 2:
        kd = 3

    b = mathieu_b(m,q)
    fc = specfun.fcoef(kd,m,q,b)
    return fc[:km]
Example #15
 def __init__(self, endog, exog, constr, param=0., sigma=None):
     N, Q = exog.shape
     constr = np.asarray(constr)
     if constr.ndim == 1:
         K, P = 1, constr.shape[0]
     else:
         K, P = constr.shape
     if Q != P:
         raise Exception('Constraints and design do not align')
     self.ncoeffs = Q
     self.nconstraint = K
     self.constraint = constr
     if np.isscalar(param) and K > 1:
         param = np.ones((K,)) * param
     self.param = param
     if sigma is None:
         sigma = 1.
     if np.isscalar(sigma):
         sigma = np.ones(N) * sigma
     sigma = np.squeeze(sigma)
     if sigma.ndim == 1:
         self.sigma = np.diag(sigma)
         self.cholsigmainv = np.diag(np.sqrt(sigma))
     else:
         self.sigma = sigma
         self.cholsigmainv = np.linalg.cholesky(np.linalg.pinv(self.sigma)).T
     super(GLS, self).__init__(endog, exog)
Example #16
    def evaluate(self, x, y, flux, x_0, y_0):
        """
        Evaluate the `GriddedPSFModel` for the input parameters.
        """

        # NOTE: this is needed because the PSF photometry routines input
        # length-1 values instead of scalars.  TODO: fix the photometry
        # routines.
        if not np.isscalar(x_0):
            x_0 = x_0[0]
        if not np.isscalar(y_0):
            y_0 = y_0[0]

        if (x_0 < self._xgrid_min or x_0 > self._xgrid_max or
                y_0 < self._ygrid_min or y_0 > self._ygrid_max):

            # position is outside of the grid, so simply use the
            # closest reference PSF
            self._ref_indices = np.argsort(np.hypot(self._grid_xpos - x_0,
                                                    self._grid_ypos - y_0))[0]
            self._psf_interp = self.data[self._ref_indices, :, :]
        else:
            # find the four bounding reference PSFs and interpolate
            self._ref_indices = self._find_bounding_points(x_0, y_0)
            xyref = np.array(self.grid_xypos)[self._ref_indices]
            psfs = self.data[self._ref_indices, :, :]

            self._psf_interp = self._bilinear_interp(xyref, psfs, x_0, y_0)

        # now evaluate the PSF at the (x_0, y_0) subpixel position on
        # the input (x, y) values
        psfmodel = FittableImageModel(self._psf_interp,
                                      oversampling=self.oversampling)

        return psfmodel.evaluate(x, y, flux, x_0, y_0)
Example #17
    def setData(self, array, var=fname, dtype=field.dtype,
                ndim=field.ndim, none=field.none, flags=field.flags):
        if array is None:
            self._data.pop(var, None)
        else:
            if np.isscalar(array):
                self._data[var][:] = array
            else:
                if self._n_atoms == 0:
                    self._n_atoms = len(array)
                elif len(array) != self._n_atoms:
                    raise ValueError('length of array must match number '
                                    'of atoms')

                if not np.isscalar(array):
                    array = np.asarray(array, dtype)
                else:
                    raise TypeError('array must be an ndarray or a list')

                if array.ndim != ndim:
                    raise ValueError('array must be {0} '
                                    'dimensional'.format(ndim))
                elif array.dtype != dtype:
                    try:
                        array = array.astype(dtype)
                    except ValueError:
                        raise ValueError('array cannot be assigned type '
                                        '{0}'.format(dtype))
                self._data[var] = array
                if none: self._none(none)
                if flags and self._flags:
                    self._resetFlags(var)
Example #18
    def n_array(self,z,e):
	"""
	Returns EBL photon density in [1 / cm^3 / eV] for redshift z and energy e (eV) from BSpline Interpolation

	Parameters
	----------
	z: redshift
	    scalar or N-dim numpy array
	e: energy in eV 
	    scalar or M-dim numpy array

	Returns
	-------
	(N x M)-np.array with corresponding photon density values

	Notes
	-----
	if any z < self.z (from interpolation table), self.z[0] is used and RuntimeWarning is issued.
	"""
	if np.isscalar(e):
	    e = np.array([e])
	if np.isscalar(z):
	    z = np.array([z])

	# convert energy in eV to wavelength in micron
	l	=  SI_h * SI_c / e / SI_e  * 1e6	
	# convert energy in J
	e_J	= e * SI_e

	n = self.ebl_array(z,l)
	# convert nuInu to photon density in 1 / J / m^3
	n = 4.*PI / SI_c / e_J**2. * n  * 1e-9
	# convert photon density in 1 / eV / cm^3 and return
	return n * SI_e * 1e-6
Example #19
    def _construct(self,el,mp,n):

        m = el.shape[0]-1
        G = opt.spmatrix(0.0,[],[],(m+1,n))
        h = opt.matrix(0.0,(m+1,1))

        # y
        y = el[m,0]
        if np.isscalar(y):
            h[0,0] = y*1.
        elif type(y) is cvxpy_obj:
            h[0,0] = y.value*1.
        else:
            G[0,mp[y]]=-1.

        # x
        for i in range(0,m,1):
            x = el[i,0]
            if np.isscalar(x):
                h[i+1,0] = x*1.
            elif type(x) is cvxpy_obj:
                h[i+1,0] = x.value*1.
            else:
                G[i+1,mp[x]] = -1.
        
        # Return G,h
        return G,h,m+1
Example #20
def add_jitter(psr, ecorr ,flagid=None, flags=None, coarsegrain=0.1,
               seed=None):
    """Add correlated quadrature noise of rms `ecorr` [s],
    with coarse-graining time `coarsegrain` [days].
    Optionally take a pseudorandom-number-generator seed."""
    
    if seed is not None:
        N.random.seed(seed)
    
    if flags is None:
        t, U = quantize_fast(N.array(psr.toas(),'d'), dt=coarsegrain)
    elif flags is not None and flagid is not None:
        t, f, U = quantize_fast(N.array(psr.toas(),'d'),
                                N.array(psr.flagvals(flagid)),
                                dt=coarsegrain)

    # default jitter value
    ecorrvec = N.zeros(len(t))
    
    # check that jitter is scalar if flags is None
    if flags is None:
        if not N.isscalar(ecorr):
            raise ValueError('ERROR: If flags is None, jitter must be a scalar')
        else:
            ecorrvec = N.ones(len(t)) * ecorr

    if flags is not None and flagid is not None and not N.isscalar(ecorr):
        if len(ecorr) == len(flags):
            for ct, flag in enumerate(flags):
                ind = flag == N.array(f)
                ecorrvec[ind] = ecorr[ct]

    psr.stoas[:] += (1 / day) * N.dot(U*ecorrvec, N.random.randn(U.shape[1]))
Example #21
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    def within_tol(x, y, atol, rtol):
        with np.errstate(invalid='ignore'):
            result = np.less_equal(abs(x - y), atol + rtol * abs(y))
        if np.isscalar(a) and np.isscalar(b):
            result = bool(result)
        return result

    x = np.array(a, copy=False, subok=True, ndmin=1)
    y = np.array(b, copy=False, subok=True, ndmin=1)
    xfin = np.isfinite(x)
    yfin = np.isfinite(y)
    if np.all(xfin) and np.all(yfin):
        return within_tol(x, y, atol, rtol)
    else:
        finite = xfin & yfin
        cond = np.zeros_like(finite, subok=True)
        # Because we're using boolean indexing, x & y must be the same shape.
        # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
        # lib.stride_tricks, though, so we can't import it here.
        x = x * np.ones_like(cond)
        y = y * np.ones_like(cond)
        # Avoid subtraction with infinite/nan values...
        cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
        # Check for equality of infinite values...
        cond[~finite] = (x[~finite] == y[~finite])
        if equal_nan:
            # Make NaN == NaN
            both_nan = np.isnan(x) & np.isnan(y)
            cond[both_nan] = both_nan[both_nan]
        return cond
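A small check of the function above, assuming it is defined in a module that imports NumPy as np: scalar inputs collapse to a plain bool, array inputs yield a boolean array.

print(isclose(1.0, 1.0 + 1e-9))         # True (a Python bool)
print(isclose([1.0, 2.0], [1.0, 2.1]))  # [ True False]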
Example #22
def blackbody_wn_rad2temp(wavenumber, radiance):
    """Derive brightness temperatures from radiance using the Planck 
    function. Wavenumber space"""

    if np.isscalar(radiance):
        rad = np.array([radiance, ], dtype='float64')
    else:
        rad = np.array(radiance, dtype='float64')
    if np.isscalar(wavenumber):
        wavnum = np.array([wavenumber, ], dtype='float64')
    else:
        wavnum = np.array(wavenumber, dtype='float64')

    const1 = H_PLANCK * C_SPEED / K_BOLTZMANN
    const2 = 2 * H_PLANCK * C_SPEED**2
    res = const1 * wavnum / np.log(np.divide(const2 * wavnum**3, rad) + 1.0)

    shape = rad.shape
    resshape = res.shape

    if wavnum.shape[0] == 1:
        if rad.shape[0] == 1:
            return res[0]
        else:
            return res[::].reshape(shape)
    else:
        if rad.shape[0] == 1:
            return res[0, :]
        else:
            if len(shape) == 1:
                return np.reshape(res, (shape[0], resshape[1]))
            else:
                return np.reshape(res, (shape[0], shape[1], resshape[1]))
Example #23
def oneProjector(b,d=[],tau=-1):

    if tau==-1:
        if not d:
            print('ERROR: oneProjector requires at least two input parameters')
            return
        tau = d
        d   = []
        
    #print d    
    #if not d:
    #    d=1

    if not np.isscalar(d) and np.size(b) != np.size(d):
        print('ERROR: oneProjector: Vectors b and d must have the same length')
        return

    if np.isscalar(d) and d == 0:
        return b.copy()

    s = np.sign(b)
    b = abs(b)

    if np.isscalar(d):
        x = oneProjectorMex(b,tau/d);
    else:
        d   = abs(d)
        idx = np.where(d > np.spacing(1))
        x   = b.copy()
        x[idx] = oneProjectorMex(b[idx],d[idx],tau)

    return x*s
Example #24
    def __init__(self, low, high, shape=None):
        """
        Constructor.

        Args:
            low ([float, np.ndarray]): the minimum value of each dimension of
                the space. If a scalar value is provided, this value is
                considered as the minimum one for each dimension. If a
                np.ndarray is provided, each i-th element is considered the
                minimum value of the i-th dimension;
            high ([float, np.ndarray]): the maximum value of dimensions of the
                space. If a scalar value is provided, this value is considered
                as the maximum one for each dimension. If a np.ndarray is
                provided, each i-th element is considered the maximum value
                of the i-th dimension;
            shape (np.ndarray, None): the dimension of the space. Must match
                the shape of ``low`` and ``high``, if they are np.ndarray.

        """
        if shape is None:
            self._low = low
            self._high = high
            self._shape = low.shape
        else:
            self._low = low
            self._high = high
            self._shape = shape
            if np.isscalar(low) and np.isscalar(high):
                self._low += np.zeros(shape)
                self._high += np.zeros(shape)

        assert self._low.shape == self._high.shape
Example #25
    def groupby(cls, dataset, dim_names, container_type, group_type, **kwargs):
        # Get dimensions information
        dimensions = [dataset.get_dimension(d, strict=True) for d in dim_names]
        if 'kdims' in kwargs:
            kdims = kwargs['kdims']
        else:
            kdims = [kdim for kdim in dataset.kdims if kdim not in dimensions]
            kwargs['kdims'] = kdims

        invalid = [d for d in dimensions if dataset.data[d.name].ndim > 1]
        if invalid:
            if len(invalid) == 1: invalid = "'%s'" % invalid[0]
            raise ValueError("Cannot groupby irregularly sampled dimension(s) %s."
                             % invalid)

        # Update the kwargs appropriately for Element group types
        group_kwargs = {}
        group_type = dict if group_type == 'raw' else group_type
        if issubclass(group_type, Element):
            group_kwargs.update(util.get_param_values(dataset))
        else:
            kwargs.pop('kdims')
        group_kwargs.update(kwargs)

        drop_dim = any(d not in group_kwargs['kdims'] for d in kdims)

        # Find all the keys along supplied dimensions
        keys = [cls.coords(dataset, d.name) for d in dimensions]
        transpose = [dataset.ndims-dataset.kdims.index(kd)-1 for kd in kdims]
        transpose += [i for i in range(dataset.ndims) if i not in transpose]

        # Iterate over the unique entries applying selection masks
        grouped_data = []
        for unique_key in zip(*util.cartesian_product(keys)):
            select = dict(zip(dim_names, unique_key))
            if drop_dim:
                group_data = dataset.select(**select)
                group_data = group_data if np.isscalar(group_data) else group_data.columns()
            else:
                group_data = cls.select(dataset, **select)

            if np.isscalar(group_data) or (isinstance(group_data, get_array_types()) and group_data.shape == ()):
                group_data = {dataset.vdims[0].name: np.atleast_1d(group_data)}
                for dim, v in zip(dim_names, unique_key):
                    group_data[dim] = np.atleast_1d(v)
            elif not drop_dim:
                if isinstance(group_data, get_array_types()):
                    group_data = {dataset.vdims[0].name: group_data}
                for vdim in dataset.vdims:
                    data = group_data[vdim.name]
                    data = data.transpose(transpose[::-1])
                    group_data[vdim.name] = np.squeeze(data)
            group_data = group_type(group_data, **group_kwargs)
            grouped_data.append((tuple(unique_key), group_data))

        if issubclass(container_type, NdMapping):
            with item_check(False):
                return container_type(grouped_data, kdims=dimensions)
        else:
            return container_type(grouped_data)
Example #26
 def dist(x, y):
     if isscalar(x) and isscalar(y):
         return abs(x-y)
     elif isinstance(x, ndarray):
         return linalg.norm(x-y)
     else:
         raise TypeError('x and y must both be scalars or both be arrays')
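A quick sanity check of the helper, assuming the NumPy names it uses (isscalar, ndarray, linalg) are imported as the snippet expects:

import numpy as np

print(dist(3.0, 5.5))                                    # 2.5
print(dist(np.array([3.0, 0.0]), np.array([0.0, 4.0])))  # 5.0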
Example #27
    def __getitem__(self, index):
        """Return the element(s) index=(i, j), where j may be a slice.
        This always returns a copy for consistency, since slices into
        Python lists return copies.
        """
        try:
            i, j = index
        except (AssertionError, TypeError):
            raise IndexError('invalid index')

        if np.isscalar(i):
            if np.isscalar(j):
                return self._get1(i, j)
            if isinstance(j, slice):
                j = self._slicetoseq(j, self.shape[1])
            if issequence(j):
                return self.__class__([[self._get1(i, jj) for jj in j]])
        elif issequence(i) and issequence(j):
            return self.__class__([[self._get1(ii, jj) for (ii, jj) in zip(i, j)]])
        elif issequence(i) or isinstance(i, slice):
            if isinstance(i, slice):
                i = self._slicetoseq(i, self.shape[0])
            if np.isscalar(j):
                return self.__class__([[self._get1(ii, j)] for ii in i])
            if isinstance(j, slice):
                j = self._slicetoseq(j, self.shape[1])
            if issequence(j):
                return self.__class__([[self._get1(ii, jj) for jj in j] for ii in i])
        else:
            raise IndexError
Example #28
def angle_difference(angle1, angle2, return_abs=False):
    if np.isscalar(angle1) and np.isscalar(angle2):
        diff = angle_difference_scalar(angle1, angle2)
    else:
        diff = angle_difference_vector(angle1, angle2)

    return diff if return_abs == False else np.abs(diff)
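The angle_difference_scalar/vector helpers are not shown here; a self-contained stand-in (angle_difference_wrapped is a hypothetical name) that wraps the signed difference into [-pi, pi) and accepts scalars or arrays might look like this:

import numpy as np

def angle_difference_wrapped(angle1, angle2):
    # hypothetical replacement for the missing helpers: signed difference
    # wrapped into [-pi, pi), for scalar or array inputs alike
    return (np.asarray(angle1) - np.asarray(angle2) + np.pi) % (2 * np.pi) - np.pi

print(angle_difference_wrapped(0.1, 2 * np.pi - 0.1))  # ~0.2
print(angle_difference_wrapped([0.0, np.pi], [np.pi / 2, -np.pi / 2]))  # [-pi/2 -pi/2]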
Example #29
    def add(self, d):
        """Adds a dictionary of scalars.

        Args:
            d (dict): Dictionary of scalars to accumulate. Only elements of
               scalars, zero-dimensional arrays, and variables of
               zero-dimensional arrays are accumulated. When the value
               is a tuple, the second element is interpreted as a weight.

        """
        summaries = self._summaries
        for k, v in six.iteritems(d):
            w = 1
            if isinstance(v, tuple):
                w = v[1]
                v = v[0]
                if isinstance(w, variable.Variable):
                    w = w.array
                if not numpy.isscalar(w) and not getattr(w, 'ndim', -1) == 0:
                    raise ValueError(
                        'Given weight to {} was not scalar.'.format(k))
            if isinstance(v, variable.Variable):
                v = v.array
            if numpy.isscalar(v) or getattr(v, 'ndim', -1) == 0:
                summaries[k].add(v, weight=w)
Example #30
    def __init__(self, lin_op, alpha=1.0, beta=1.0, b=0.0, c=0.0,
                 gamma=0.0, d=0.0, implem=None):
        # Error checking.
        for elem, name in zip([b, c], ["b", "c"]):
            if not (np.isscalar(elem) or elem.shape == lin_op.shape):
                raise Exception("Invalid dimensions of %s." % name)
        for elem, name in zip([alpha, gamma], ["alpha", "gamma"]):
            if not np.isscalar(elem) or elem < 0:
                raise Exception("%s must be a nonnegative scalar." % name)
        for elem, name in zip([beta, d], ["beta", "d"]):
            if not np.isscalar(elem):
                raise Exception("%s must be a scalar." % name)

        self.implem_key = implem
        self.implementation = Impl['numpy']
        if implem is not None:
            self.set_implementation(implem)

        self.lin_op = lin_op
        self.alpha = float(alpha)
        self.beta = float(beta)
        self.b = b
        self.c = c
        if np.isscalar(b):
            self.b = b * np.ones(self.lin_op.shape)
        if np.isscalar(c):
            self.c = c * np.ones(self.lin_op.shape)
        self.gamma = float(gamma)
        self.d = float(d)
        self.init_tmps()
        self.kernel_cuda_prox = None
        super(ProxFn, self).__init__()
Example #31
def combined_shape(length, shape=None):
    if shape is None:
        return (length, )
    return (length, shape) if np.isscalar(shape) else (length, *shape)
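A few calls covering the three cases (no shape, scalar shape, tuple shape), assuming combined_shape above is in scope:

print(combined_shape(32))            # (32,)
print(combined_shape(32, 4))         # (32, 4)
print(combined_shape(32, (84, 84)))  # (32, 84, 84)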
Example #32
def dvts_resamp(file, dirOut, RESAMP, SECTOR=None, overwrite=True):
    """ Resample TESS dv time series file and save as h5d format
        resamp - Resample factor just make it odd okay"""

    hdulist = fits.open(file)
    prihdr = hdulist[0].header
    nTces = prihdr['NUMTCES']

    dataSpanMax = 0.0
    # Get header information that we should keep
    if not SECTOR is None:
        keepprihdr = ['TICID','RA_OBJ', \
                  'DEC_OBJ','PMRA','PMDEC','PMTOTAL','TESSMAG','TEFF', \
                  'LOGG','RADIUS']
        formatprihdr = [np.uint32, \
                    float,float,float,float,float, \
                    float,float,float,float,float]
    else:
        keepprihdr = ['TICID','SECTOR','PXTABLE','RA_OBJ', \
                      'DEC_OBJ','PMRA','PMDEC','PMTOTAL','TESSMAG','TEFF', \
                      'LOGG','RADIUS']
        formatprihdr = [np.uint32, int, int, \
                        float,float,float,float,float, \
                        float,float,float,float,float]

    # make empty arrays
    kpCadenceNo = np.array([0], dtype=int)
    kpTimetbjd = np.array([0.0], dtype=float)
    kpQuality = np.array([0], dtype=int)
    kpPDC = np.array([0.0], dtype=float)
    for ii in range(nTces):
        # Check if already done
        epic = hdulist[0].header['TICID']
        if SECTOR is None:
            sec = hdulist[0].header['SECTOR']
        else:
            sec = SECTOR
        pn = ii + 1
        fileoutput = os.path.join(
            make_data_dirs(dirOut, sec, epic),
            'tess_dvts_{0:016d}_{1:02d}.h5d'.format(epic, pn))
        fileExists = os.path.isfile(fileoutput)
        if (not fileExists) or overwrite:

            extname = 'TCE_{0:d}'.format(ii + 1)
            arr = hdulist[extname].data['TIME']
            nImage = len(arr)

            cadenceNo = hdulist[extname].data['CADENCENO']
            kpCadenceNo = cadenceNo
            timetbjd = hdulist[extname].data['TIME']
            kpTimetbjd = timetbjd
            lc_init = hdulist[extname].data['LC_INIT']
            lc_init_err = hdulist[extname].data['LC_INIT_ERR']
            lc_white = hdulist[extname].data['LC_WHITE']
            lc_med_detrend = hdulist[extname].data['LC_DETREND']
            lc_model = hdulist[extname].data['MODEL_INIT']
            lc_white_model = hdulist[extname].data['MODEL_WHITE']
            lc_phase = hdulist[extname].data['PHASE']
            pdc_flux = hdulist['statistics'].data['PDCSAP_FLUX']
            pdc_flux_err = hdulist['statistics'].data['PDCSAP_FLUX_ERR']
            deweights = hdulist['statistics'].data['DEWEIGHTS']
            kpQuality = hdulist['statistics'].data['QUALITY']
            kpPDC = hdulist['statistics'].data['PDCSAP_FLUX']

            # Fix the issue of having times close to zero
            # First get time stamps on valid data
            idx = np.where((np.isfinite(timetbjd)) & (np.isfinite(lc_init))
                           & (np.isfinite(pdc_flux)))[0]
            # minimum time on valid data
            minGdTime = np.min(timetbjd[idx])
            idx = np.where(timetbjd < minGdTime - 30.0)[0]
            timetbjd[idx] = np.nan

            newNImage = int(np.floor(nImage / RESAMP))
            oldNImage = newNImage * RESAMP
            # trim off the excess images not integral into resamp
            idx = np.arange(0, oldNImage)
            cadenceNo, timetbjd, lc_init, lc_init_err, lc_white, lc_med_detrend, \
                lc_model, lc_white_model, lc_phase, pdc_flux, pdc_flux_err, \
                deweights = idx_filter(idx, cadenceNo, timetbjd, lc_init, lc_init_err, lc_white, lc_med_detrend, \
                                       lc_model, lc_white_model, lc_phase, pdc_flux, pdc_flux_err, \
                                       deweights)

            # Do downsampling of data stream
            cadenceNoBeg = np.min(np.reshape(cadenceNo, (newNImage, RESAMP)),
                                  axis=1)
            cadenceNoEnd = np.max(np.reshape(cadenceNo, (newNImage, RESAMP)),
                                  axis=1)
            cadenceNo = np.mean(np.reshape(cadenceNo, (newNImage, RESAMP)),
                                axis=1,
                                dtype=int)
            timetbjd = np.mean(np.reshape(timetbjd, (newNImage, RESAMP)),
                               axis=1)
            lc_init = np.mean(np.reshape(lc_init, (newNImage, RESAMP)), axis=1)
            lc_init_err = np.mean(np.reshape(lc_init_err, (newNImage, RESAMP)),
                                  axis=1)
            lc_white = np.mean(np.reshape(lc_white, (newNImage, RESAMP)),
                               axis=1)
            lc_med_detrend = np.mean(np.reshape(lc_med_detrend,
                                                (newNImage, RESAMP)),
                                     axis=1)
            lc_model = np.mean(np.reshape(lc_model, (newNImage, RESAMP)),
                               axis=1)
            lc_white_model = np.mean(np.reshape(lc_white_model,
                                                (newNImage, RESAMP)),
                                     axis=1)
            lc_phase = np.mean(np.reshape(lc_phase, (newNImage, RESAMP)),
                               axis=1)
            pdc_flux = np.sum(np.reshape(pdc_flux, (newNImage, RESAMP)),
                              axis=1)
            pdc_flux_err = np.mean(np.reshape(pdc_flux_err,
                                              (newNImage, RESAMP)),
                                   axis=1)
            deweights = np.mean(np.reshape(deweights, (newNImage, RESAMP)),
                                axis=1)

            # Identify data that is missing or NaN
            idx = np.where((np.isfinite(timetbjd)) & (np.isfinite(lc_init))
                           & (np.isfinite(pdc_flux)))[0]
            valid_data_flag = np.zeros((newNImage, ), dtype=np.bool_)
            valid_data_flag[idx] = True

            # Trim all leading in-valid data
            if not valid_data_flag[0]:
                idx = np.where(valid_data_flag)[0]
                if not len(idx) == 0:
                    idx = idx[0]
                    cadenceNoBeg = cadenceNoBeg[idx:]
                    cadenceNoEnd = cadenceNoEnd[idx:]
                    cadenceNo = cadenceNo[idx:]
                    timetbjd = timetbjd[idx:]
                    lc_init = lc_init[idx:]
                    lc_init_err = lc_init_err[idx:]
                    lc_white = lc_white[idx:]
                    lc_med_detrend = lc_med_detrend[idx:]
                    lc_model = lc_model[idx:]
                    lc_white_model = lc_white_model[idx:]
                    lc_phase = lc_phase[idx:]
                    pdc_flux = pdc_flux[idx:]
                    pdc_flux_err = pdc_flux_err[idx:]
                    deweights = deweights[idx:]
                    valid_data_flag = valid_data_flag[idx:]
                else:
                    print('No Valid data? {0:d} {1:d}'.format(ii, epic))

            # Get the data span in days for use in federation steps
            idx = np.where(valid_data_flag == True)[0]
            if (len(idx) > 0):
                dataSpan = np.max(timetbjd[idx]) - np.min(timetbjd[idx])
                if dataSpan > dataSpanMax:
                    dataSpanMax = dataSpan

            # Now save data as h5py
            f = h5py.File(fileoutput, 'w')
            tmp = f.create_dataset('cadenceNo',
                                   data=cadenceNo,
                                   compression='gzip')
            tmp = f.create_dataset('cadenceNoBeg',
                                   data=cadenceNoBeg,
                                   compression='gzip')
            tmp = f.create_dataset('cadenceNoEnd',
                                   data=cadenceNoEnd,
                                   compression='gzip')
            tmp = f.create_dataset('timetbjd',
                                   data=timetbjd,
                                   compression='gzip')
            tmp = f.create_dataset('lc_init', data=lc_init, compression='gzip')
            tmp = f.create_dataset('lc_init_err',
                                   data=lc_init_err,
                                   compression='gzip')
            tmp = f.create_dataset('lc_white',
                                   data=lc_white,
                                   compression='gzip')
            tmp = f.create_dataset('lc_med_detrend',
                                   data=lc_med_detrend,
                                   compression='gzip')
            tmp = f.create_dataset('lc_model',
                                   data=lc_model,
                                   compression='gzip')
            tmp = f.create_dataset('lc_white_model',
                                   data=lc_white_model,
                                   compression='gzip')
            tmp = f.create_dataset('lc_phase',
                                   data=lc_phase,
                                   compression='gzip')
            tmp = f.create_dataset('pdc_flux',
                                   data=pdc_flux,
                                   compression='gzip')
            tmp = f.create_dataset('pdc_flux_err',
                                   data=pdc_flux_err,
                                   compression='gzip')
            tmp = f.create_dataset('deweights',
                                   data=deweights,
                                   compression='gzip')
            tmp = f.create_dataset('valid_data_flag',
                                   data=valid_data_flag,
                                   compression='gzip')
            for i in range(len(keepprihdr)):
                curval = hdulist[0].header[keepprihdr[i]]
                if np.isscalar(curval):
                    tmp = f.create_dataset(
                        keepprihdr[i],
                        data=np.array([hdulist[0].header[keepprihdr[i]]],
                                      dtype=formatprihdr[i]))
                else:
                    tmp = f.create_dataset(keepprihdr[i],
                                           data=np.array(
                                               [-1], dtype=formatprihdr[i]))

            f.close()
    return dataSpanMax, kpCadenceNo, kpTimetbjd, kpQuality, kpPDC
Example #33
def __lt__(self, value):
    "Returns a boolean array with < status for all elements"
    if np.isscalar(value):
        return self.get_local() < value
    else:
        return self.get_local() < value.get_local()
Example #34
 def toCPPText(self, mat):
     if (np.isscalar(mat)):
         return str(mat)
     text = "{" + ",".join([self.toCPPText(x) for x in mat]) + "}"
     return text
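A module-level sketch of the same recursion (to_cpp_text is a hypothetical standalone name), showing how a nested array becomes a C++ brace initializer:

import numpy as np

def to_cpp_text(mat):
    # same recursion as the method above, without the class
    if np.isscalar(mat):
        return str(mat)
    return "{" + ",".join(to_cpp_text(x) for x in mat) + "}"

print(to_cpp_text(np.array([[1, 2], [3, 4]])))  # {{1,2},{3,4}}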
Example #35
 def set_channel_offsets(self, offsets, channel_ids=None):
     if np.isscalar(offsets):
         offsets = [offsets] * self.get_num_channels()
     self.set_property('offset_to_uV', offsets, ids=channel_ids)
Example #36
 def set_channel_gains(self, gains, channel_ids=None):
     if np.isscalar(gains):
         gains = [gains] * self.get_num_channels()
     self.set_property('gain_to_uV', gains, ids=channel_ids)
Example #37
    def __init__(self,
                 first,
                 poles,
                 second=None,
                 Nmesh=None,
                 kmin=0.,
                 dk=None,
                 use_fkp_weights=None,
                 P0_FKP=None):

        if use_fkp_weights is not None or P0_FKP is not None:
            raise ValueError(
                "use_fkp_weights and P0_FKP are deprecated. Assign a FKPWeight column to source['randoms']['FKPWeight'] and source['data']['FKPWeight'] with the help of the FKPWeightFromNbar(nbar) function"
            )

        first = _cast_mesh(first, Nmesh=Nmesh)
        if second is not None:
            second = _cast_mesh(second, Nmesh=Nmesh)
        else:
            second = first

        # data/randoms of second must be same as second
        # only difference can be FKP weight currently
        if not is_valid_crosscorr(first, second):
            msg = (
                "ConvolvedFFTPower cross-correlations currently require the same"
                " FKPCatalog (data/randoms), such that only the weight column can vary"
            )
            raise NotImplementedError(msg)

        self.first = first
        self.second = second

        # grab comm from first source
        self.comm = first.comm

        # check for comm mismatch
        assert second.comm is first.comm, "communicator mismatch between input sources"

        # make a box big enough for both catalogs if they are not equal
        # NOTE: both first/second must have the same BoxCenter to recenter Position
        if not numpy.array_equal(first.attrs['BoxSize'],
                                 second.attrs['BoxSize']):

            # stack box coordinates together
            joint = {}
            for name in ['BoxSize', 'BoxCenter']:
                joint[name] = numpy.vstack(
                    [first.attrs[name], second.attrs[name]])

            # determine max box length along each dimension
            argmax = numpy.argmax(joint['BoxSize'], axis=0)
            joint['BoxSize'] = joint['BoxSize'][argmax, [0, 1, 2]]
            joint['BoxCenter'] = joint['BoxCenter'][argmax, [0, 1, 2]]

            # re-center the box
            first.recenter_box(joint['BoxSize'], joint['BoxCenter'])
            second.recenter_box(joint['BoxSize'], joint['BoxCenter'])

        # make a list of multipole numbers
        if numpy.isscalar(poles):
            poles = [poles]

        # store meta-data
        self.attrs = {}
        self.attrs['poles'] = poles
        self.attrs['dk'] = dk
        self.attrs['kmin'] = kmin

        # store BoxSize and BoxCenter from source
        self.attrs['Nmesh'] = self.first.attrs['Nmesh'].copy()
        self.attrs['BoxSize'] = self.first.attrs['BoxSize']
        self.attrs['BoxPad'] = self.first.attrs['BoxPad']
        self.attrs['BoxCenter'] = self.first.attrs['BoxCenter']

        # grab some mesh attrs, too
        self.attrs['mesh.resampler'] = self.first.resampler
        self.attrs['mesh.interlaced'] = self.first.interlaced

        # and run
        self.run()
Example #38
def peakdet(v, delta, x=None):
    """
	Converted from MATLAB script at http://billauer.co.il/peakdet.html

	Returns two arrays

	function [maxtab, mintab]=peakdet(v, delta, x)
	%PEAKDET Detect peaks in a vector
	%        [MAXTAB, MINTAB] = PEAKDET(V, DELTA) finds the local
	%        maxima and minima ("peaks") in the vector V.
	%        MAXTAB and MINTAB consists of two columns. Column 1
	%        contains indices in V, and column 2 the found values.
	%
	%        With [MAXTAB, MINTAB] = PEAKDET(V, DELTA, X) the indices
	%        in MAXTAB and MINTAB are replaced with the corresponding
	%        X-values.
	%
	%        A point is considered a maximum peak if it has the maximal
	%        value, and was preceded (to the left) by a value lower by
	%        DELTA.

	% Eli Billauer, 3.4.05 (Explicitly not copyrighted).
	% This function is released to the public domain; Any use is allowed.

	"""
    maxtab = []
    mintab = []

    if x is None:
        x = arange(len(v))

    v = asarray(v)

    if len(v) != len(x):
        sys.exit('Input vectors v and x must have same length')

    if not isscalar(delta):
        sys.exit('Input argument delta must be a scalar')

    if delta <= 0:
        sys.exit('Input argument delta must be positive')

    mn, mx = Inf, -Inf
    mnpos, mxpos = NaN, NaN

    lookformax = True

    for i in arange(len(v)):
        this = v[i]
        if this > mx:
            mx = this
            mxpos = x[i]
        if this < mn:
            mn = this
            mnpos = x[i]

        if lookformax:
            if this < mx - delta:
                maxtab.append((mxpos, mx))
                mn = this
                mnpos = x[i]
                lookformax = False
        else:
            if this > mn + delta:
                mintab.append((mnpos, mn))
                mx = this
                mxpos = x[i]
                lookformax = True

    return array(maxtab), array(mintab)
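A smoke test on a sine wave, assuming peakdet and its star-imported NumPy names are in scope; delta=0.5 is an illustrative threshold:

import numpy as np

t = np.linspace(0, 4 * np.pi, 400)
maxtab, mintab = peakdet(np.sin(t), 0.5, t)
print(maxtab[:, 0])  # x-positions of the maxima, roughly pi/2 and 5*pi/2
print(mintab.shape)  # (2, 2): two minima as (position, value) rows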
Example #39
def run_evolution_strategies(trainer, result):
    warn_about_bad_reward_scales(trainer, result)  # original function

    if not hasattr(trainer, "update_policy_counter"):
        # This should be the first iter?
        trainer.update_policy_counter = 1
        trainer._last_update_weights = get_flat(trainer.get_policy("agent0"))
        trainer._es_optimizer = Adam(trainer._last_update_weights.size)
        logger.info(
            "First run of ES module. Setup counter, weights and optimizer.")

    rewards = result['policy_reward_mean']
    steps = result['info']['num_steps_trained']

    for rk, r in rewards.items():
        assert np.isscalar(r), \
            "Invalid reward happen! Should we skip this update?"

    update_steps = trainer.config['update_steps']
    if update_steps == "baseline":
        # Never enter the ES synchronization if set update_steps to baseline.
        update_steps = float('+inf')
    else:
        assert isinstance(update_steps, int)

    if steps > update_steps * trainer.update_policy_counter:
        best_agent = max(rewards, key=lambda x: rewards[x])
        returns = np.array(list(rewards.values()))
        proc_noisy_returns = utils.compute_centered_ranks(returns)
        weights_diff = {}
        for pid, p in trainer.workers.local_worker().policy_map.items():
            weights_diff[pid] = get_flat(p) - trainer._last_update_weights

        # Compute and take a step.
        g, count = utils.batched_weighted_sum(
            proc_noisy_returns, (weights_diff[pid] for pid in rewards.keys()),
            batch_size=500)  # batch_size 500 always greater # of policy 10
        g /= returns.size

        # Compute the new weights theta.
        theta = trainer._last_update_weights  # Old weights
        new_theta, update_ratio = trainer._es_optimizer.update(
            -g + 0.005 * theta, theta)
        theta_id = ray.put(new_theta)

        def _spawn_policy(policy, policy_id):
            new_weights = ray.get(theta_id)
            policy._variables.set_flat(new_weights)
            logger.debug("In ES updates {} sync {}.".format(
                trainer.update_policy_counter, policy_id))

        # set to policies on local worker. Then all polices would be the same.
        trainer.workers.local_worker().foreach_policy(_spawn_policy)

        info = {
            "weights_norm": np.square(theta).sum(),
            "grad_norm": np.square(g).sum(),
            "update_ratio": update_ratio,
            "update_policy_counter": trainer.update_policy_counter
        }
        result["evolution_info"] = info

        msg = "Current num_steps_trained is {}, exceed last update steps {}" \
              " (our update interval is {}). Current best agent is <{}> " \
              "with reward {:.4f}. We spawn it to others: {}.".format(
            steps, trainer.update_policy_counter * update_steps, update_steps,
            best_agent, rewards[best_agent], rewards
        )
        print(msg)
        logger.info(msg)
        trainer._last_update_weights = new_theta.copy()
        trainer.update_policy_counter += 1

    result['update_policy_counter'] = trainer.update_policy_counter
    result['update_policy_threshold'] = trainer.update_policy_counter * \
                                        update_steps
Example #40
    def store_recursively(fhandle,
                          node,
                          path=None,
                          attrs=None,
                          node_hashes=None):
        """Function for interatively doing the work"""
        path = [] if path is None else path
        node_hashes = OrderedDict() if node_hashes is None else node_hashes
        full_path = '/' + '/'.join(path)
        if attrs is not None:
            if isinstance(attrs, OrderedDict):
                sorted_attr_keys = attrs.keys()
            else:
                sorted_attr_keys = sorted(attrs.keys())
        if isinstance(node, Mapping):
            logging.trace('  creating Group "%s"', full_path)
            try:
                dset = fhandle.create_group(full_path)
                if attrs is not None:
                    for key in sorted_attr_keys:
                        dset.attrs[key] = attrs[key]
            except ValueError:
                pass

            for key in sorted(node.keys()):
                if isinstance(key, basestring):
                    key_str = key
                else:
                    key_str = str(key)
                    logging.warn(
                        'Making string from key "%s", %s for use as'
                        ' name in HDF5 file', key_str, type(key))
                val = node[key]
                new_path = path + [key_str]
                store_recursively(fhandle=fhandle,
                                  node=val,
                                  path=new_path,
                                  node_hashes=node_hashes)
        else:
            # Check for existing node
            node_hash = hash_obj(node)
            if node_hash in node_hashes:
                logging.trace('  creating hardlink for Dataset: "%s" -> "%s"',
                              full_path, node_hashes[node_hash])
                # Hardlink the matching existing dataset
                fhandle[full_path] = fhandle[node_hashes[node_hash]]
                return
            # For now, convert None to np.nan since h5py appears to not handle
            # None
            if node is None:
                node = np.nan
                logging.warn(
                    '  encountered `None` at node "%s"; converting to'
                    ' np.nan', full_path)
            # "Scalar datasets don't support chunk/filter options". Shuffling
            # is a good idea otherwise since subsequent compression will
            # generally benefit; shuffling requires chunking. Compression is
            # not done here since it is slow, but can be done by
            # post-processing the generated file(s).
            if np.isscalar(node):
                shuffle = False
                chunks = None
            else:
                shuffle = True
                chunks = True
                # Store the node_hash for linking to later if this is more than
                # a scalar datatype. Assumed that "None" has already been
                # converted to np.nan above.
                node_hashes[node_hash] = full_path
            if isinstance(node, basestring):
                # TODO: Treat strings as follows? Would this break
                # compatibility with pytables/Pandas? What are benefits?
                # Leaving the following two lines out for now...

                #dtype = h5py.special_dtype(vlen=str)
                #fh.create_dataset(k,data=v,dtype=dtype)

                # ... Instead: creating length-1 array out of string; this
                # seems to be compatible with both h5py and pytables
                node = np.array(node)

            logging.trace('  creating dataset at node "%s", hash %s',
                          full_path, node_hash)
            try:
                dset = fhandle.create_dataset(name=full_path,
                                              data=node,
                                              chunks=chunks,
                                              compression=None,
                                              shuffle=shuffle,
                                              fletcher32=False)
            except TypeError:
                try:
                    shuffle = False
                    chunks = None
                    dset = fhandle.create_dataset(name=full_path,
                                                  data=node,
                                                  chunks=chunks,
                                                  compression=None,
                                                  shuffle=shuffle,
                                                  fletcher32=False)
                except:
                    logging.error('  full_path: %s', full_path)
                    logging.error('  chunks   : %s', str(chunks))
                    logging.error('  shuffle  : %s', str(shuffle))
                    logging.error('  node     : %s', str(node))
                    raise

            if attrs is not None:
                for key in sorted_attr_keys:
                    dset.attrs[key] = attrs[key]
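A hedged usage sketch for store_recursively; the h5py calls are real, while hash_obj, Mapping, and the trace-level logging (and the Python 2 basestring check) are assumed to come from the surrounding module:

import h5py
from collections import OrderedDict

data = OrderedDict([
    ('config', OrderedDict([('n_bins', 10), ('label', 'test')])),
    ('results', [1.0, 2.0, 3.0]),
])
with h5py.File('example.hdf5', 'w') as fhandle:
    store_recursively(fhandle=fhandle, node=data)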
Example #41
0
def base_tester(model, impl, observation_shape, action_size=2):
    # dummy impl object
    model._impl = impl

    # check save model
    impl.save_model = Mock()
    model.save_model("model.pt")
    impl.save_model.assert_called_with("model.pt")

    # check load model
    impl.load_model = Mock()
    model.load_model("mock.pt")
    impl.load_model.assert_called_with("mock.pt")

    # check get_params
    params = model.get_params(deep=False)
    clone = model.__class__(**params)
    for key, val in clone.get_params(deep=False).items():
        assert params[key] is val

    # check deep flag
    deep_params = model.get_params(deep=True)
    assert deep_params["impl"] is not impl

    # check set_params
    clone = model.__class__()
    for key, val in params.items():
        if np.isscalar(val) and not isinstance(val, str):
            params[key] = val + np.random.random()
    # set_params returns itself
    assert clone.set_params(**params) is clone
    for key, val in clone.get_params(deep=False).items():
        assert params[key] is val

    # check fit and fitter
    update_backup = model.update
    model.update = Mock(return_value=range(len(model.get_loss_labels())))
    n_episodes = 4
    episode_length = 25
    n_batch = 32
    n_epochs = 3
    data_size = n_episodes * episode_length
    model._batch_size = n_batch
    shape = (data_size, ) + observation_shape
    if len(observation_shape) == 3:
        observations = np.random.randint(256, size=shape, dtype=np.uint8)
    else:
        observations = np.random.random(shape).astype("f4")
    actions = np.random.random((data_size, action_size))
    rewards = np.random.random(data_size)
    terminals = np.zeros(data_size)
    for i in range(n_episodes):
        terminals[(i + 1) * episode_length - 1] = 1.0
    dataset = MDPDataset(observations, actions, rewards, terminals)

    # check fit
    results = model.fit(
        dataset.episodes,
        n_epochs=n_epochs,
        logdir="test_data",
        verbose=False,
        show_progress=False,
    )

    assert isinstance(results, list)
    assert len(results) == n_epochs

    # check if the correct number of iterations are performed
    assert len(model.update.call_args_list) == data_size // n_batch * n_epochs

    # check arguments at each iteration
    for i, call in enumerate(model.update.call_args_list):
        epoch = i // (data_size // n_batch)
        total_step = i
        assert call[0][0] == epoch + 1
        assert call[0][1] == total_step
        assert isinstance(call[0][2], TransitionMiniBatch)
        assert len(call[0][2]) == n_batch

    # check fitter
    fitter = model.fitter(
        dataset.episodes,
        n_epochs=n_epochs,
        logdir="test_data",
        verbose=False,
        show_progress=False,
    )

    for epoch, metrics in fitter:
        assert isinstance(epoch, int)
        assert isinstance(metrics, dict)

    assert epoch == n_epochs

    # save params.json
    logger = D3RLPyLogger("test", root_dir="test_data", verbose=False)
    # save parameters to test_data/test/params.json
    model.save_params(logger)
    # load params.json
    json_path = os.path.join(logger.logdir, "params.json")
    new_model = model.__class__.from_json(json_path)
    assert new_model.impl is not None
    assert new_model.impl.observation_shape == observation_shape
    assert new_model.impl.action_size == action_size
    assert type(model.scaler) == type(new_model.scaler)

    # check __setattr__ override
    prev_batch_size = model.impl.batch_size
    model.batch_size = prev_batch_size + 1
    assert model.impl.batch_size == model.batch_size

    # check builds
    model._impl = None
    model.build_with_dataset(dataset)
    assert model.impl.observation_shape == dataset.get_observation_shape()
    assert model.impl.action_size == dataset.get_action_size()

    # set backed up methods
    model._impl = None
    model.update = update_backup

    return dataset
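The set_params check above uses np.isscalar to perturb only plain numeric hyperparameters; a self-contained sketch of that pattern (parameter names are hypothetical):

import numpy as np

params = {"learning_rate": 1e-3, "batch_size": 32, "scaler": "standard", "impl": None}
for key, val in params.items():
    if np.isscalar(val) and not isinstance(val, str):
        params[key] = val + np.random.random()  # nudges only the plain numbers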
Example #42
0
def circles(x, y, s, c='b', ax=None, vmin=None, vmax=None, **kwargs):
    """
    Make a scatter plot of circles of x vs y, where x and y are sequence-like
    objects of the same length. The sizes of the circles are given in data scale.

    :param x,y: scalar or array_like, shape (n, )
                Input data
    :param s: scalar or array_like, shape (n, )
              Radius of the circles in data scale (i.e. in data units)
    :param c: color or sequence of color, optional, default : 'b'
              `c` can be a single color format string, or a sequence of color
              specifications of length `N`, or a sequence of `N` numbers to be
              mapped to colors using the `cmap` and `norm` specified via kwargs.
              Note that `c` should not be a single numeric RGB or
              RGBA sequence because that is indistinguishable from an array of
              values to be colormapped.  `c` can be a 2-D array in which the
              rows are RGB or RGBA, however.
    :param ax: Axes object, optional, default: None
               Parent axes of the plot. It uses gca() if not specified.
    :param vmin, vmax: scalar, optional, default: None
        `vmin` and `vmax` are used in conjunction with `norm` to normalize
        luminance data.  If either are `None`, the min and max of the
        color array is used.  (Note if you pass a `norm` instance, your
        settings for `vmin` and `vmax` will be ignored.)
    :param kwargs: `~matplotlib.collections.Collection` properties
        eg. alpha, edgecolors, facecolors, linewidths, linestyles, norm, cmap

    :returns: paths : `~matplotlib.collections.PathCollection`


    Examples :: 
      
        a = np.arange(11)
        circles(a, a, a*0.2, c=a, alpha=0.5, edgecolor='none')

    This code is under [The BSD 3-Clause License]
    (http://opensource.org/licenses/BSD-3-Clause)
    """
    from matplotlib.patches import Circle
    from matplotlib.collections import PatchCollection
    import pylab as plt
    #import matplotlib.colors as colors

    if ax is None:
        ax = plt.gca()

    if isinstance(c, basestring):
        color = c  # ie. use colors.colorConverter.to_rgba_array(c)
    else:
        color = None  # use cmap, norm after collection is created
    kwargs.update(color=color)

    if np.isscalar(x):
        patches = [
            Circle((x, y), s),
        ]
    elif np.isscalar(s):
        patches = [Circle((x_, y_), s) for x_, y_ in zip(x, y)]
    else:
        patches = [Circle((x_, y_), s_) for x_, y_, s_ in zip(x, y, s)]
    collection = PatchCollection(patches, **kwargs)

    if color is None:
        collection.set_array(np.asarray(c))
        if vmin is not None or vmax is not None:
            collection.set_clim(vmin, vmax)

    ax.add_collection(collection)
    ax.autoscale_view()
    return collection
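How the np.isscalar checks above dispatch on the input shapes (illustration only; Circle is matplotlib.patches.Circle as imported in the function body):

import numpy as np

x, y, s = 1.0, 2.0, 0.3
np.isscalar(x)                    # True  -> a single Circle((x, y), s)
x = np.arange(5); y = np.arange(5)
np.isscalar(x), np.isscalar(s)    # (False, True) -> one Circle per point, shared radius s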
Example #43
0
    def _internal(self, source):
        # Error check
        data = np.asarray(source['data'])
        if data.ndim == 0:
            # Convert scalar to 1-dim array:
            data = np.array([data])
        if source['axes'] is None:
            # Automatic axis labels and values: 'axis0', 'axis1', etc.
            axes = ODict([('axis%d' % d, np.arange(data.shape[d]))
                          for d in np.arange(data.ndim)])
        else:
            # Build an ordered dictionary from the provided axis labels/values
            # and make sure it lines up with the dimensions of the NumPy array:
            try:
                axes = ODict(source['axes'])
            except TypeError:
                raise TypeError("'axes' must be either an ordered dictionary "
                                "or a list of tuples (label, values).")
            if len(axes) != data.ndim:
                raise ValueError("Number of axis labels (%d) does not match "
                                 "number of dimensions in the NumPy array "
                                 " (%d)." % (len(axes), data.ndim))
            if len(np.unique(list(axes.keys()))) < data.ndim:
                raise ValueError("All axis labels must be unique.")
            for i, (key, values) in enumerate(axes.items()):
                if values is None:
                    if data.shape[i] > 1:
                        # If there's 1 data point, then None is None. If
                        # there's > 1 data points, it's an omitted axis we need
                        # to fill in:
                        axes[key] = np.arange(data.shape[i])
                        continue
                else:
                    if data.shape[i] == 1 and np.isscalar(values):
                        values = np.array([values])
                    if len(values) != data.shape[i]:
                        err_str = ("Number of values for axis '%s' (%d) does "
                                   "not match data.shape[%d] "
                                   "(%d)" %
                                   (key, len(values), i, data.shape[i]))
                        raise ValueError(err_str)
                    axes[key] = values

        # Create a property for each of the following:
        pprint_params = ['data', 'dtype', 'shape', 'metadata']
        for param in pprint_params:
            setattr(self.__class__, param,
                    property(fget=self._fget_prop(param)))

        # Also add axis labels as properties:
        for axis, values in axes.items():
            setattr(self.__class__, axis, property(fget=self._fget_axes(axis)))
        pprint_params += list(axes.keys())

        # Internal data structure is a dictionary that stores the actual data
        # container as an N-dim array alongside axis labels and metadata.
        # Setting all elements at once enforces consistency; e.g. between shape
        # and axes:
        self.__internal = {
            'data': data,
            'dtype': data.dtype,
            'shape': data.shape,
            'axes': axes,
            'metadata': source['metadata'],
            'pprint_params': pprint_params
        }
Example #44
0
def static_error(features, noise, diameter, noise_size=1, ndim=2):
    """Compute the uncertainty in particle position ("the static error").

    Parameters
    ----------
    features : DataFrame of features
        The feature dataframe should have a `mass` column that is already
        background corrected.
    noise : number or DataFrame having `noise` column, indexed on `frame`
        standard deviation of the noise
    diameter : number or tuple, feature diameter used to locate centroids
    noise_size : noise correlation length, may be tuple-valued
    ndim : number of image dimensions, default 2
        if diameter is tuple-valued then its length will override ndim

    Returns
    -------
    DataFrame of static error estimates, indexed like the features.
    When either the radius or noise_size is anisotropic, the returned DataFrame
    contains one column for each dimension.

    Where uncertainty estimation fails, NaN is returned.

    Note
    ----
    This is an adjusted version of the process described by Thierry Savin and
    Patrick S. Doyle in their paper "Static and Dynamic Errors in Particle
    Tracking Microrheology," Biophysical Journal 88(1) 623-638.

    Instead of measuring the peak intensity of the feature and calculating the
    total intensity (assuming a certain feature shape), the total intensity
    (=mass) is summed directly from the data. This quantity is more robust
    to noise and gives a better estimate of the static error.

    In addition, the sum of squared coordinates is calculated by taking the
    discrete sum instead of taking the continuous limit and integrating. This
    makes it possible to generalize this analysis to anisotropic masks.
    """
    if hasattr(diameter, '__iter__'):
        ndim = len(diameter)
    noise_size = validate_tuple(noise_size, ndim)[::-1]
    diameter = validate_tuple(diameter, ndim)[::-1]
    radius = tuple([d // 2 for d in diameter])

    if np.isscalar(noise):
        ep = _static_error(features['mass'], noise, radius, noise_size)
    else:
        assert 'noise' in noise
        temp = features.join(noise, on='frame')
        ep = _static_error(temp['mass'], temp['noise'], radius, noise_size)

    ep[ep < 0] = np.nan

    if ep.ndim == 1:
        ep.name = 'ep'
    elif ep.ndim == 2:
        if ndim < 4:
            coord_columns = ['ep_x', 'ep_y', 'ep_z'][:ndim]
        else:
            coord_columns = map(lambda i: 'ep_x' + str(i), range(ndim))
        ep = DataFrame(ep, columns=coord_columns, index=features.index)
    return ep
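A hedged usage sketch for static_error; it assumes the module-level helpers _static_error and validate_tuple used above, and a scalar noise value so the np.isscalar branch is taken:

import pandas as pd

features = pd.DataFrame({'mass': [1200.0, 980.0, 1500.0]})
ep = static_error(features, noise=15.0, diameter=9, noise_size=1, ndim=2)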
Example #45
0
def is_scalar(val: Any) -> bool:
    if torch.is_tensor(val) and val.ndim == 0:
        return True
    return bool(np.isscalar(val))
Example #46
0
def as_array(x):
    if np.isscalar(x):
        return np.array(x)
    return x
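The two tiny helpers above exist because np.isscalar has a few sharp edges worth keeping in mind:

import numpy as np

np.isscalar(3.0)              # True
np.isscalar(np.float64(3.0))  # True  (NumPy scalar types count)
np.isscalar('abc')            # True  (strings count as scalars)
np.isscalar(np.array(3.0))    # False (0-d arrays are not scalars)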
Example #47
0
def pixel_to_pixel(wcs_in, wcs_out, *inputs):
    """
    Transform pixel coordinates in a dataset with a WCS to pixel coordinates
    in another dataset with a different WCS.

    This function is designed to efficiently deal with input pixel arrays that
    are broadcasted views of smaller arrays, and is compatible with any
    APE14-compliant WCS.

    Parameters
    ----------
    wcs_in : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
        A WCS object for the original dataset which complies with the
        high-level shared APE 14 WCS API.
    wcs_out : `~astropy.wcs.wcsapi.BaseHighLevelWCS`
        A WCS object for the target dataset which complies with the
        high-level shared APE 14 WCS API.
    *inputs :
        Scalars or arrays giving the pixel coordinates to transform.
    """

    # Shortcut for scalars
    if np.isscalar(inputs[0]):
        world_outputs = wcs_in.pixel_to_world(*inputs)
        if not isinstance(world_outputs, (tuple, list)):
            world_outputs = (world_outputs, )
        return wcs_out.world_to_pixel(*world_outputs)

    # Remember original shape
    original_shape = inputs[0].shape

    matrix = _pixel_to_pixel_correlation_matrix(wcs_in, wcs_out)
    split_info = _split_matrix(matrix)

    outputs = [None] * wcs_out.pixel_n_dim

    for (pixel_in_indices, pixel_out_indices) in split_info:

        pixel_inputs = []
        for ipix in range(wcs_in.pixel_n_dim):
            if ipix in pixel_in_indices:
                pixel_inputs.append(unbroadcast(inputs[ipix]))
            else:
                pixel_inputs.append(inputs[ipix].flat[0])

        pixel_inputs = np.broadcast_arrays(*pixel_inputs)

        world_outputs = wcs_in.pixel_to_world(*pixel_inputs)

        if not isinstance(world_outputs, (tuple, list)):
            world_outputs = (world_outputs, )

        pixel_outputs = wcs_out.world_to_pixel(*world_outputs)

        if wcs_out.pixel_n_dim == 1:
            pixel_outputs = (pixel_outputs, )

        for ipix in range(wcs_out.pixel_n_dim):
            if ipix in pixel_out_indices:
                outputs[ipix] = np.broadcast_to(pixel_outputs[ipix],
                                                original_shape)

    return outputs[0] if wcs_out.pixel_n_dim == 1 else outputs
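A minimal usage sketch that hits the scalar fast path above, assuming two simple APE-14-compliant WCS objects built with astropy:

from astropy.wcs import WCS

wcs_in = WCS(naxis=2)
wcs_in.wcs.ctype = ['RA---TAN', 'DEC--TAN']
wcs_out = WCS(naxis=2)
wcs_out.wcs.ctype = ['RA---TAN', 'DEC--TAN']
# Scalar inputs trigger the np.isscalar shortcut and skip the correlation-matrix machinery:
x_out, y_out = pixel_to_pixel(wcs_in, wcs_out, 10.0, 20.0)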
Example #48
0
 def test__call__point_evaluation(self):
     # check we get back a scalar for scalar input
     self.assertTrue(isscalar(self.f1(0.1)))
Example #49
0
def sympify(a,
            locals=None,
            convert_xor=True,
            strict=False,
            rational=False,
            evaluate=None):
    """Converts an arbitrary expression to a type that can be used inside SymPy.

    For example, it will convert Python ints into instances of sympy.Integer,
    floats into instances of sympy.Float, etc. It is also able to coerce symbolic
    expressions which inherit from Basic. This can be useful in cooperation
    with SAGE.

    It currently accepts as arguments:
       - any object defined in sympy
       - standard numeric python types: int, long, float, Decimal
       - strings (like "0.09" or "2e-19")
       - booleans, including ``None`` (will leave ``None`` unchanged)
       - lists, sets or tuples containing any of the above

    .. warning::
        Note that this function uses ``eval``, and thus shouldn't be used on
        unsanitized input.

    If the argument is already a type that SymPy understands, it will do
    nothing but return that value. This can be used at the beginning of a
    function to ensure you are working with the correct type.

    >>> from sympy import sympify

    >>> sympify(2).is_integer
    True
    >>> sympify(2).is_real
    True

    >>> sympify(2.0).is_real
    True
    >>> sympify("2.0").is_real
    True
    >>> sympify("2e-45").is_real
    True

    If the expression could not be converted, a SympifyError is raised.

    >>> sympify("x***2")
    Traceback (most recent call last):
    ...
    SympifyError: SympifyError: "could not parse u'x***2'"

    Locals
    ------

    The sympification happens with access to everything that is loaded
    by ``from sympy import *``; anything used in a string that is not
    defined by that import will be converted to a symbol. In the following,
    the ``bitcount`` function is treated as a symbol and the ``O`` is
    interpreted as the Order object (used with series) and it raises
    an error when used improperly:

    >>> s = 'bitcount(42)'
    >>> sympify(s)
    bitcount(42)
    >>> sympify("O(x)")
    O(x)
    >>> sympify("O + 1")
    Traceback (most recent call last):
    ...
    TypeError: unbound method...

    In order to have ``bitcount`` be recognized it can be imported into a
    namespace dictionary and passed as locals:

    >>> from sympy.core.compatibility import exec_
    >>> ns = {}
    >>> exec_('from sympy.core.evalf import bitcount', ns)
    >>> sympify(s, locals=ns)
    6

    In order to have the ``O`` interpreted as a Symbol, identify it as such
    in the namespace dictionary. This can be done in a variety of ways; all
    three of the following are possibilities:

    >>> from sympy import Symbol
    >>> ns["O"] = Symbol("O")  # method 1
    >>> exec_('from sympy.abc import O', ns)  # method 2
    >>> ns.update(dict(O=Symbol("O")))  # method 3
    >>> sympify("O + 1", locals=ns)
    O + 1

    If you want *all* single-letter and Greek-letter variables to be symbols
    then you can use the clashing-symbols dictionaries that have been defined
    there as private variables: _clash1 (single-letter variables), _clash2
    (the multi-letter Greek names) or _clash (both single and multi-letter
    names that are defined in abc).

    >>> from sympy.abc import _clash1
    >>> _clash1
    {'C': C, 'E': E, 'I': I, 'N': N, 'O': O, 'Q': Q, 'S': S}
    >>> sympify('I & Q', _clash1)
    I & Q

    Strict
    ------

    If the option ``strict`` is set to ``True``, only the types for which an
    explicit conversion has been defined are converted. In the other
    cases, a SympifyError is raised.

    >>> print(sympify(None))
    None
    >>> sympify(None, strict=True)
    Traceback (most recent call last):
    ...
    SympifyError: SympifyError: None

    Evaluation
    ----------

    If the option ``evaluate`` is set to ``False``, then arithmetic and
    operators will be converted into their SymPy equivalents and the
    ``evaluate=False`` option will be added. Nested ``Add`` or ``Mul`` will
    be denested first. This is done via an AST transformation that replaces
    operators with their SymPy equivalents, so if an operand redefines any
    of those operations, the redefined operators will not be used.

    >>> sympify('2**2 / 3 + 5')
    19/3
    >>> sympify('2**2 / 3 + 5', evaluate=False)
    2**2/3 + 5

    Extending
    ---------

    To extend ``sympify`` to convert custom objects (not derived from ``Basic``),
    just define a ``_sympy_`` method to your class. You can do that even to
    classes that you do not own by subclassing or adding the method at runtime.

    >>> from sympy import Matrix
    >>> class MyList1(object):
    ...     def __iter__(self):
    ...         yield 1
    ...         yield 2
    ...         return
    ...     def __getitem__(self, i): return list(self)[i]
    ...     def _sympy_(self): return Matrix(self)
    >>> sympify(MyList1())
    Matrix([
    [1],
    [2]])

    If you do not have control over the class definition you could also use the
    ``converter`` global dictionary. The key is the class and the value is a
    function that takes a single argument and returns the desired SymPy
    object, e.g. ``converter[MyList] = lambda x: Matrix(x)``.

    >>> class MyList2(object):   # XXX Do not do this if you control the class!
    ...     def __iter__(self):  #     Use _sympy_!
    ...         yield 1
    ...         yield 2
    ...         return
    ...     def __getitem__(self, i): return list(self)[i]
    >>> from sympy.core.sympify import converter
    >>> converter[MyList2] = lambda x: Matrix(x)
    >>> sympify(MyList2())
    Matrix([
    [1],
    [2]])

    Notes
    =====

    Sometimes autosimplification during sympification results in expressions
    that are very different in structure than what was entered. Until such
    autosimplification is no longer done, the ``kernS`` function might be of
    some use. In the example below you can see how an expression reduces to
    -1 by autosimplification, but does not do so when ``kernS`` is used.

    >>> from sympy.core.sympify import kernS
    >>> from sympy.abc import x
    >>> -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1
    -1
    >>> s = '-2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1'
    >>> sympify(s)
    -1
    >>> kernS(s)
    -2*(-(-x + 1/x)/(x*(x - 1/x)**2) - 1/(x*(x - 1/x))) - 1

    """
    if evaluate is None:
        if global_evaluate[0] is False:
            evaluate = global_evaluate[0]
        else:
            evaluate = True
    try:
        if a in sympy_classes:
            return a
    except TypeError:  # Type of a is unhashable
        pass
    try:
        cls = a.__class__
    except AttributeError:  # a is probably an old-style class object
        cls = type(a)
    if cls in sympy_classes:
        return a
    if cls is type(None):
        if strict:
            raise SympifyError(a)
        else:
            return a

    # Support for basic numpy datatypes
    # Note that this check exists to avoid importing NumPy when not necessary
    if type(a).__module__ == 'numpy':
        import numpy as np
        if np.isscalar(a):
            return _convert_numpy_types(a,
                                        locals=locals,
                                        convert_xor=convert_xor,
                                        strict=strict,
                                        rational=rational,
                                        evaluate=evaluate)

    try:
        return converter[cls](a)
    except KeyError:
        for superclass in getmro(cls):
            try:
                return converter[superclass](a)
            except KeyError:
                continue

    if isinstance(a, CantSympify):
        raise SympifyError(a)

    try:
        return a._sympy_()
    except AttributeError:
        pass

    if not strict:
        # Put numpy array conversion _before_ float/int, see
        # <https://github.com/sympy/sympy/issues/13924>.
        try:
            from ..tensor.array import Array
            return Array(a.flat, a.shape)  # works with e.g. NumPy arrays
        except AttributeError:
            pass

    if not isinstance(a, string_types):
        for coerce in (float, int):
            try:
                return sympify(coerce(a))
            except (TypeError, ValueError, AttributeError, SympifyError):
                continue

    if strict:
        raise SympifyError(a)

    if iterable(a):
        try:
            return type(a)([
                sympify(x,
                        locals=locals,
                        convert_xor=convert_xor,
                        rational=rational) for x in a
            ])
        except TypeError:
            # Not all iterables are rebuildable with their type.
            pass
    if isinstance(a, dict):
        try:
            return type(a)([
                sympify(x,
                        locals=locals,
                        convert_xor=convert_xor,
                        rational=rational) for x in a.items()
            ])
        except TypeError:
            # Not all iterables are rebuildable with their type.
            pass

    # At this point we were given an arbitrary expression
    # which does not inherit from Basic and doesn't implement
    # _sympy_ (which is a canonical and robust way to convert
    # anything to SymPy expression).
    #
    # As a last chance, we try to take "a"'s normal form via unicode()
    # and try to parse it. If it fails, then we have no luck and
    # return an exception
    try:
        from .compatibility import unicode
        a = unicode(a)
    except Exception as exc:
        raise SympifyError(a, exc)

    from sympy.parsing.sympy_parser import (parse_expr, TokenError,
                                            standard_transformations)
    from sympy.parsing.sympy_parser import convert_xor as t_convert_xor
    from sympy.parsing.sympy_parser import rationalize as t_rationalize

    transformations = standard_transformations

    if rational:
        transformations += (t_rationalize, )
    if convert_xor:
        transformations += (t_convert_xor, )

    try:
        a = a.replace('\n', '')
        expr = parse_expr(a,
                          local_dict=locals,
                          transformations=transformations,
                          evaluate=evaluate)
    except (TokenError, SyntaxError) as exc:
        raise SympifyError('could not parse %r' % a, exc)

    return expr
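For this collection the relevant branch is the NumPy-scalar check near the top of the body; a quick demonstration of what it produces:

import numpy as np
from sympy import Float, Integer, sympify

sympify(np.int64(3)) == Integer(3)           # True: routed through _convert_numpy_types
isinstance(sympify(np.float64(0.5)), Float)  # True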
Example #50
0
def _make_rotate_mat(rotate):
    if np.isscalar(rotate):
        return _make_2d_rotation(rotate)
    elif np.array(rotate).ndim == 1 and len(rotate) == 3:
        return _make_3d_rotation(*rotate)
    return np.array(rotate)
Example #51
0
def centered_difference(X, t):
    """
    Second order centered difference with third order forward/backward
    difference at endpoints.

    Warning: Sometimes has trouble with nonuniform grid spacing near boundaries
    """

    # Check whether data is 1D
    if np.ndim(X) == 1:

        # Uniform timestep
        if np.isscalar(t):
            X_diff = (X[2:] - X[:-2]) / (2 * t)
            forward_diff = np.array([
                (-11 / 6 * X[0] + 3 * X[1] - 3 / 2 * X[2] + X[3] / 3) / t
            ])
            backward_diff = np.array([
                (11 / 6 * X[-1] - 3 * X[-2] + 3 / 2 * X[-3] - X[-4] / 3) / t
            ])
            return np.concatenate((forward_diff, X_diff, backward_diff))

        # Variable timestep
        else:
            t_diff = t[2:] - t[:-2]
            X_diff = (X[2:] - X[:-2]) / (t_diff)
            forward_diff = np.array([
                (-11 / 6 * X[0] + 3 * X[1] - 3 / 2 * X[2] + X[3] / 3) /
                (t[1] - t[0])
            ])
            backward_diff = np.array([
                (11 / 6 * X[-1] - 3 * X[-2] + 3 / 2 * X[-3] - X[-4] / 3) /
                (t[-1] - t[-2])
            ])
            return np.concatenate((forward_diff, X_diff, backward_diff))

    # Otherwise assume data is 2D
    else:

        # Uniform timestep
        if np.isscalar(t):
            X_diff = (X[:, 2:] - X[:, :-2]) / (2 * t)
            forward_diff = ((-11 / 6 * X[:, 0] + 3 * X[:, 1] -
                             3 / 2 * X[:, 2] + X[:, 3] / 3) / t).reshape(
                                 X.shape[0], 1)
            backward_diff = ((11 / 6 * X[:, -1] - 3 * X[:, -2] +
                              3 / 2 * X[:, -3] - X[:, -4] / 3) / t).reshape(
                                  X.shape[0], 1)
            return np.concatenate((forward_diff, X_diff, backward_diff),
                                  axis=1)

        # Variable timestep
        else:
            t_diff = t[2:] - t[:-2]
            X_diff = (X[:, 2:] - X[:, :-2]) / t_diff
            forward_diff = ((-11 / 6 * X[:, 0] + 3 * X[:, 1] -
                             3 / 2 * X[:, 2] + X[:, 3] / 3) /
                            (t_diff[0] / 2)).reshape(X.shape[0], 1)
            backward_diff = ((11 / 6 * X[:, -1] - 3 * X[:, -2] +
                              3 / 2 * X[:, -3] - X[:, -4] / 3) /
                             (t_diff[-1] / 2)).reshape(X.shape[0], 1)
            return np.concatenate((forward_diff, X_diff, backward_diff),
                                  axis=1)
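A short self-check of the uniform-timestep branch above (a scalar t takes the np.isscalar path):

import numpy as np

t = 0.01
grid = np.arange(0.0, 1.0, t)
dx = centered_difference(np.sin(grid), t)
assert dx.shape == grid.shape
assert np.allclose(dx, np.cos(grid), atol=1e-2)  # derivative of sin is cos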
Example #52
0
def color_split(size,
                x=None,
                y=None,
                p1=None,
                p2=None,
                vector=None,
                col1=0,
                col2=1.0,
                grad_width=0):
    """Make an image splitted in 2 colored regions.
    
    Returns an array of size ``size`` divided in two regions called 1 and
    2 in wht follows, and which will have colors col& and col2
    respectively.
    
    Parameters
    -----------
    
    x: (int)
      If provided, the image is split horizontally at x, the left
      region being region 1.

    y: (int)
      If provided, the image is split vertically at y, the top region
      being region 1.
    
    p1,p2:
      Positions (x1,y1),(x2,y2) in pixels, where the numbers can be
      floats. Region 1 is defined as the whole region on the left when
      going from ``p1`` to ``p2``.
    
    p1, vector:
      ``p1`` is (x1,y1) and vector (v1,v2), where the numbers can be
      floats. Region 1 is then the region on the left when starting
      in position ``p1`` and going in the direction given by ``vector``.
       
    grad_width
      If not zero, the split is not sharp, but gradual over a region of
      width ``grad_width`` (in pixels). This is preferable in many
      situations (for instance for antialiasing).
     
    
    Examples
    ---------
    
    >>> size = [200,200]
    >>> # an image with all pixels with x<50 =0, the others =1
    >>> color_split(size, x=50, col1=0, col2=1)
    >>> # an image with all pixels with y<50 red, the others green
    >>> color_split(size, y=50, col1=[255,0,0], col2=[0,255,0])
    >>> # An image split along an arbitrary line (see below)
    >>> color_split(size, p1=[20,50], p2=[25,70], col1=0, col2=1)
        
    """

    if grad_width or ((x is None) and (y is None)):
        if p2 is not None:
            vector = (np.array(p2) - np.array(p1))
        elif x is not None:
            vector = np.array([0, 1.0])
            p1 = np.array([x, 0])
        elif y is not None:
            vector = np.array([1.0, 0.0])
            p1 = np.array([0, y])

        x, y = vector
        vector = np.array([y, -x]).astype('float')
        norm = np.linalg.norm(vector)
        vector = max(0.1, grad_width) * vector / norm
        return color_gradient(size,
                              p1,
                              vector=vector,
                              col1=col1,
                              col2=col2,
                              shape='linear')
    else:

        w, h = size
        shape = (h, w) if np.isscalar(col1) else (h, w, len(col1))
        arr = np.zeros(shape)
        if x:
            arr[:, :x] = col1
            arr[:, x:] = col2
        elif y:
            arr[:y] = col1
            arr[y:] = col2

        return arr

    # if we are here, it means we didn't exit with a proper 'return'
    raise ValueError("Arguments in color_split not understood!")
Example #53
0
        def _eq(t, o, a, obj, k1, k2):
            """ compare equal for these 2 keys """

            if a is not None and a > obj.ndim-1:
                return

            def _print(result, error = None):
                if error is not None:
                    error = str(error)
                v = "%-16.16s [%-16.16s]: [typ->%-8.8s,obj->%-8.8s,key1->(%-4.4s),key2->(%-4.4s),axis->%s] %s" % (name,result,t,o,method1,method2,a,error or '')
                if _verbose:
                    print(v)

            try:

                ### good debug location ###
                #if name == 'bool' and t == 'empty' and o == 'series' and method1 == 'loc':
                #    import pdb; pdb.set_trace()

                rs  = getattr(obj, method1).__getitem__(_axify(obj,k1,a))

                try:
                    xp = _get_result(obj,method2,k2,a)
                except:
                    result = 'no comp'
                    _print(result)
                    return

                try:
                    if np.isscalar(rs) and np.isscalar(xp):
                        self.assert_(rs == xp)
                    elif xp.ndim == 1:
                        assert_series_equal(rs,xp)
                    elif xp.ndim == 2:
                        assert_frame_equal(rs,xp)
                    elif xp.ndim == 3:
                        assert_panel_equal(rs,xp)
                    result = 'ok'
                except (AssertionError):
                    result = 'fail'

                # reverse the checks
                if fails is True:
                    if result == 'fail':
                        result = 'ok (fail)'
                    
                if not result.startswith('ok'):
                    raise AssertionError(_print(result))

                _print(result)

            except (AssertionError):
                raise
            except (TypeError):
                raise AssertionError(_print('type error'))
            except Exception as detail:

                # if we are in fails, the ok, otherwise raise it
                if fails is not None:
                    if fails == type(detail):
                        result = 'ok (%s)' % type(detail).__name__
                        _print(result)
                        return
                
                result = type(detail).__name__
                raise AssertionError(_print(result, error = detail))
Example #54
0
assert F8.shape == (3, 3), 'eightpoint returns 3x3 matrix'

# 2.2
#F7 = sub.sevenpoint(data['pts1'][:7, :], data['pts2'][:7, :], M)
#assert (len(F7) == 1) | (len(F7) == 3), 'sevenpoint returns length-1/3 list'

# for f7 in F7:
# assert f7.shape == (3, 3), 'seven returns list of 3x3 matrix'

# 3.1
C1 = np.concatenate([np.random.rand(3, 3), np.ones([3, 1])], axis=1)
C2 = np.concatenate([np.random.rand(3, 3), np.ones([3, 1])], axis=1)

P, err = sub.triangulate(C1, data['pts1'], C2, data['pts2'])
assert P.shape == (N, 3), 'triangulate returns Nx3 matrix P'
assert np.isscalar(err), 'triangulate returns scalar err'

# 4.1
x2, y2 = sub.epipolarCorrespondence(im1, im2, F8, data['pts1'][0, 0],
                                    data['pts1'][0, 1])
assert np.isscalar(x2) & np.isscalar(
    y2), 'epipolarCorrespondence returns x & y coordinates'

# 5.1
"""
You can opt to uncomment this if extra credit q5 is implemented. Note this only checks formatting. 
"""
# F = sub.ransacF(data['pts1'], data['pts2'], M)
# assert F.shape == (3, 3), 'ransacF returns 3x3 matrix'

# # 5.2
Example #55
0
def lagcorr(x, y, lag=None, verbose=True):
    '''
    Compute lead-lag correlations between 2 time series.

    <x>,<y>: 1-D time series.
    <lag>: lag option, could take different forms of <lag>:
          if 0 or None, compute ordinary correlation and p-value;
          if positive integer, compute lagged correlation with lag
          upto <lag>;
          if negative integer, compute lead correlation with lead
          upto <-lag>;
          if passed a list, tuple or array of integers, compute
          lead/lag correlations at the given leads/lags.

    Note: when talking about lead/lag, uses <y> as a reference.
    Therefore positive lag means <x> lags <y> by <lag>, computation is
    done by shifting <x> to the left hand side by <lag> with respect to
    <y>.
    Similarly negative lag means <x> leads <y> by <lag>, computation is
    done by shifting <x> to the right hand side by <lag> with respect to
    <y>.

    Return <result>: an (n*2) array, with the 1st column the correlation
    coefficients and the 2nd column the corresponding p-values.

    Currently only works for 1-D arrays.
    '''
    import numpy
    from scipy.stats import pearsonr

    if len(x) != len(y):
        raise ValueError('Input variables of different lengths.')

    #--------Unify types of <lag>-------------
    if numpy.isscalar(lag):
        if abs(lag) >= len(x):
            raise ValueError('Maximum lag equal to or larger than array length.')
        if lag < 0:
            lag = -numpy.arange(abs(lag) + 1)
        elif lag == 0:
            lag = [
                0,
            ]
        else:
            lag = numpy.arange(lag + 1)
    elif lag is None:
        lag = [
            0,
        ]
    else:
        lag = numpy.asarray(lag)

    #-------Loop over lags---------------------
    result = []
    #    if verbose:
    #        print ('\n#<lagcorr>: Computing lagged-correlations at lags:',lag)

    for ii in lag:
        if ii < 0:
            result.append(pearsonr(x[:ii], y[-ii:]))
        elif ii == 0:
            result.append(pearsonr(x, y))
        elif ii > 0:
            result.append(pearsonr(x[ii:], y[:-ii]))

    result = numpy.asarray(result)

    return result  #result=(Pearson correlation coefficient,p-value)
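A hedged usage sketch for lagcorr; a scalar lag exercises the np.isscalar branch:

import numpy

x = numpy.random.randn(200)
y = numpy.random.randn(200)
result = lagcorr(x, y, lag=5)   # rows are lags 0..5, columns are (Pearson r, p-value)
assert result.shape == (6, 2)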
Example #56
0
    def log_marginal_lengthscale(self,lengthscale,noise_delta):
        """
        Compute Log Marginal likelihood of the GP model w.r.t. the provided lengthscale
        """

        def compute_log_marginal(X,lengthscale,noise_delta):
            # compute K
            ur = unique_rows(self.X)
            myX=self.X[ur]
            #myY=np.sqrt(0.5*(self.fstar-self.Y[ur]))
            myY=self.Y[ur]
            
            if self.flagOptimizeHyperFirst==0:
                if self.kernel_name=='SE':
                    self.Euc_dist_X_X=euclidean_distances(myX,myX)
                    KK=np.exp(-np.square(self.Euc_dist_X_X)/lengthscale)+np.eye(len(myX))*self.noise_delta
                else:
                    KK=pdist(myX,lambda a,b: self.kernel_dist(a,b,lengthscale))
                    KK=squareform(KK)
                    KK=KK+np.eye(myX.shape[0])*(1+noise_delta)
                self.flagOptimizeHyperFirst=1
            else:
                if self.kernel_name=='SE':
                    KK=np.exp(-np.square(self.Euc_dist_X_X)/lengthscale)+np.eye(len(myX))*self.noise_delta
                else:
                    KK=pdist(myX,lambda a,b: self.kernel_dist(a,b,lengthscale))
                    KK=squareform(KK)
                    KK=KK+np.eye(myX.shape[0])*(1+noise_delta)

            try:
                temp_inv=np.linalg.solve(KK,myY)
            except: # singular
                return -np.inf
            
            
            try:
                #logmarginal=-0.5*np.dot(self.Y.T,temp_inv)-0.5*np.log(np.linalg.det(KK+noise_delta))-0.5*len(X)*np.log(2*3.14)
                first_term=-0.5*np.dot(myY.T,temp_inv)
                
                # if the matrix is too large, we randomly select a part of the data for fast computation
                if KK.shape[0]>200:
                    idx=np.random.permutation(KK.shape[0])
                    idx=idx[:200]
                    KK=KK[np.ix_(idx,idx)]
                #Wi, LW, LWi, W_logdet = pdinv(KK)
                #sign,W_logdet2=np.linalg.slogdet(KK)
                chol  = spla.cholesky(KK, lower=True)
                W_logdet=np.sum(np.log(np.diag(chol)))
                # Uses the identity that log det A = log prod diag chol A = sum log diag chol A
    
                #second_term=-0.5*W_logdet2
                second_term=-W_logdet
            except: # singular
                return -np.inf
            
            #print "first term ={:.4f} second term ={:.4f}".format(np.asscalar(first_term),np.asscalar(second_term))

            logmarginal=first_term+second_term-0.5*len(myY)*np.log(2*np.pi)
                
            if np.isnan(np.asscalar(logmarginal))==True:
                print("theta={:s} first term ={:.4f} second  term ={:.4f}".format(lengthscale,np.asscalar(first_term),np.asscalar(second_term)))
                #print temp_det

            return np.asscalar(logmarginal)
        
        #print lengthscale
        logmarginal=0
        
        if np.isscalar(lengthscale):
            logmarginal=compute_log_marginal(self.X,lengthscale,noise_delta)
            return logmarginal

        if not isinstance(lengthscale,list) and len(lengthscale.shape)==2:
            logmarginal=[0]*lengthscale.shape[0]
            for idx in range(lengthscale.shape[0]):
                logmarginal[idx]=compute_log_marginal(self.X,lengthscale[idx],noise_delta)
        else:
            logmarginal=compute_log_marginal(self.X,lengthscale,noise_delta)
                
        #print logmarginal

        return logmarginal
Example #57
0
 def new(*args, **kwargs):
     output = vectorized(*args, **kwargs)
     if np.isscalar(args[0]) and not isinstance(args[0], np.ndarray):
         output = output.item()
     return output
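Why the .item() unwrapping above is needed: np.vectorize returns a 0-d array even for scalar input, so the wrapper checks whether the input was a scalar and converts the output back to a plain Python value:

import numpy as np

vectorized = np.vectorize(lambda v: v * 2)
out = vectorized(3.0)   # 0-d ndarray, not a Python float
out.item()              # 6.0 -> what `new` returns when np.isscalar(args[0]) is True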
Example #58
0
    def recvall(self, dest):
        """Gets the potential energy, force and virial from the driver.

        Args:
           dest: Object to be read into.

        Raises:
           Disconnected: Raised if client is disconnected.

        Returns:
           The data read from the socket to be read into dest.
        """

        blen = dest.itemsize * dest.size
        if blen > len(self._buf):
            self._buf.resize(blen)
        bpos = 0
        ntimeout = 0

        while bpos < blen:
            timeout = False

            # pre-2.5 version.
            try:
                bpart = ""
                bpart = self.recv(blen - bpos)
                if len(bpart) == 0:
                    raise socket.timeout  # if this keeps returning no data, we are in trouble....
                self._buf[bpos : bpos + len(bpart)] = np.fromstring(bpart, np.byte)
            except socket.timeout:
                # warning(" @SOCKET:   Timeout in recvall, trying again!", verbosity.low)
                timeout = True
                ntimeout += 1
                if ntimeout > NTIMEOUT:
                    warning(
                        " @SOCKET:  Couldn't receive within %5d attempts. Time to give up!"
                        % (NTIMEOUT),
                        verbosity.low,
                    )
                    raise Disconnected()
                pass
            if not timeout and len(bpart) == 0:
                raise Disconnected()
            bpos += len(bpart)

            # post-2.5 version: slightly more compact for modern python versions
            # try:
            #   bpart = 1
            #   bpart = self.recv_into(self._buf[bpos:], blen-bpos)
            # except socket.timeout:
            #   print " @SOCKET:   Timeout in status recvall, trying again!"
            #   timeout = True
            #   pass
            # if (not timeout and bpart == 0):
            #   raise Disconnected()
            # bpos += bpart
            # TODO this Disconnected() exception currently just causes the program to hang.
            # This should do something more graceful

        if np.isscalar(dest):
            return np.fromstring(self._buf[0:blen], dest.dtype)[0]
        else:
            return np.fromstring(self._buf[0:blen], dest.dtype).reshape(dest.shape)
Example #59
0
    def circles(self, x, y, s, c='b', vmin=None, vmax=None, **kwargs):
        """
        See https://gist.github.com/syrte/592a062c562cd2a98a83 

        Make a scatter plot of circles. 
        Similar to plt.scatter, but the size of circles are in data scale.
        Parameters
        ----------
        x, y : scalar or array_like, shape (n, )
            Input data
        s : scalar or array_like, shape (n, ) 
            Radius of circles.
        c : color or sequence of color, optional, default : 'b'
            `c` can be a single color format string, or a sequence of color
            specifications of length `N`, or a sequence of `N` numbers to be
            mapped to colors using the `cmap` and `norm` specified via kwargs.
            Note that `c` should not be a single numeric RGB or RGBA sequence 
            because that is indistinguishable from an array of values
            to be colormapped. (If you insist, use `color` instead.)  
            `c` can be a 2-D array in which the rows are RGB or RGBA, however. 
        vmin, vmax : scalar, optional, default: None
            `vmin` and `vmax` are used in conjunction with `norm` to normalize
            luminance data.  If either are `None`, the min and max of the
            color array is used.
        kwargs : `~matplotlib.collections.Collection` properties
            Eg. alpha, edgecolor(ec), facecolor(fc), linewidth(lw), linestyle(ls), 
            norm, cmap, transform, etc.
        Returns
        -------
        paths : `~matplotlib.collections.PathCollection`
        Examples
        --------
        a = np.arange(11)
        circles(a, a, s=a*0.2, c=a, alpha=0.5, ec='none')
        plt.colorbar()
        License
        --------
        This code is under [The BSD 3-Clause License]
        (http://opensource.org/licenses/BSD-3-Clause)
        """

        if np.isscalar(c):
            kwargs.setdefault('color', c)
            c = None

        if 'fc' in kwargs:
            kwargs.setdefault('facecolor', kwargs.pop('fc'))
        if 'ec' in kwargs:
            kwargs.setdefault('edgecolor', kwargs.pop('ec'))
        if 'ls' in kwargs:
            kwargs.setdefault('linestyle', kwargs.pop('ls'))
        if 'lw' in kwargs:
            kwargs.setdefault('linewidth', kwargs.pop('lw'))
        # You can set `facecolor` with an array for each patch,
        # while you can only set `facecolors` with a value for all.

        zipped = np.broadcast(x, y, s)
        patches = [Circle((x_, y_), s_) for x_, y_, s_ in zipped]
        collection = PatchCollection(patches, **kwargs)
        if c is not None:
            c = np.broadcast_to(c, zipped.shape).ravel()
            collection.set_array(c)
            collection.set_clim(vmin, vmax)

        ax = plt.gca()
        ax.add_collection(collection)
        ax.autoscale_view()
        # plt.draw_if_interactive()
        if c is not None:
            plt.sci(collection)
        return collection
Example #60
0
    def __init__(self, name, nCon, linear, wrt, jac, lower, upper, scale):

        self.name = name
        self.ncon = nCon
        self.linear = linear
        self.wrt = wrt
        self.jac = jac
        self.partialReturnOk = None
        self.scale = scale
        self.rs = None
        self.re = None
        # Before we can do the processing below we need to have lower
        # and upper arguments expanded:

        if lower is None:
            lower = [None for i in range(self.ncon)]
        elif np.isscalar(lower):
            lower = lower * np.ones(self.ncon)
        elif len(lower) == self.ncon:
            pass  # Some iterable object
        else:
            raise Error(
                "The 'lower' argument to addCon or addConGroup is "
                "invalid. It must be None, a scalar, or a "
                "list/array or length ncon=%d." % nCon
            )

        if upper is None:
            upper = [None for i in range(self.ncon)]
        elif np.isscalar(upper):
            upper = upper * np.ones(self.ncon)
        elif len(upper) == self.ncon:
            pass  # Some iterable object
        else:
            raise Error(
                "The 'upper' argument to addCon or addConGroup is "
                "invalid. It must be None, a scalar, or a "
                "list/array or length ncon=%d." % nCon
            )

        # ------ Process the scale argument
        scale = np.atleast_1d(scale)
        if len(scale) == 1:
            scale = scale[0] * np.ones(nCon)
        elif len(scale) == nCon:
            pass
        else:
            raise Error(
                "The length of the 'scal' argument to "
                "addCon or addConGroup is %d, but the number of "
                "constraints is %d." % (len(scale), nCon)
            )

        # Save lower and upper...they are only used for printing however
        self.lower = lower
        self.upper = upper
        # The current value of the constraint (for printing purposes)
        self.value = np.zeros(self.ncon)

        # Now we determine what kind of constraint this is:
        # 1. An equality constraint
        # 2. A upper bound on a 1-sided constraint
        # 3. A lower bound on a 1-sided constraint
        # 4. Lower and Upper bounds on 2-sided constraint
        # 5. No lower or upper bounds. Typically will only be used for
        # dummy constraint on an unconstrained problem.

        # The first 3, will give a single "constraint" in all
        # optimizers. Some optimizers can only do 1-sided constraints
        # so type 4 and 5 must be split into two separate constraints
        # automatically.

        # This keeps track of the equality constraints:
        equalityConstraints = {"value": [], "ind": [], "fact": []}

        # All (inequality) constraints get added to
        # "twoSidedConstraints". This will be used in optimizers that
        # can do two-sided constraints properly
        twoSidedConstraints = {"lower": [], "upper": [], "ind": [], "fact": []}

        # All (inequality) constraints are also added to
        # "oneSidedConstraints". These are processed such that the
        # lower bound is ALWAYS -INFINITY such that: con <= upper For
        # optimizers that need things <= zero, this can be processed
        # with a (-value) offset. One sided constraints need a fact
        # defined which is precisely 1.0 or -1.0. The -1.0 appears
        # when a greater-than-constraint is turned into a
        # less-than-constraint.
        oneSidedConstraints = {"lower": [], "upper": [], "ind": [], "fact": []}

        for icon in range(self.ncon):
            # Check for equality constraint:
            if lower[icon] == upper[icon] and lower[icon] is not None:
                equalityConstraints["value"].append(lower[icon] * scale[icon])
                equalityConstraints["ind"].append(icon)
                equalityConstraints["fact"].append(1.0)

            # Two sided constraint:
            elif lower[icon] is not None and upper[icon] is not None:
                twoSidedConstraints["lower"].append(lower[icon] * scale[icon])
                twoSidedConstraints["upper"].append(upper[icon] * scale[icon])
                twoSidedConstraints["ind"].append(icon)
                twoSidedConstraints["fact"].append(1.0)

                # TWO sets of 1 sided constraints:
                oneSidedConstraints["lower"].append(-INFINITY)
                oneSidedConstraints["upper"].append(upper[icon] * scale[icon])
                oneSidedConstraints["ind"].append(icon)
                oneSidedConstraints["fact"].append(1.0)

                oneSidedConstraints["lower"].append(-INFINITY)
                oneSidedConstraints["upper"].append(-lower[icon] * scale[icon])
                oneSidedConstraints["ind"].append(icon)
                oneSidedConstraints["fact"].append(-1.0)

            # Upper bound only:
            elif upper[icon] is not None:
                twoSidedConstraints["lower"].append(-INFINITY)
                twoSidedConstraints["upper"].append(upper[icon] * scale[icon])
                twoSidedConstraints["ind"].append(icon)
                twoSidedConstraints["fact"].append(1.0)

                # Just one, 1-sided constraint
                oneSidedConstraints["lower"].append(-INFINITY)
                oneSidedConstraints["upper"].append(upper[icon] * scale[icon])
                oneSidedConstraints["ind"].append(icon)
                oneSidedConstraints["fact"].append(1.0)

            # Lower bound only:
            elif lower[icon] is not None:
                twoSidedConstraints["lower"].append(lower[icon] * scale[icon])
                twoSidedConstraints["upper"].append(INFINITY)
                twoSidedConstraints["ind"].append(icon)
                twoSidedConstraints["fact"].append(1.0)

                # Just one, 1-sided constraint
                oneSidedConstraints["lower"].append(-INFINITY)
                oneSidedConstraints["upper"].append(-lower[icon] * scale[icon])
                oneSidedConstraints["ind"].append(icon)
                oneSidedConstraints["fact"].append(-1.0)

            # Fully unconstrained!
            elif lower[icon] is None and upper[icon] is None:
                twoSidedConstraints["lower"].append(-INFINITY)
                twoSidedConstraints["upper"].append(INFINITY)
                twoSidedConstraints["ind"].append(icon)
                twoSidedConstraints["fact"].append(1.0)

                # Since this is just a dummy constraint, we only need
                # a single one....it can just be less than INFINITY
                oneSidedConstraints["lower"].append(-INFINITY)
                oneSidedConstraints["upper"].append(INFINITY)
                oneSidedConstraints["ind"].append(icon)
                oneSidedConstraints["fact"].append(1.0)
            # end if (con type)
        # end for (con loop)

        # Convert the stuff to arrays:
        oneSidedConstraints["ind"] = np.array(oneSidedConstraints["ind"], "intc")
        twoSidedConstraints["ind"] = np.array(twoSidedConstraints["ind"], "intc")
        equalityConstraints["ind"] = np.array(equalityConstraints["ind"], "intc")

        oneSidedConstraints["fact"] = np.array(oneSidedConstraints["fact"])
        twoSidedConstraints["fact"] = np.array(twoSidedConstraints["fact"])
        equalityConstraints["fact"] = np.array(equalityConstraints["fact"])

        equalityConstraints["value"] = np.array(equalityConstraints["value"])

        # Now save this information:
        self.equalityConstraints = equalityConstraints
        self.oneSidedConstraints = oneSidedConstraints
        self.twoSidedConstraints = twoSidedConstraints
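The lower/upper handling above follows a common broadcast pattern; a stand-alone sketch of just that piece (names hypothetical):

import numpy as np

ncon = 4
lower, upper = -1.0, 2.0
if np.isscalar(lower):
    lower = lower * np.ones(ncon)   # [-1., -1., -1., -1.]
if np.isscalar(upper):
    upper = upper * np.ones(ncon)   # [ 2.,  2.,  2.,  2.]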