Code example #1
File: cvrptw.py Project: supermihi/or-tools
    def __init__(self, capacity=100, cost=100, number=None):

        Vehicle = namedtuple("Vehicle", ["index", "capacity", "cost"])

        if number is None:
            self.number = np.size(capacity)
        else:
            self.number = number
        idxs = np.array(range(0, self.number))

        if np.isscalar(capacity):
            capacities = capacity * np.ones_like(idxs)
        elif np.size(capacity) != self.number:
            print("capacity is neither scalar, nor the same size as num!")
        else:
            capacities = capacity

        if np.isscalar(cost):
            costs = cost * np.ones_like(idxs)
        elif np.size(cost) != self.number:
            print(np.size(cost))
            print("cost is neither scalar, nor the same size as num!")
        else:
            costs = cost

        self.vehicles = [Vehicle(idx, capacity, cost) for idx, capacity, cost in zip(idxs, capacities, costs)]
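The scalar-vs-array handling above relies on broadcasting: multiplying a scalar by `np.ones_like(idxs)` repeats it once per vehicle index. A minimal sketch of the same idiom, with hypothetical values:

import numpy as np

idxs = np.arange(3)
capacity = 100                                # scalar case
capacities = capacity * np.ones_like(idxs)    # array([100, 100, 100])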
Code example #2
File: moving_square.py Project: sabago/pysph
def get_dummy_particles():
    x, y = numpy.mgrid[-5 * dx : box_length + 5 * dx + 1e-10 : dx, -5 * dx : box_height + 5 * dx + 1e-10 : dx]

    xd, yd = x.ravel(), y.ravel()

    md = numpy.ones_like(xd) * m
    hd = numpy.ones_like(xd) * h

    rhod = numpy.ones_like(xd) * ro
    cd = numpy.ones_like(xd) * co
    pd = numpy.zeros_like(xd)

    dummy_fluid = base.get_particle_array(name="dummy_fluid", type=Fluid, x=xd, y=yd, h=hd, rho=rhod, c=cd, p=pd)

    # remove indices within the square

    indices = []

    np = dummy_fluid.get_number_of_particles()
    x, y = dummy_fluid.get("x", "y")

    for i in range(np):
        if -dx / 2 <= x[i] <= box_length + dx / 2:
            if -dx / 2 <= y[i] <= box_height + dx / 2:
                indices.append(i)

    to_remove = base.LongArray(len(indices))
    to_remove.set_data(numpy.array(indices))

    dummy_fluid.remove_particles(to_remove)

    return dummy_fluid
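The `numpy.ones_like(xd) * m` lines above give every particle the same scalar property (mass, smoothing length, density, sound speed) as an array matching the flattened positions. A compact sketch of the idiom, with hypothetical dx and m:

import numpy

dx, m = 0.1, 0.25
x, y = numpy.mgrid[0:1:dx, 0:1:dx]
xd = x.ravel()
md = numpy.ones_like(xd) * m    # per-particle mass array, all 0.25
assert md.shape == xd.shape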
Code example #3
File: parabolischer_Zylinder.py Project: m4z/adef
 def __init__(self, rx, ry, rz):
     myobject.__init__(self)
     ################################################################
     # parameters for the surface
     self.u_f, self.v_f = mgrid[-1:1:180j, -1:1:180j]
     # all functions for the surface
     self.x = lambda u, v: rx*u
     self.y = lambda u, v: ry*u**2
     self.z = lambda u, v: rz*v
     ################################################################
     # x, y, z are the coordinates in three-dimensional Euclidean space
     # ones_like = simply set all array values to 1.
     # dxu, dxv are the values of the first partial derivatives
     self.dxu = lambda u, v: rx*ones_like(u)
     # zeros_like = simply set all array values to 0.
     self.dxv = lambda u, v: zeros_like(v)
     self.dyu = lambda u, v: ry*2*u
     self.dyv = lambda u, v: zeros_like(v)
     self.dzu = lambda u, v: zeros_like(u)
     self.dzv = lambda u, v: rz*ones_like(v)
     self.dxuu = lambda u, v: zeros_like(u)
     self.dxvu = lambda u, v: zeros_like(u)
     self.dxuv = lambda u, v: zeros_like(v)
     self.dxvv = lambda u, v: zeros_like(v)
     
     self.dyuu = lambda u, v: ry*2*ones_like(u)  # constant, shaped like u for consistency
     self.dyvu = lambda u, v: zeros_like(u)
     self.dyuv = lambda u, v: zeros_like(v)
     self.dyvv = lambda u, v: zeros_like(v)
     self.dzuu = lambda u, v: zeros_like(u)
     self.dzvu = lambda u, v: zeros_like(u)
     self.dzuv = lambda u, v: zeros_like(v)
     self.dzvv = lambda u, v: zeros_like(v)
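The point of `ones_like`/`zeros_like` here is that even constant partial derivatives come back shaped like the parameter grid, so all derivative lambdas can be evaluated and combined uniformly. A quick shape check (a sketch, assuming the same star-style numpy imports the class relies on):

from numpy import mgrid, ones_like

u, v = mgrid[-1:1:180j, -1:1:180j]
rx = 2.0
dxu = rx * ones_like(u)    # constant derivative, but shaped (180, 180) like u
assert dxu.shape == u.shape == (180, 180)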
Code example #4
File: plot_pk_k3pk.py Project: SaulAryehKohn/capo
def posterior(kpl, pk, err, pkfold=None, errfold=None):
    k0 = n.abs(kpl).argmin()
    kpl = kpl[k0:]
    if pkfold is None:
        print('Folding for posterior')
        pkfold = pk[k0:].copy()
        errfold = err[k0:].copy()
        pkpos,errpos = pk[k0+1:].copy(), err[k0+1:].copy()
        pkneg,errneg = pk[k0-1:0:-1].copy(), err[k0-1:0:-1].copy()
        pkfold[1:] = (pkpos/errpos**2 + pkneg/errneg**2) / (1./errpos**2 + 1./errneg**2)
        errfold[1:] = n.sqrt(1./(1./errpos**2 + 1./errneg**2))

    #ind = n.logical_and(kpl>.2, kpl<.5)
    ind = n.logical_and(kpl>.15, kpl<.5)
    #ind = n.logical_and(kpl>.12, kpl<.5)
    #print kpl,pk.real,err
    kpl = kpl[ind]
    pk = kpl**3 * pkfold[ind]/(2*n.pi**2)
    err = kpl**3 * errfold[ind]/(2*n.pi**2)
    s = n.logspace(5.,6.5,100)
    data = []
    for ss in s:
        data.append(n.exp(-.5*n.sum((pk.real - ss)**2 / err**2)))
    #    print data[-1]
    data = n.array(data)
    #print data
    #print s
    #data/=n.sum(data)
    data /= n.max(data)
    p.figure(5)
    p.plot(s, data)
    p.plot(s, n.exp(-.5)*n.ones_like(s))
    p.plot(s, n.exp(-.5*2**2)*n.ones_like(s))
    p.show()
Code example #5
def fix_chip_wavelength(model_orders, data_orders, band_cutoff=1870):
    """ Adjust the wavelength in data_orders to be self-consistent
    """
    # H band
    model_orders_H = [o.copy() for o in model_orders if o.x[-1] < band_cutoff]
    data_orders_H = [o.copy() for o in data_orders if o.x[-1] < band_cutoff]
    ordernums_H = 121.0 - np.arange(len(model_orders_H))
    p_H = fit_wavelength(model_orders_H, ordernums_H, first_order=3, last_order=len(ordernums_H) - 4)

    # K band
    model_orders_K = [o.copy() for o in model_orders if o.x[-1] > band_cutoff]
    data_orders_K = [o.copy() for o in data_orders if o.x[-1] > band_cutoff]
    ordernums_K = 92.0 - np.arange(len(model_orders_K))
    p_K = fit_wavelength(model_orders_K, ordernums_K, first_order=7, last_order=len(ordernums_K) - 4)

    new_orders = []
    for i, order in enumerate(data_orders):
        pixels = np.arange(order.size(), dtype=float)
        if order.x[-1] < band_cutoff:
            # H band
            ordernum = ordernums_H[i] * np.ones_like(pixels)
            wave = p_H(pixels, ordernum) / ordernum
        else:
            # K band
            ordernum = ordernums_K[i-len(ordernums_H)] * np.ones_like(pixels)
            wave = p_K(pixels, ordernum) / ordernum
            
        new_orders.append(DataStructures.xypoint(x=wave, y=order.y, cont=order.cont, err=order.err))
    return new_orders
Code example #6
    def pixel_to_prime(self, x, y, color=0):
        # Secret decoder ring:
        #  http://www.sdss.org/dr7/products/general/astrometry.html
        # (color)0 is called riCut;
        # g0, g1, g2, and g3 are called
        #    dRow0, dRow1, dRow2, and dRow3, respectively;
        # h0, h1, h2, and h3 are called
        #    dCol0, dCol1, dCol2, and dCol3, respectively;
        # px and py are called csRow and csCol, respectively;
        # and qx and qy are called ccRow and ccCol, respectively.
        color0 = self._get_ricut()
        g0, g1, g2, g3 = self._get_drow()
        h0, h1, h2, h3 = self._get_dcol()
        px, py, qx, qy = self._get_cscc()

        # #$(%*&^(%$%*& bad documentation.
        (px,py) = (py,px)
        (qx,qy) = (qy,qx)

        yprime = y + g0 + g1 * x + g2 * x**2 + g3 * x**3
        xprime = x + h0 + h1 * x + h2 * x**2 + h3 * x**3

        # The code below implements this, vectorized:
        # if color < color0:
        #   xprime += px * color
        #   yprime += py * color
        # else:
        #   xprime += qx
        #   yprime += qy
        qx = qx * np.ones_like(x)
        qy = qy * np.ones_like(y)
        xprime += np.where(color < color0, px * color, qx)
        yprime += np.where(color < color0, py * color, qy)

        return (xprime, yprime)
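The `qx * np.ones_like(x)` lines lift the scalar offsets to the shape of `x` so both branches handed to `np.where` are arrays of matching shape. A minimal sketch of the pattern, with hypothetical values:

import numpy as np

color = np.array([0.5, 1.5, 2.5])
color0, px, qx = 1.0, 0.1, 0.2
x = np.zeros(3)
qx_arr = qx * np.ones_like(x)    # scalar qx lifted to x's shape
x += np.where(color < color0, px * color, qx_arr)
# elementwise: x[i] += px*color[i] if color[i] < color0, else qx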
Code example #7
    def prime_to_pixel(self, xprime, yprime,  color=0):
        color0 = self._get_ricut()
        g0, g1, g2, g3 = self._get_drow()
        h0, h1, h2, h3 = self._get_dcol()
        px, py, qx, qy = self._get_cscc()

        # #$(%*&^(%$%*& bad documentation.
        (px,py) = (py,px)
        (qx,qy) = (qy,qx)

        qx = qx * np.ones_like(xprime)
        qy = qy * np.ones_like(yprime)
        xprime -= np.where(color < color0, px * color, qx)
        yprime -= np.where(color < color0, py * color, qy)

        # Now invert:
        #   yprime = y + g0 + g1 * x + g2 * x**2 + g3 * x**3
        #   xprime = x + h0 + h1 * x + h2 * x**2 + h3 * x**3
        x = xprime - h0
        # dumb-ass Newton's method
        dx = 1.
        # FIXME -- should just update the ones that aren't zero
        # FIXME -- should put in some failsafe...
        while np.max(np.abs(np.atleast_1d(dx))) > 1e-10:
            xp    = x + h0 + h1 * x + h2 * x**2 + h3 * x**3
            dxpdx = 1 +      h1     + h2 * 2*x +  h3 * 3*x**2
            dx = (xprime - xp) / dxpdx
            x += dx
        y = yprime - (g0 + g1 * x + g2 * x**2 + g3 * x**3)
        return (x, y)
Code example #8
File: test_scalarmath.py Project: birm/numpy
    def test_blocked(self):
        # test alignments offsets for simd instructions
        # alignments for vz + 2 * (vs - 1) + 1
        for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:
            for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
                                                            type='binary',
                                                            max_size=sz):
                exp1 = np.ones_like(inp1)
                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
                assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)
                assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)

                np.add(inp1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)

                inp2[...] += np.arange(inp2.size, dtype=dt) + 1
                assert_almost_equal(np.square(inp2),
                                    np.multiply(inp2, inp2),  err_msg=msg)
                # skip true divide for ints
                if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning):
                    assert_almost_equal(np.reciprocal(inp2),
                                        np.divide(1, inp2),  err_msg=msg)

                inp1[...] = np.ones_like(inp1)
                np.add(inp1, 2, out=out)
                assert_almost_equal(out, exp1 + 2, err_msg=msg)
                inp2[...] = np.ones_like(inp2)
                np.add(2, inp2, out=out)
                assert_almost_equal(out, exp1 + 2, err_msg=msg)
Code example #9
File: telescope.py Project: TianlaiProject/tlpipe
    def noise_variance_feedpairs(self, fi, fj, f_indices, nt_per_day, ndays=None):
        ndays = self.ndays if not ndays else ndays # Set to value if not set.
        t_int = ndays * units.t_sidereal / nt_per_day
        # bw = 1.0e6 * (self.freq_upper - self.freq_lower) / self.num_freq
        bw = np.abs(self.frequencies[1] - self.frequencies[0]) * 1e6

        return np.ones_like(fi) * np.ones_like(fj) * 2.0*self.tsys(f_indices)**2 / (t_int * bw) # 2.0 for two pol
Code example #10
File: test_util.py Project: kb-rahul/librosa
    def __test_pass(axis, data, idx):
        # By default, mean aggregation
        dsync = librosa.util.sync(data, idx, axis=axis)
        if data.ndim == 1 or axis == -1:
            assert np.allclose(dsync, 2 * np.ones_like(dsync))
        else:
            assert np.allclose(dsync, data)

        # Explicit mean aggregation
        dsync = librosa.util.sync(data, idx, aggregate=np.mean, axis=axis)
        if data.ndim == 1 or axis == -1:
            assert np.allclose(dsync, 2 * np.ones_like(dsync))
        else:
            assert np.allclose(dsync, data)

        # Max aggregation
        dsync = librosa.util.sync(data, idx, aggregate=np.max, axis=axis)
        if data.ndim == 1 or axis == -1:
            assert np.allclose(dsync, 4 * np.ones_like(dsync))
        else:
            assert np.allclose(dsync, data)

        # Min aggregation
        dsync = librosa.util.sync(data, idx, aggregate=np.min, axis=axis)
        if data.ndim == 1 or axis == -1:
            assert np.allclose(dsync, np.zeros_like(dsync))
        else:
            assert np.allclose(dsync, data)

        # Test for dtype propagation
        assert dsync.dtype == data.dtype
Code example #11
File: numeric.py Project: ismaelresp/PyEMMA
    def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
        def within_tol(x, y, atol, rtol):
            result = np.less_equal(np.abs(x-y), atol + rtol * np.abs(y))
            if np.isscalar(a) and np.isscalar(b):
                result = np.bool(result)
            return result

        x = np.array(a, copy=False, subok=True, ndmin=1)
        y = np.array(b, copy=False, subok=True, ndmin=1)
        xfin = np.isfinite(x)
        yfin = np.isfinite(y)
        if np.all(xfin) and np.all(yfin):
            return within_tol(x, y, atol, rtol)
        else:
            finite = xfin & yfin
            cond = np.zeros_like(finite, subok=True)
            # Because we're using boolean indexing, x & y must be the same shape.
            # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
            # lib.stride_tricks, though, so we can't import it here.
            x = x * np.ones_like(cond)
            y = y * np.ones_like(cond)
            # Avoid subtraction with infinite/nan values...
            cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
            # Check for equality of infinite values...
            cond[~finite] = (x[~finite] == y[~finite])
            if equal_nan:
                # Make NaN == NaN
                cond[np.isnan(x) & np.isnan(y)] = True
            return cond
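As the in-code comment notes, `x * np.ones_like(cond)` is a stand-in for `np.broadcast_arrays`, used only because of the import constraint. A small equivalence sketch:

import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.array([[1.0], [2.0]])
cond = np.zeros(np.broadcast(x, y).shape, dtype=bool)    # shape (2, 3)
xb = x * np.ones_like(cond)             # broadcast via multiplication by ones
xb2, yb2 = np.broadcast_arrays(x, y)    # the cleaner equivalent
assert xb.shape == xb2.shape == cond.shape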
Code example #12
File: common.py Project: MechCoder/scipy
def reflective_transformation(y, lb, ub):
    """Compute reflective transformation and its gradient."""
    if in_bounds(y, lb, ub):
        return y, np.ones_like(y)

    lb_finite = np.isfinite(lb)
    ub_finite = np.isfinite(ub)

    x = y.copy()
    g_negative = np.zeros_like(y, dtype=bool)

    mask = lb_finite & ~ub_finite
    x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])
    g_negative[mask] = y[mask] < lb[mask]

    mask = ~lb_finite & ub_finite
    x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])
    g_negative[mask] = y[mask] > ub[mask]

    mask = lb_finite & ub_finite
    d = ub - lb
    t = np.remainder(y[mask] - lb[mask], 2 * d[mask])
    x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)
    g_negative[mask] = t > d[mask]

    g = np.ones_like(y)
    g[g_negative] = -1

    return x, g
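A worked call, as a sketch: `in_bounds` is assumed here to be the module's usual elementwise bounds check. A point 0.3 above the upper bound is reflected 0.3 back inside, and the gradient sign flips:

import numpy as np

def in_bounds(x, lb, ub):    # assumed helper
    return np.all((x >= lb) & (x <= ub))

lb, ub = np.array([0.0]), np.array([1.0])
y = np.array([1.3])
x, g = reflective_transformation(y, lb, ub)
# x == [0.7], g == [-1.0]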
Code example #13
File: modeling.py Project: DriesDries/shangri-la
def histgram_3D(data):
    '''
    Display the given two-dimensional array as a 3D histogram.
    '''
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    
    x = data[:,0]
    y = data[:,1]

    hist, xedges, yedges = np.histogram2d(x, y, bins=30)
    X, Y = np.meshgrid(xedges[:-1] + 0.25, yedges[:-1] + 0.25)

    # flatten into 1-D arrays for bar3d
    X = X.flatten()
    Y = Y.flatten()
    Z = np.zeros(len(X))

    # widths of the bars to display
    dx = (xedges[1] - xedges[0]) * np.ones_like(Z)
    dy = (yedges[1] - yedges[0]) * np.ones_like(Z)
    dz = hist.flatten() # this one can stay as-is

    # draw
    ax.bar3d(X, Y, Z, dx, dy, dz, color='b', zsort='average')
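A quick way to exercise it (a sketch; assumes `numpy` and `matplotlib.pyplot` are imported as `np` and `plt` at module level, as the function body implies):

import numpy as np
import matplotlib.pyplot as plt

data = np.random.randn(1000, 2)    # N x 2 array of (x, y) samples
histgram_3D(data)
plt.show()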
Code example #14
File: _analyze.py Project: mmittag/expyfun
def logit(prop, max_events=None):
    """Convert proportion (expressed in the range [0, 1]) to logit.

    Parameters
    ----------
    prop : float | array-like
        the occurrence proportion.
    max_events : int | array-like | None
        the number of events used to calculate ``prop``. Used in a correction
        factor for cases when ``prop`` is 0 or 1, to prevent returning ``inf``.
        If ``None``, no correction is done, and ``inf`` or ``-inf`` may result.

    Returns
    -------
    lgt : ``numpy.ndarray``, with shape matching ``numpy.array(prop).shape``.
    """
    prop = np.atleast_1d(prop).astype(float)
    if np.any([prop > 1, prop < 0]):
        raise ValueError('Proportions must be in the range [0, 1].')
    if max_events is not None:
        # add equivalent of half an event to 0s, and subtract same from 1s
        max_events = np.atleast_1d(max_events) * np.ones_like(prop)
        corr_factor = 0.5 / max_events
        for loc in zip(*np.where(prop == 0)):
            prop[loc] = corr_factor[loc]
        for loc in zip(*np.where(prop == 1)):
            prop[loc] = 1 - corr_factor[loc]
    return np.log(prop / (np.ones_like(prop) - prop))
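The correction factor in action, as a sketch of expected values: `logit(0.5)` is 0, an uncorrected proportion of 1 produces `inf`, and `max_events` pulls the endpoint half an event inward:

import numpy as np

print(logit(0.5))                   # [0.]
print(logit(1.0))                   # [inf], no correction
print(logit(1.0, max_events=10))    # log(0.95/0.05), about [2.944]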
Code example #15
File: sources.py Project: JLHelm/mayavi
 def _scalars_changed(self, s):
     self.dataset.point_data.scalars = s
     self.dataset.point_data.scalars.name = 'scalars'
     self.set(vectors=np.c_[np.ones_like(s),
                               np.ones_like(s),
                               s])
     self.update()
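`np.c_` stacks the three equal-length columns into an (n, 3) vector array whose first two components are fixed at 1. A shape sketch:

import numpy as np

s = np.arange(4.0)
vec = np.c_[np.ones_like(s), np.ones_like(s), s]
assert vec.shape == (4, 3)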
Code example #16
File: test_volatility.py Project: VolosSoftware/arch
    def test_ewma(self):
        ewma = EWMAVariance()

        sv = ewma.starting_values(self.resids)
        assert_equal(sv.shape[0], ewma.num_params)

        bounds = ewma.bounds(self.resids)
        assert_equal(len(bounds), 0)
        var_bounds = ewma.variance_bounds(self.resids)
        backcast = ewma.backcast(self.resids)
        parameters = np.array([])

        names = ewma.parameter_names()
        names_target = []
        assert_equal(names, names_target)

        ewma.compute_variance(parameters, self.resids, self.sigma2,
                              backcast, var_bounds)
        cond_var_direct = np.zeros_like(self.sigma2)
        parameters = np.array([0.0, 0.06, 0.94])
        rec.garch_recursion(parameters,
                            self.resids ** 2.0,
                            np.sign(self.resids),
                            cond_var_direct,
                            1, 0, 1, self.T, backcast, var_bounds)
        # sigma3 = np.zeros_like(self.sigma2)
        # sigma3[0] = backcast
        # for t in range(1,self.T):
        # sigma3[t] = 0.94 * sigma3[t-1] + 0.06 * self.resids[t-1]**2.0

        assert_allclose(self.sigma2 / cond_var_direct,
                        np.ones_like(self.sigma2))

        A, b = ewma.constraints()
        A_target = np.empty((0, 0))
        b_target = np.empty((0,))
        assert_array_equal(A, A_target)
        assert_array_equal(b, b_target)
        state = np.random.get_state()
        rng = Normal()
        sim_data = ewma.simulate(parameters, self.T, rng.simulate([]))
        np.random.set_state(state)
        e = np.random.standard_normal(self.T + 500)
        initial_value = 1.0

        sigma2 = np.zeros(self.T + 500)
        data = np.zeros(self.T + 500)
        sigma2[0] = initial_value
        data[0] = np.sqrt(initial_value)
        for t in range(1, self.T + 500):
            sigma2[t] = 0.94 * sigma2[t - 1] + 0.06 * data[t - 1] ** 2.0
            data[t] = e[t] * np.sqrt(sigma2[t])

        data = data[500:]
        sigma2 = sigma2[500:]
        assert_almost_equal(data - sim_data[0] + 1.0, np.ones_like(data))
        assert_almost_equal(sigma2 / sim_data[1], np.ones_like(sigma2))

        assert_equal(ewma.num_params, 0)
        assert_equal(ewma.name, 'EWMA/RiskMetrics')
Code example #17
File: rrtm.py Project: cjcardinale/climlab
    def _prepare_sw_arguments(self, ncol, nlay):
        aldif = _climlab_to_rrtm_sfc(self.aldif * np.ones_like(self.Ts))
        aldir = _climlab_to_rrtm_sfc(self.aldir * np.ones_like(self.Ts))
        asdif = _climlab_to_rrtm_sfc(self.asdif * np.ones_like(self.Ts))
        asdir = _climlab_to_rrtm_sfc(self.asdir * np.ones_like(self.Ts))
        coszen = _climlab_to_rrtm_sfc(self.coszen * np.ones_like(self.Ts))
        #  THE REST OF THESE ARGUMENTS ARE STILL BEING HARD CODED.
        #   NEED TO FIX THIS UP...

        #  These arrays have an extra dimension for number of bands
        dim_sw1 = [nbndsw,ncol,nlay]     # [nbndsw,ncol,nlay]
        dim_sw2 = [ncol,nlay,nbndsw]  # [ncol,nlay,nbndsw]
        tauc = np.zeros(dim_sw1) # In-cloud optical depth
        ssac = np.zeros(dim_sw1) # In-cloud single scattering albedo
        asmc = np.zeros(dim_sw1) # In-cloud asymmetry parameter
        fsfc = np.zeros(dim_sw1) # In-cloud forward scattering fraction (delta function pointing forward "forward peaked scattering")

        # AEROSOLS
        tauaer = np.zeros(dim_sw2)   # Aerosol optical depth (iaer=10 only), Dimensions,  (ncol,nlay,nbndsw)] #  (non-delta scaled)
        ssaaer = np.zeros(dim_sw2)   # Aerosol single scattering albedo (iaer=10 only), Dimensions,  (ncol,nlay,nbndsw)] #  (non-delta scaled)
        asmaer = np.zeros(dim_sw2)   # Aerosol asymmetry parameter (iaer=10 only), Dimensions,  (ncol,nlay,nbndsw)] #  (non-delta scaled)
        ecaer  = np.zeros([ncol,nlay,naerec])   # Aerosol optical depth at 0.55 micron (iaer=6 only), Dimensions,  (ncol,nlay,naerec)] #  (non-delta scaled)

        return (aldif,aldir,asdif,asdir,coszen,tauc,ssac,asmc,
                fsfc,tauaer,ssaaer,asmaer,ecaer)
Code example #18
def generate_psf_2(uu, vv, ww, settings):
    im = oskar.imager.Imager('Single')
    im.set_fft_on_gpu(False)
    psf = im.make_image(uu, vv, ww, numpy.ones_like(uu, dtype='c8'),
                        numpy.ones_like(uu, dtype='f8'), settings['psf_fov_deg'],
                        settings['psf_im_size'])
    return psf
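Note the dtype overrides: `ones_like` copies the shape of `uu` but replaces the type, giving complex unit amplitudes ('c8' = complex64) and float unit weights ('f8' = float64). A small check:

import numpy as np

uu = np.zeros(4)                       # float64 baseline coordinates
amps = np.ones_like(uu, dtype='c8')    # complex64 ones: (1+0j)
wts = np.ones_like(uu, dtype='f8')     # float64 ones
assert amps.dtype == np.complex64 and wts.dtype == np.float64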
Code example #19
File: moving_square.py Project: sabago/pysph
def get_fluid():
    """ Get the fluid particle array """

    x, y = numpy.mgrid[dx : box_length - 1e-10 : dx, dx : box_height - 1e-10 : dx]

    xf, yf = x.ravel(), y.ravel()

    mf = numpy.ones_like(xf) * m
    hf = numpy.ones_like(xf) * h

    rhof = numpy.ones_like(xf) * ro
    cf = numpy.ones_like(xf) * co
    pf = numpy.zeros_like(xf)

    fluid = base.get_particle_array(name="fluid", type=Fluid, x=xf, y=yf, h=hf, rho=rhof, c=cf, p=pf)

    # remove indices within the square

    indices = []

    np = fluid.get_number_of_particles()
    x, y = fluid.get("x", "y")

    for i in range(np):
        if 1.0 - dx / 2 <= x[i] <= 2.0 + dx / 2:
            if 2.0 - dx / 2 <= y[i] <= 3.0 + dx / 2:
                indices.append(i)

    to_remove = base.LongArray(len(indices))
    to_remove.set_data(numpy.array(indices))

    fluid.remove_particles(to_remove)

    return fluid
Code example #20
File: boss_bodenheimer.py Project: Ingwar/amuse
  def new_model(self):
        
    base_sphere=uniform_unit_sphere(self.targetN,base_grid=self.base_grid)
    x_uni,y_uni,z=base_sphere.make_xyz()
    self.actualN=len(x_uni)
    rad=numpy.sqrt(x_uni**2 + y_uni**2)
    phi=numpy.arctan2(y_uni,x_uni)
    n_vec=2000
    phi_new_vec=numpy.linspace(-numpy.pi, numpy.pi, n_vec)
    phi_old_vec=phi_new_vec + self.rho_peturb*(numpy.sin(2.*phi_new_vec)/2.)
    phi_new=numpy.interp(phi,phi_old_vec,phi_new_vec)
    x=rad*numpy.cos(phi_new)
    y=rad*numpy.sin(phi_new)
    
    rad=numpy.sqrt(x**2 + y**2)
    phi=numpy.arctan2(y,x)
    vel=self.omega*rad
    vx=-vel*numpy.sin(phi)
    vy= vel*numpy.cos(phi)
    vz=0.

    mass=numpy.ones_like(x)/self.actualN

    Ep=3./5
    self.internalE=Ep*self.ethep_ratio
    internal_energy=numpy.ones_like(x)*self.internalE
    
    return (mass,x,y,z,vx,vy,vz,internal_energy)
Code example #21
File: test_sed.py Project: JonathanDHarris/gammapy
def test_cube_sed2():
    """Tests against known results with integral cube of 1s.
    """
    spec_cube = FermiGalacticCenter.diffuse_model()
    spec_cube.data = 10 * np.ones_like(spec_cube.data[:-1])

    counts = FermiGalacticCenter.diffuse_model()
    counts.data = np.ones_like(counts.data[:-1])

    lons, lats = spec_cube.spatial_coordinate_images

    mask = lon_lat_rectangle_mask(lons.value, lats.value, -8, 8, -4, 4)

    sed_table1 = cube_sed(spec_cube, mask, flux_type='integral')

    assert_allclose(sed_table1['ENERGY'][0], 56.95239033587774)
    assert_allclose(sed_table1['DIFF_FLUX'][0], 170.86224025271986)
    assert_allclose(sed_table1['DIFF_FLUX_ERR'], 0)

    sed_table2 = cube_sed(spec_cube, mask, flux_type='integral',
                          errors=True, standard_error = 0.1)

    assert_allclose(sed_table2['DIFF_FLUX_ERR'][0],
                    0.1 * sed_table2['DIFF_FLUX'][0])

    sed_table3 = cube_sed(spec_cube, mask, flux_type='integral',
                          errors=True, counts = counts)

    assert_allclose(sed_table3['DIFF_FLUX_ERR'][0],
                    np.sqrt(1./256) * sed_table3['DIFF_FLUX'][0])
Code example #22
    def build_model(self):
        dim = 4
        mu1 = 0.5 * np.ones(dim)
        mu2 = -mu1

        stdev = 0.1
        sigma = np.power(stdev, 2) * np.eye(dim)
        isigma = np.linalg.inv(sigma)
        dsigma = np.linalg.det(sigma)

        w1 = stdev
        w2 = (1 - stdev)

        def two_gaussians(x):
            log_like1 = - 0.5 * dim * tt.log(2 * np.pi) \
                        - 0.5 * tt.log(dsigma) \
                        - 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1)
            log_like2 = - 0.5 * dim * tt.log(2 * np.pi) \
                        - 0.5 * tt.log(dsigma) \
                        - 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2)
            return tt.log(w1 * tt.exp(log_like1) + w2 * tt.exp(log_like2))

        with pm.Model() as ATMIP_test:
            X = pm.Uniform('X',
                           shape=dim,
                           lower=-2. * np.ones_like(mu1),
                           upper=2. * np.ones_like(mu1),
                           testval=-1. * np.ones_like(mu1),
                           transform=None)
            like = pm.Deterministic('like', two_gaussians(X))
            pm.Potential('like', like)
        return ATMIP_test
Code example #23
File: formats.py Project: BTY2684/astropy
    def set_jds(self, val1, val2):
        self._check_scale(self._scale)  # Validate scale.

        sum12, err12 = two_sum(val1, val2)
        iy_start = np.trunc(sum12).astype(int)
        extra, y_frac = two_sum(sum12, -iy_start)
        y_frac += extra + err12

        val = (val1 + val2).astype(np.double)
        iy_start = np.trunc(val).astype(int)

        imon = np.ones_like(iy_start)
        iday = np.ones_like(iy_start)
        ihr = np.zeros_like(iy_start)
        imin = np.zeros_like(iy_start)
        isec = np.zeros_like(y_frac)

        # Possible enhancement: use np.unique to only compute start, stop
        # for unique values of iy_start.
        scale = self.scale.upper().encode('ascii')
        jd1_start, jd2_start = erfa.dtf2d(scale, iy_start, imon, iday,
                                          ihr, imin, isec)
        jd1_end, jd2_end = erfa.dtf2d(scale, iy_start + 1, imon, iday,
                                      ihr, imin, isec)

        t_start = Time(jd1_start, jd2_start, scale=self.scale, format='jd')
        t_end = Time(jd1_end, jd2_end, scale=self.scale, format='jd')
        t_frac = t_start + (t_end - t_start) * y_frac

        self.jd1, self.jd2 = day_frac(t_frac.jd1, t_frac.jd2)
Code example #24
    def siggen_model(s, rad, phi, z, e, temp, num_1, num_2, num_3, den_1, den_2, den_3):
      out = np.zeros_like(data)
      
      detector.SetTemperature(temp)
      siggen_wf= detector.GetSiggenWaveform(rad, phi, z, energy=2600)

      if siggen_wf is None:
        return np.ones_like(data)*-1.
      if np.amax(siggen_wf) == 0:
        print "wtf is even happening here?"
        return np.ones_like(data)*-1.
      siggen_wf = np.pad(siggen_wf, (detector.zeroPadding,0), 'constant', constant_values=(0, 0))

      num = [num_1, num_2, num_3]
      den = [1,   den_1, den_2, den_3]
#      num = [-1.089e10,  5.863e17,  6.087e15]
#      den = [1,  3.009e07, 3.743e14,5.21e18]
      system = signal.lti(num, den)
      t = np.arange(0, len(siggen_wf)*10E-9, 10E-9)
      tout, siggen_wf, x = signal.lsim(system, siggen_wf, t)
      siggen_wf /= np.amax(siggen_wf)
      
      siggen_data = siggen_wf[detector.zeroPadding::]
      
      siggen_data = siggen_data*e
      
      out[s:] = siggen_data[0:(len(data) - s)]

      return out
Code example #25
File: drops.py Project: pankajp/pysph
def get_circular_patch(name="", type=0, dx=0.05):
    
    x,y = numpy.mgrid[-1.05:1.05+1e-4:dx, -1.05:1.05+1e-4:dx]
    x = x.ravel()
    y = y.ravel()
 
    m = numpy.ones_like(x)*dx*dx
    h = numpy.ones_like(x)*2*dx
    rho = numpy.ones_like(x)

    p = 0.5*1.0*100*100*(1 - (x**2 + y**2))

    cs = numpy.ones_like(x) * 100.0

    u = 0*x
    v = 0*y

    indices = []

    for i in range(len(x)):
        if numpy.sqrt(x[i]*x[i] + y[i]*y[i]) - 1 > 1e-10:
            indices.append(i)
            
    pa = base.get_particle_array(x=x, y=y, m=m, rho=rho, h=h, p=p, u=u, v=v,
                                 cs=cs,name=name, type=type)

    la = base.LongArray(len(indices))
    la.set_data(numpy.array(indices))

    pa.remove_particles(la)

    pa.set(idx=numpy.arange(len(pa.x)))

    return pa
Code example #26
    def siggen_model(s, rad, phi, z, e, temp, gradIdx, pcRadIdx):
      out = np.zeros_like(data)
      if (gradIdx > detectorList.shape[0]-1) or (pcRadIdx > detectorList.shape[1]-1) :
        return np.ones_like(data)*-1.
      detector = detectorList[gradIdx, pcRadIdx]
      
      detector.SetTemperature(temp)
      siggen_wf= detector.GetSiggenWaveform(rad, phi, z, energy=2600)

#      print "len of siggen wf is %d" % len(siggen_wf)


      if siggen_wf is None:
        return np.ones_like(data)*-1.
      if np.amax(siggen_wf) == 0:
        print "wtf is even happening here?"
        return np.ones_like(data)*-1.
      siggen_wf = np.pad(siggen_wf, (detector.zeroPadding,0), 'constant', constant_values=(0, 0))


      tout, siggen_wf, x = signal.lsim(system, siggen_wf, t)
      siggen_wf /= np.amax(siggen_wf)
      
      siggen_data = siggen_wf[detector.zeroPadding::]
      
      siggen_data = siggen_data*e
      
      out[s:] = siggen_data[0:(len(data) - s)]

      return out
Code example #27
    def siggen_model(s, rad, phi, z, e, temp):
      out = np.zeros_like(data)
      
      detector.SetTemperature(temp)
      siggen_wf= detector.GetSiggenWaveform(rad, phi, z, energy=2600)

      if siggen_wf is None:
        return np.ones_like(data)*-1.
      if np.amax(siggen_wf) == 0:
        print "wtf is even happening here?"
        return np.ones_like(data)*-1.
      siggen_wf = np.pad(siggen_wf, (detector.zeroPadding,0), 'constant', constant_values=(0, 0))


      tout, siggen_wf, x = signal.lsim(system, siggen_wf, t)
      siggen_wf /= np.amax(siggen_wf)
      
      siggen_data = siggen_wf[detector.zeroPadding::]
      
      siggen_data = siggen_data*e
      
      
      #ok, so the siggen step size is 1 ns and the
      #WARNING: only works for 1 ns step size for now
      #TODO: might be worth downsampling BEFORE applying transfer function
    
      siggen_start_idx = int(np.around(s, decimals=1) * data_to_siggen_size_ratio % data_to_siggen_size_ratio)
      switchpoint_ceil = int(np.ceil(s))

      samples_to_fill = (len(data) - switchpoint_ceil)
      sampled_idxs = np.arange(samples_to_fill, dtype=int)*data_to_siggen_size_ratio+siggen_start_idx
      
      out[switchpoint_ceil:] = siggen_data[sampled_idxs]

      return out
Code example #28
File: test_ccddata.py Project: AlexaVillaume/ccdproc
def test_arithmetic_overload_ccddata_operand(ccd_data):
    ccd_data.uncertainty = StdDevUncertainty(np.ones_like(ccd_data))
    operand = ccd_data.copy()
    result = ccd_data.add(operand)
    assert len(result.meta) == 0
    np.testing.assert_array_equal(result.data,
                                  2 * ccd_data.data)
    np.testing.assert_array_equal(result.uncertainty.array,
                                  np.sqrt(2) * ccd_data.uncertainty.array)

    result = ccd_data.subtract(operand)
    assert len(result.meta) == 0
    np.testing.assert_array_equal(result.data,
                                  0 * ccd_data.data)
    np.testing.assert_array_equal(result.uncertainty.array,
                                  np.sqrt(2) * ccd_data.uncertainty.array)

    result = ccd_data.multiply(operand)
    assert len(result.meta) == 0
    np.testing.assert_array_equal(result.data,
                                  ccd_data.data ** 2)
    expected_uncertainty = (np.sqrt(2) * np.abs(ccd_data.data) *
                            ccd_data.uncertainty.array)
    np.testing.assert_allclose(result.uncertainty.array,
                               expected_uncertainty)

    result = ccd_data.divide(operand)
    assert len(result.meta) == 0
    np.testing.assert_array_equal(result.data,
                                  np.ones_like(ccd_data.data))
    expected_uncertainty = (np.sqrt(2) / np.abs(ccd_data.data) *
                            ccd_data.uncertainty.array)
    np.testing.assert_allclose(result.uncertainty.array,
                               expected_uncertainty)
Code example #29
def plot(y, title, t):
    
        a = y[0]
        b = y[1]
        
        print(a)
        
        if a and b:
        
            bins = numpy.linspace(min(a+b), max(a+b), 20)
            pyplot.clf()
            
            if a:
                w0 = numpy.ones_like(a)/float(len(a))
                pyplot.hist(a, bins, weights=w0,alpha=0.5, color='r', histtype='stepfilled', label='link')
            
            if b:
                w1 = numpy.ones_like(b)/float(len(b))
                pyplot.hist(b, bins,weights=w1, alpha=0.5, color='b', histtype='stepfilled', label='no link')
            
            pyplot.title(title)
            pyplot.ylabel("Fraction over population")
            pyplot.xlabel("Similarity")
            pyplot.legend();
            #plt.savefig("/Users/spoulson/Dropbox/my_papers/figs/"+title.replace(' ','_')+'_'+ str(t) +'.png')
            pyplot.show()
Code example #30
    def test_blocked(self):
        # test alignments offsets for simd instructions
        # alignments for vz + 2 * (vs - 1) + 1
        for dt, sz in [(np.float32, 11), (np.float64, 7)]:
            for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,
                                                            type='binary',
                                                            max_size=sz):
                exp1 = np.ones_like(inp1)
                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)
                assert_almost_equal(np.add(inp1, 1), exp1 + 1, err_msg=msg)
                assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)

                np.add(inp1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)

                inp2[...] += np.arange(inp2.size, dtype=dt) + 1
                assert_almost_equal(np.square(inp2),
                                    np.multiply(inp2, inp2),  err_msg=msg)
                assert_almost_equal(np.reciprocal(inp2),
                                    np.divide(1, inp2),  err_msg=msg)

                inp1[...] = np.ones_like(inp1)
                inp2[...] = np.zeros_like(inp2)
                np.add(inp1, 1, out=out)
                assert_almost_equal(out, exp1 + 1, err_msg=msg)
                np.add(1, inp2, out=out)
                assert_almost_equal(out, exp1, err_msg=msg)
Code example #31
    j += 1

j = 0
bases = [5, 20, 50, 80, 100, 120, 140, 160, 180, 200, 250, 300]
t_deim = np.zeros(len(bases), dtype=np.float64)
for i in bases:
    t_deim[j] = np.average(
        np.load('simulation/reduced/full_trajectory/fixed_deim/pod_' + str(i) +
                '/sp0099ts2879N.petsc_timeings.npy'))
    j += 1

t_hd = 200
p = plt.subplot(111)
plt.plot(bases, t_deim / t_hd, '--', color='black', marker='o', markersize=15)
plt.plot([5, 10, 20, 30, 40, 60, 100, 120, 140, 180, 200, 300],
         t / t_hd,
         '--b',
         color='gray',
         marker='D',
         markersize=15)
plt.plot(bases, np.ones_like(bases), '--', color='black')
plt.yscale('log')
p.set_ylim([-0.5, 1.2])
p.set_xlim([0, 301])
#p.set_title(r"\textbf{Average scaled CPU time for one model year}",**axis_font)
plt.legend(["Fixed DEIM 150", "Fixed POD 150", "FOM"], loc='best')
plt.xlabel(r"\textbf{Size of other base}", **axis_font)
plt.ylabel(r"\textbf{CPU time in sec}", **axis_font)

plt.savefig('avarage_cpu_time.png', bbox_inches='tight')
plt.show()
Code example #32
File: 1_model.py Project: dartoon/my_code
        psf_peak = np.where(psf == psf.max())
        psf_peak = [psf_peak[0][0], psf_peak[1][0]]
        psf = psf[psf_peak[0] - psf_half_r:psf_peak[0] + psf_half_r + 1,
                  psf_peak[1] - psf_half_r:psf_peak[1] + psf_half_r + 1]

        kwargs_data = sim_util.data_configure_simple(
            numPix=framesize, deltaPix=deltaPix)  #,inverse=True)
        kwargs_data['image_data'] = lens_data
        kwargs_data['noise_map'] = len_std

        data_class = ImageData(**kwargs_data)
        kwargs_psf = {
            'psf_type': 'PIXEL',
            'kernel_point_source': psf,
            'pixel_size': deltaPix,
            'psf_error_map': np.ones_like(psf) * 0.01
        }
        psf_class = PSF(**kwargs_psf)

        #%%
        # lens model choicers
        fixed_lens = []
        kwargs_lens_init = []
        kwargs_lens_sigma = []
        kwargs_lower_lens = []
        kwargs_upper_lens = []
        fixed_lens.append({})
        fixed_lens.append({'ra_0': 0, 'dec_0': 0})
        kwargs_lens_init = kwargs_lens_list
        kwargs_lens_sigma.append({
            'theta_E': .2,
Code example #33
X = featArray(data)
print(X.shape)
print('X feature array loaded.')
#dpgmm = mixture.BayesianGaussianMixture(n_components=6,covariance_type='full',n_init=1,max_iter=1000,init_params='kmeans',weight_concentration_prior_type='dirichlet_process').fit(X)
#dpgmm = mixture.GaussianMixture(n_components=10,covariance_type='full',n_init=10,max_iter=1000,init_params='kmeans').fit(X)
#labels = dpgmm.predict(X)

km = KMeans(n_clusters=15).fit(X)
labels = km.labels_
#db = DBSCAN(eps=0.1,min_samples=200).fit(X)
#labels = db.labels_

sh = n.shape(data)
ml = findMaxLabel(labels)
labels = labels.reshape(sh[0], sh[1])
mask = n.ones_like(data)
#mask[labels!=ml] = 0
fig = plt.figure()
plt.ion()

options = {'y': -1, 'n': 100, 'f': 1000}
labelsC = n.copy(labels)
RFIlabels = []
for c in n.unique(labels):
    instaMask = n.ones_like(data)
    instaMask[labels != c] = 0
    plt.imshow(n.log10(n.abs(data * instaMask)), aspect='auto', cmap='jet')
    plt.show()
    text = input(str(options))
    #if text in options.keys():
    labelsC[labels == c] = options[text]
Code example #34
File: TeatDip.py Project: bubblefoundry/MilkMachine
import numpy as np

def rolling_window(array, window=(0,), asteps=None, wsteps=None, axes=None, toend=True):
    """Create a view of `array` which for every point gives the n-dimensional
    neighbourhood of size window. New dimensions are added at the end of
    `array` or after the corresponding original dimension.

    Parameters
    ----------
    array : array_like
        Array to which the rolling window is applied.
    window : int or tuple
        Either a single integer to create a window of only the last axis or a
        tuple to create it for the last len(window) axes. 0 can be used
        to ignore a dimension in the window.
    asteps : tuple
        Aligned at the last axis, new steps for the original array, i.e. for
        creation of non-overlapping windows. (Equivalent to slicing the result.)
    wsteps : int or tuple (same size as window)
        steps for the added window dimensions. These can be 0 to repeat values
        along the axis.
    axes: int or tuple
        If given, must have the same size as window. In this case window is
        interpreted as the size in the dimension given by axes, i.e. a window
        of (2, 1) is equivalent to window=2 and axis=-2.
    toend : bool
        If False, the new dimensions are right after the corresponding original
        dimension, instead of at the end of the array. Adding the new axes at the
        end makes it easier to get the neighborhood, however toend=False will give
        a more intuitive result if you view the whole array.

    Returns
    -------
    A view on `array` which is smaller to fit the windows and has the window
    dimensions added (0s not counting), i.e. every point of `array` is an
    array of size window.

    Examples
    --------
    >>> a = np.arange(9).reshape(3,3)
    >>> rolling_window(a, (2,2))
    array([[[[0, 1],
             [3, 4]],

            [[1, 2],
             [4, 5]]],


           [[[3, 4],
             [6, 7]],

            [[4, 5],
             [7, 8]]]])

    Or to create non-overlapping windows, but only along the first dimension:
    >>> rolling_window(a, (2,0), asteps=(2,1))
    array([[[0, 3],
            [1, 4],
            [2, 5]]])

    Note that the 0 is discarded, so that the output dimension is 3:
    >>> rolling_window(a, (2,0), asteps=(2,1)).shape
    (1, 3, 2)

    This is useful for example to calculate the maximum in all (overlapping)
    2x2 submatrices:
    >>> rolling_window(a, (2,2)).max((2,3))
    array([[4, 5],
           [7, 8]])

    Or delay embedding (3D embedding with delay 2):
    >>> x = np.arange(10)
    >>> rolling_window(x, 3, wsteps=2)
    array([[0, 2, 4],
           [1, 3, 5],
           [2, 4, 6],
           [3, 5, 7],
           [4, 6, 8],
           [5, 7, 9]])
    """
    array = np.asarray(array)
    orig_shape = np.asarray(array.shape)
    window = np.atleast_1d(window).astype(int) # maybe crude to cast to int...

    if axes is not None:
        axes = np.atleast_1d(axes)
        w = np.zeros(array.ndim, dtype=int)
        for axis, size in zip(axes, window):
            w[axis] = size
        window = w

    # Check if window is legal:
    if window.ndim > 1:
        raise ValueError("`window` must be one-dimensional.")
    if np.any(window < 0):
        raise ValueError("All elements of `window` must be larger then 1.")
    if len(array.shape) < len(window):
        raise ValueError("`window` length must be less or equal `array` dimension.")

    _asteps = np.ones_like(orig_shape)
    if asteps is not None:
        asteps = np.atleast_1d(asteps)
        if asteps.ndim != 1:
            raise ValueError("`asteps` must be either a scalar or one dimensional.")
        if len(asteps) > array.ndim:
            raise ValueError("`asteps` cannot be longer then the `array` dimension.")
        # does not enforce alignment, so that steps can be same as window too.
        _asteps[-len(asteps):] = asteps

        if np.any(asteps < 1):
            raise ValueError("All elements of `asteps` must be at least 1.")
    asteps = _asteps

    _wsteps = np.ones_like(window)
    if wsteps is not None:
        wsteps = np.atleast_1d(wsteps)
        if wsteps.shape != window.shape:
            raise ValueError("`wsteps` must have the same shape as `window`.")
        if np.any(wsteps < 0):
            raise ValueError("All elements of `wsteps` must be non-negative.")

        _wsteps[:] = wsteps
        _wsteps[window == 0] = 1 # make sure that steps are 1 for non-existing dims.
    wsteps = _wsteps

    # Check that the window would not be larger then the original:
    if np.any(orig_shape[-len(window):] < window * wsteps):
        raise ValueError("`window` * `wsteps` larger then `array` in at least one dimension.")

    new_shape = orig_shape # just renaming...

    # For calculating the new shape 0s must act like 1s:
    _window = window.copy()
    _window[_window==0] = 1

    new_shape[-len(window):] += wsteps - _window * wsteps
    new_shape = (new_shape + asteps - 1) // asteps
    # make sure the new_shape is at least 1 in any "old" dimension (i.e. steps
    # may be (too) large, but we do not care).
    new_shape[new_shape < 1] = 1
    shape = new_shape

    strides = np.asarray(array.strides)
    strides *= asteps
    new_strides = array.strides[-len(window):] * wsteps

    # The full new shape and strides:
    if toend:
        new_shape = np.concatenate((shape, window))
        new_strides = np.concatenate((strides, new_strides))
    else:
        _ = np.zeros_like(shape)
        _[-len(window):] = window
        _window = _.copy()
        _[-len(window):] = new_strides
        _new_strides = _

        new_shape = np.zeros(len(shape)*2, dtype=int)
        new_strides = np.zeros(len(shape)*2, dtype=int)

        new_shape[::2] = shape
        new_strides[::2] = strides
        new_shape[1::2] = _window
        new_strides[1::2] = _new_strides

    new_strides = new_strides[new_shape != 0]
    new_shape = new_shape[new_shape != 0]

    return np.lib.stride_tricks.as_strided(array, shape=new_shape, strides=new_strides)
Code example #35
def release_loc_ocean(param, fh):
    # Generates initial coordinates for particle releases at a single time
    # frame for the ocean. We release within a geographical region of interest
    # (taking into account the land mask) and assign the particle ID. The
    # particle ID can later be used to assign the fishing intensity value.

    pn = param['pn']

    # Firstly load the requisite fields:
    # - rho & psi coordinates
    # - lsm_rho mask
    # - id_psi mask (cell ids)

    with Dataset(fh['grid'], mode='r') as nc:
        lon_psi = np.array(nc.variables['lon_psi'][:])
        lat_psi = np.array(nc.variables['lat_psi'][:])

        lon_rho = np.array(nc.variables['lon_rho'][:])

        id_psi = np.array(nc.variables['source_id_psi'][:])
        lsm_psi = np.array(nc.variables['lsm_psi'][:])

    marine_release_loc = np.ones_like(id_psi, dtype=np.int32)

    plt.imshow(lsm_psi)
    plt.scatter(2819, 934)

    # Mask with limits
    y_idx_max = np.searchsorted(lat_psi, param['lat_north'])
    y_idx_min = np.searchsorted(lat_psi, param['lat_south'])
    x_idx_max = np.searchsorted(lon_psi, param['lon_east'])
    x_idx_min = np.searchsorted(lon_psi, param['lon_west'])

    marine_release_loc[:y_idx_min, :] = 0
    marine_release_loc[y_idx_max:, :] = 0
    marine_release_loc[:, :x_idx_min] = 0
    marine_release_loc[:, x_idx_max:] = 0

    # Remove Mediterranean
    x_idx_max_med = np.searchsorted(lon_psi, 60)
    y_idx_min_med = np.searchsorted(lat_psi, 30)

    marine_release_loc[y_idx_min_med:, :x_idx_max_med] = 0

    # Mask with lsm
    marine_release_loc *= (1 - lsm_psi)

    # We also need to calculate how many particles are being released within
    # this GFW cell (i.e. out of 144*pn_cell)

    # Assign a unique ID to each 1x1 degree (GFW) cell
    gfw_id = np.arange(np.shape(id_psi)[0] * np.shape(id_psi)[1] / 144,
                       dtype=np.int32)
    gfw_id = gfw_id.reshape((int(np.shape(id_psi)[0] / 12), -1))

    # Expand to 1/12 grid and mask with lsm
    gfw_id12 = np.kron(gfw_id, np.ones((12, 12), dtype=np.int32))
    gfw_id12[lsm_psi == 1] = -1
    gfw_id12[:y_idx_min, :] = -1
    gfw_id12[y_idx_max:, :] = -1
    gfw_id12[:, :x_idx_min] = -1
    gfw_id12[:, x_idx_max:] = -1

    gfw_uniques = np.unique(gfw_id12, return_counts=True)
    gfw_dict = dict(zip(gfw_uniques[0], gfw_uniques[1]))

    # Extract cell grid indices
    idx = list(np.where(marine_release_loc == 1))

    # Calculate the total number of particles
    nl = idx[0].shape[0]  # Number of locations
    id_list = id_psi[tuple(idx)]

    pn_cell = pn**2
    pn_tot = nl * pn_cell

    print('')
    print('Total number of particles generated per release: ' + str(pn_tot))

    dX = lon_rho[1] - lon_rho[0]  # Grid spacing

    lon_out = np.zeros((pn_tot, ), dtype=np.float64)
    lat_out = np.zeros((pn_tot, ), dtype=np.float64)
    id_out = np.zeros((pn_tot, ), dtype=np.int32)
    np_per_gfw_out = np.zeros((pn_tot, ), dtype=np.int32)

    for loc in range(nl):
        # Find cell location
        loc_yidx = idx[0][loc]
        loc_xidx = idx[1][loc]

        # Calculate initial positions
        dx = dX / pn  # Particle spacing
        gx = np.linspace((-dX / 2 + dx / 2), (dX / 2 - dx / 2), num=pn)
        gridx, gridy = [grid.flatten() for grid in np.meshgrid(gx, gx)]

        loc_y = lat_psi[loc_yidx]
        loc_x = lon_psi[loc_xidx]

        loc_id = id_psi[loc_yidx, loc_xidx]
        gfw_id_cell = gfw_id12[loc_yidx, loc_xidx]

        s_idx = loc * pn_cell
        e_idx = (loc + 1) * pn_cell

        lon_out[s_idx:e_idx] = gridx + loc_x
        lat_out[s_idx:e_idx] = gridy + loc_y
        id_out[s_idx:e_idx] = np.ones(np.shape(gridx), dtype=np.int32) * loc_id
        np_per_gfw_out[s_idx:e_idx] = np.ones(
            np.shape(gridx), dtype=np.int32) * gfw_dict[gfw_id_cell]

    pos0 = {
        'lon': lon_out,
        'lat': lat_out,
        'id': id_out,
        'gfw': np_per_gfw_out
    }

    return pos0
Code example #36
File: optimization.py Project: yjx520/simpeg
    def findSearchDirection(self):
        """
            findSearchDirection()
            Finds the search direction based on projected CG
        """

        Active = self.activeSet(self.xc)
        temp = sum(np.ones_like(self.xc) - Active)  # count of inactive cells

        step = np.zeros(self.g.size)
        resid = -(1 - Active) * self.g

        r = resid - (1 - Active) * (self.H * step)

        p = self.approxHinv * r

        sold = np.dot(r, p)

        count = 0

        while np.all([np.linalg.norm(r) > self.tolCG, count < self.maxIterCG]):

            count += 1

            q = (1 - Active) * (self.H * p)

            alpha = sold / (np.dot(p, q))

            step += alpha * p

            r -= alpha * q

            h = self.approxHinv * r

            snew = np.dot(r, h)

            p = h + (snew / sold * p)

            sold = snew
            # End CG Iterations
        self.cg_count += count

        # Take a gradient step on the active cells if exist
        if temp != self.xc.size:

            rhs_a = (Active) * -self.g

            dm_i = max(abs(step))
            dm_a = max(abs(rhs_a))

            # perturb inactive set off of bounds so that they are included
            # in the step
            step = step + self.stepOffBoundsFact * (rhs_a * dm_i / dm_a)

        # Only keep gradients going in the right direction on the active
        # set
        indx = ((self.xc <= self.lower) &
                (step < 0)) | ((self.xc >= self.upper) & (step > 0))
        step[indx] = 0.0

        return step
Code example #37
File: optimization.py Project: yjx520/simpeg
 def _startup(self, x0):
     # ensure bound vectors are the same size as the model
     if not isinstance(self.lower, np.ndarray):
         self.lower = np.ones_like(x0) * self.lower
     if not isinstance(self.upper, np.ndarray):
         self.upper = np.ones_like(x0) * self.upper
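A small demonstration of the expansion, with hypothetical bounds: a scalar bound becomes an array matching the model vector:

import numpy as np

x0 = np.zeros(5)
lower = np.ones_like(x0) * (-1.0)    # scalar bound -> array of -1.0s
assert lower.shape == x0.shape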
Code example #38
File: Robot.py Project: Hanke98/3D-LIPM
    def reset(self):
        self.count = 0
        self.lipm.reset()
        [c.reset() for c in self.chains]


if __name__ == '__main__':
    root = [0, 0.04, 0.4]
    robot = Robot(root)
    robot.config('config/robot.json')
    ax.set_xlim(0, 1)
    ax.set_ylim(-0.25, 0.25)
    ax.set_zlim(0, 0.5)
    com = ax.plot(robot.com_x, robot.com_y,
                  np.ones_like(robot.com_x) * robot.h)
    left_plt, = ax.plot([0, 0], [0, 0], [0, 0], 'ro-')
    right_plt, = ax.plot([0, 0], [0, 0], [0, 0], 'go-')
    lt_plt, = ax.plot([0, 0], [0, 0], [0, 0], 'b--')
    rt_plt, = ax.plot([0, 0], [0, 0], [0, 0], 'b--')

    lfx = []
    lfy = []
    lfz = []
    rfx = []
    rfy = []
    rfz = []

    def update(newd):
        left_plt.set_data(newd[0][0], newd[0][1])
        left_plt.set_3d_properties(newd[0][-1])
Code example #39
File: postprocess.py Project: mattkinsey/bucky
def main(args=None):
    """Main method for postprocessing the raw outputs from an MC run."""
    if args is None:
        args = sys.argv[1:]
    args = parser.parse_args(args)

    # Start parsing args
    quantiles = args.quantiles
    verbose = args.verbose
    prefix = args.prefix
    use_gpu = args.gpu

    if verbose:
        logging.info(args)

    # File Management
    top_output_dir = args.output

    # Check if it exists, make if not
    if not os.path.exists(top_output_dir):
        os.makedirs(top_output_dir)

    # Use lookup, add prefix
    # TODO need to handle lookup weights
    if args.lookup is not None:
        lookup_df = read_lookup(args.lookup)
        if prefix is None:
            prefix = Path(args.lookup).stem
    # TODO if args.lookup we need to check it for weights

    # Create subfolder for this run using UUID of run
    uuid = args.file.split("/")[-2]

    if prefix is not None:
        uuid = prefix + "_" + uuid

    # Create directory if it doesn't exist
    output_dir = os.path.join(top_output_dir, uuid)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    data_dir = os.path.join(args.file, "data/")
    metadata_dir = os.path.join(args.file, "metadata/")

    adm_mapping = pd.read_csv(os.path.join(metadata_dir, "adm_mapping.csv"))
    dates = pd.read_csv(os.path.join(metadata_dir, "dates.csv"))
    dates = dates["date"].to_numpy()

    n_adm2 = len(adm_mapping)
    adm2_sorted_ind = xp.argsort(xp.array(adm_mapping["adm2"].to_numpy()))

    if use_gpu:
        enable_cupy(optimize=True)
        reimport_numerical_libs("postprocess")

    per_capita_cols = [
        "cumulative_reported_cases",
        "cumulative_deaths",
        "current_hospitalizations",
        "daily_reported_cases",
        "daily_deaths",
        "vacc_dose1",
        "vacc_dose2",
        "immune",
    ]
    pop_weighted_cols = [
        "case_reporting_rate",
        "R_eff",
        "frac_vacc_dose1",
        "frac_vacc_dose2",
        "frac_vacc_dose1_65",
        "frac_vacc_dose2_65",
        "frac_immune",
        "frac_immune_65",
        "state_phase",
    ]

    adm_mapping["adm0"] = 1
    adm_map = adm_mapping.to_dict(orient="list")
    adm_map = {k: xp.array(v)[adm2_sorted_ind] for k, v in adm_map.items()}
    adm_array_map = {
        k: xp.unique(v, return_inverse=True)[1]
        for k, v in adm_map.items()
    }
    adm_sizes = {
        k: xp.to_cpu(xp.max(v) + 1).item()
        for k, v in adm_array_map.items()
    }
    adm_level_values = {k: xp.to_cpu(xp.unique(v)) for k, v in adm_map.items()}
    adm_level_values["adm0"] = np.array(["US"])

    if args.lookup is not None and "weight" in lookup_df.columns:
        weight_series = lookup_df.set_index("adm2")["weight"].reindex(
            adm_mapping["adm2"], fill_value=0.0)
        weights = np.array(weight_series.to_numpy(), dtype=np.float32)
        # TODO we should ignore all the adm2 not in weights rather than just 0ing them (it'll go a lot faster)
    else:
        weights = np.ones_like(adm2_sorted_ind, dtype=np.float32)

    write_queue = queue.Queue()

    def _writer():
        """Write thread that will pull from a queue."""
        # Call to_write.get() until it returns None
        file_tables = {}
        for fname, q_dict in iter(write_queue.get, None):
            df = pd.DataFrame(q_dict)
            id_col = df.columns[df.columns.str.contains("adm.")].values[0]
            df = df.set_index([id_col, "date", "quantile"])
            df = df.reindex(sorted(df.columns), axis=1)
            if fname in file_tables:
                tmp = pa.table(q_dict)
                file_tables[fname] = pa.concat_tables(
                    [file_tables[fname], tmp])
            else:
                file_tables[fname] = pa.table(q_dict)
            write_queue.task_done()

        # dump tables to disk
        for fname in tqdm.tqdm(file_tables):
            df = file_tables[fname].to_pandas()
            id_col = df.columns[df.columns.str.contains("adm.")].values[0]
            df = df.set_index([id_col, "date", "quantile"])
            df = df.reindex(sorted(df.columns), axis=1)
            df.to_csv(fname, header=True, mode="w")
        write_queue.task_done()

    write_thread = threading.Thread(target=_writer)
    write_thread.start()

    # TODO this depends on out of scope vars, need to clean that up
    def pa_array_quantiles(array, level):
        """Calculate the quantiles of a pyarrow array after shipping it to the GPU."""
        data = array.to_numpy().reshape(-1, n_adm2)
        data = data[:, adm2_sorted_ind]

        data_gpu = xp.array(data.T)

        if adm_sizes[level] == 1:
            # TODO need switching here b/c cupy handles xp.percentile weird with a size 1 dim :(
            if use_gpu:
                level_data_gpu = xp.sum(data_gpu, axis=0)  # need this if cupy
            else:
                level_data_gpu = xp.sum(data_gpu, axis=0,
                                        keepdims=True).T  # for numpy
            q_data_gpu = xp.empty((len(percentiles), adm_sizes[level]),
                                  dtype=level_data_gpu.dtype)
            # It appears theres a cupy bug when the 1st axis of the array passed to percentiles has size 1
            xp.percentile(level_data_gpu,
                          q=percentiles,
                          axis=0,
                          out=q_data_gpu)
        else:
            level_data_gpu = xp.zeros((adm_sizes[level], data_gpu.shape[1]),
                                      dtype=data_gpu.dtype)
            xp.scatter_add(level_data_gpu, adm_array_map[level], data_gpu)
            q_data_gpu = xp.empty((len(percentiles), adm_sizes[level]),
                                  dtype=level_data_gpu.dtype)
            xp.percentile(level_data_gpu,
                          q=percentiles,
                          axis=1,
                          out=q_data_gpu)
        return q_data_gpu

    try:
        percentiles = xp.array(quantiles, dtype=np.float64) * 100.0
        quantiles = np.array(quantiles)
        for date_i, date in enumerate(tqdm.tqdm(dates)):
            dataset = ds.dataset(data_dir,
                                 format="parquet",
                                 partitioning=["date"])
            table = dataset.to_table(filter=ds.field("date") == "date=" +
                                     str(date_i))
            table = table.drop(
                ("date", "rid", "adm2_id"))  # we don't need these b/c metadata
            pop_weight_table = table.select(pop_weighted_cols)
            table = table.drop(pop_weighted_cols)

            w = np.ravel(
                np.broadcast_to(
                    weights,
                    (table.shape[0] // weights.shape[0], weights.shape[0])))
            for i, col in enumerate(table.column_names):
                if pat.is_float64(table.column(i).type):
                    typed_w = w.astype(np.float64)
                else:
                    typed_w = w.astype(np.float32)

                tmp = pac.multiply_checked(table.column(i), typed_w)
                table = table.set_column(i, col, tmp)

            for col in pop_weighted_cols:
                if pat.is_float64(pop_weight_table[col].type):
                    typed_w = table["total_population"].to_numpy().astype(
                        np.float64)
                else:
                    typed_w = table["total_population"].to_numpy().astype(
                        np.float32)
                tmp = pac.multiply_checked(pop_weight_table[col], typed_w)
                table = table.append_column(col, tmp)

            for level in args.levels:
                all_q_data = {}
                for col in table.column_names:  # TODO can we do all at once since we dropped date?
                    all_q_data[col] = pa_array_quantiles(table[col], level)

                # all_q_data = {col: pa_array_quantiles(table[col]) for col in table.column_names}

                # we could do this outside the date loop and cache for each adm level...
                out_shape = (
                    len(percentiles), ) + adm_level_values[level].shape
                all_q_data[level] = np.broadcast_to(adm_level_values[level],
                                                    out_shape)
                all_q_data["date"] = np.broadcast_to(date, out_shape)
                all_q_data["quantile"] = np.broadcast_to(
                    quantiles[..., None], out_shape)

                for col in per_capita_cols:
                    all_q_data[col + "_per_100k"] = 100000.0 * all_q_data[
                        col] / all_q_data["total_population"]

                for col in pop_weighted_cols:
                    all_q_data[
                        col] = all_q_data[col] / all_q_data["total_population"]

                for col in all_q_data:
                    all_q_data[col] = xp.to_cpu(all_q_data[col].T.ravel())

                write_queue.put(
                    (os.path.join(output_dir,
                                  level + "_quantiles.csv"), all_q_data))

            del dataset
            gc.collect()

    except (KeyboardInterrupt, SystemExit):
        logging.warning("Caught SIGINT, cleaning up")
    finally:
        write_queue.put(None)  # signal the writer loop to terminate
        write_thread.join()  # wait for the writer thread to finish
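The grouped reduction inside `pa_array_quantiles` can be sketched in plain NumPy, with `np.add.at` standing in for `xp.scatter_add` (shapes and the adm2-to-adm1 map below are made up for illustration):

import numpy as np

rng = np.random.default_rng(0)
n_adm2, n_samples, n_adm1 = 6, 4, 2
data = rng.random((n_adm2, n_samples))       # per-adm2 Monte Carlo samples
adm1_of_adm2 = np.array([0, 0, 0, 1, 1, 1])  # adm2 -> adm1 index map

# Sum every adm2 row into its parent adm1 row (unbuffered scatter-add).
level_data = np.zeros((n_adm1, n_samples))
np.add.at(level_data, adm1_of_adm2, data)

# Quantiles across the sample axis, one column per adm1 region.
q = np.percentile(level_data, q=[25, 50, 75], axis=1)  # shape (3, n_adm1)
print(q)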
コード例 #40
0
def Sinf(x, t):
    tt = t * np.ones_like(x)
    xx = tt - x

    return np.sin(xx)
コード例 #41
0
def applyoptions(md, data, options, fig, axgrid, gridindex):
    '''
	APPLYOPTIONS - apply options to the current plot

	'axgrid[gridindex]' is the axes instance that the options are applied
	to; it is also used for adding a colorbar.

		Usage:
			applyoptions(md, data, options, fig, axgrid, gridindex)

		See also: PLOTMODEL, PARSE_OPTIONS
	'''

    # get handle to current figure and axes instance
    #fig = p.gcf()
    ax = axgrid[gridindex]

    # {{{ font
    fontsize = options.getfieldvalue('fontsize', 8)
    fontweight = options.getfieldvalue('fontweight', 'normal')
    fontfamily = options.getfieldvalue('fontfamily', 'sans-serif')
    font = {
        'fontsize': fontsize,
        'fontweight': fontweight,
        'family': fontfamily
    }
    # }}}
    # {{{ title
    if options.exist('title'):
        title = options.getfieldvalue('title')
        if options.exist('titlefontsize'):
            titlefontsize = options.getfieldvalue('titlefontsize')
        else:
            titlefontsize = fontsize
        if options.exist('titlefontweight'):
            titlefontweight = options.getfieldvalue('titlefontweight')
        else:
            titlefontweight = fontweight
        #title font
        titlefont = font.copy()
        titlefont['size'] = titlefontsize
        titlefont['weight'] = titlefontweight
        ax.set_title(title, **titlefont)
    # }}}
    # {{{ xlabel, ylabel, zlabel
    if options.exist('labelfontsize'):
        labelfontsize = options.getfieldvalue('labelfontsize')
    else:
        labelfontsize = fontsize
    if options.exist('labelfontweight'):
        labelfontweight = options.getfieldvalue('labelfontweight')
    else:
        labelfontweight = fontweight

    #font dict for labels
    labelfont = font.copy()
    labelfont['fontsize'] = labelfontsize
    labelfont['fontweight'] = labelfontweight

    if options.exist('xlabel'):
        ax.set_xlabel(options.getfieldvalue('xlabel'), **labelfont)
    if options.exist('ylabel'):
        ax.set_ylabel(options.getfieldvalue('ylabel'), **labelfont)
    if options.exist('zlabel'):
        ax.set_zlabel(options.getfieldvalue('zlabel'), **labelfont)
    # }}}
    # {{{ xticks, yticks, zticks (tick locations)
    if options.exist('xticks'):
        if options.exist('xticklabels'):
            xticklabels = options.getfieldvalue('xticklabels')
            ax.set_xticks(options.getfieldvalue('xticks'), xticklabels)
        else:
            ax.set_xticks(options.getfieldvalue('xticks'))
    if options.exist('yticks'):
        if options.exist('yticklabels'):
            yticklabels = options.getfieldvalue('yticklabels')
            ax.set_yticks(options.getfieldvalue('yticks'), yticklabels)
        else:
            ax.set_yticks(options.getfieldvalue('yticks'))
    if options.exist('zticks'):
        if options.exist('zticklabels'):
            zticklabels = options.getfieldvalue('zticklabels')
            ax.set_zticks(options.getfieldvalue('zticks'), zticklabels)
        else:
            ax.set_zticks(options.getfieldvalue('zticks'))
    # }}}
    # {{{ xticklabels,yticklabels,zticklabels
    if options.getfieldvalue('ticklabels',
                             'off') == 'off' or options.getfieldvalue(
                                 'ticklabels', 0) == 0:
        options.addfielddefault('xticklabels', [])
        options.addfielddefault('yticklabels', [])
        # TODO check if ax has a z-axis (e.g. is 3D)
    if options.exist('xticklabels'):
        xticklabels = options.getfieldvalue('xticklabels')
        ax.set_xticklabels(xticklabels)
    if options.exist('yticklabels'):
        yticklabels = options.getfieldvalue('yticklabels')
        ax.set_yticklabels(yticklabels)
    if options.exist('zticklabels'):
        zticklabels = options.getfieldvalue('zticklabels')
        ax.set_zticklabels(zticklabels)
    # }}}
    # {{{ ticklabel notation
    #ax.ticklabel_format(style='sci',scilimits=(0,0))
    # }}}
    # {{{ ticklabelfontsize
    if options.exist('ticklabelfontsize'):
        for label in ax.get_xticklabels() + ax.get_yticklabels():
            label.set_fontsize(options.getfieldvalue('ticklabelfontsize'))
        if int(md.mesh.dimension) == 3:
            for label in ax.get_zticklabels():
                label.set_fontsize(options.getfieldvalue('ticklabelfontsize'))
    # }}}
    # {{{ view TOFIX
    #if int(md.mesh.dimension) == 3 and options.exist('layer'):
    #	#options.getfieldvalue('view') ?
    #	ax=fig.gca(projection='3d')
    #plt.show()
    # }}}
    # {{{ axis
    if options.exist('axis'):
        if options.getfieldvalue('axis', True) == 'off':
            ax.ticklabel_format(style='plain')
            p.setp(ax.get_xticklabels(), visible=False)
            p.setp(ax.get_yticklabels(), visible=False)
    # }}}
    # {{{ box
    if options.exist('box'):
        eval(options.getfieldvalue('box'))
    # }}}
    # {{{ xlim, ylim, zlim
    if options.exist('xlim'):
        ax.set_xlim(options.getfieldvalue('xlim'))
    if options.exist('ylim'):
        ax.set_ylim(options.getfieldvalue('ylim'))
    if options.exist('zlim'):
        ax.set_zlim(options.getfieldvalue('zlim'))
    # }}}
    # {{{ latlon TODO
    # }}}
    # {{{ Basinzoom TODO
    # }}}
    # {{{ ShowBasins TODO
    # }}}
    # {{{ clim
    if options.exist('clim'):
        lims = options.getfieldvalue('clim')
        assert len(
            lims) == 2, 'error, clim should be passed as a list of length 2'
    elif options.exist('caxis'):
        lims = options.getfieldvalue('caxis')
        assert len(
            lims) == 2, 'error, caxis should be passed as a list of length 2'
        options.addfielddefault('clim', lims)
    else:
        if len(data) > 0:
            lims = [data.min(), data.max()]
        else:
            lims = [0, 1]
    # }}}
    # {{{ shading TODO
    #if options.exist('shading'):
    # }}}
    # {{{ grid
    if options.exist('grid'):
        if 'on' in options.getfieldvalue('grid', 'on'):
            ax.grid()
    # }}}
    # {{{ colormap
    norm = None  # fall back to matplotlib defaults if not supplied
    cmap = None
    if options.exist('colornorm'):
        norm = options.getfieldvalue('colornorm')
    if options.exist('colormap'):
        cmap = options.getfieldvalue('colormap')
    cbar_extend = 0
    if options.exist('cmap_set_over'):
        cbar_extend += 1
    if options.exist('cmap_set_under'):
        cbar_extend += 2
    # }}}
    # {{{ contours
    if options.exist('contourlevels'):
        plot_contour(md, data, options, ax)
    # }}}
    # {{{ wrapping TODO
    # }}}
    # {{{ colorbar
    if options.getfieldvalue('colorbar', 1) == 1:
        if cbar_extend == 0:
            extend = 'neither'
        elif cbar_extend == 1:
            extend = 'max'
        elif cbar_extend == 2:
            extend = 'min'
        elif cbar_extend == 3:
            extend = 'both'
        cb = mpl.colorbar.ColorbarBase(ax.cax,
                                       cmap=cmap,
                                       norm=norm,
                                       extend=extend)
        if options.exist('alpha'):
            cb.set_alpha(options.getfieldvalue('alpha'))
        if options.exist('colorbarnumticks'):
            cb.locator = MaxNLocator(
                nbins=options.getfieldvalue('colorbarnumticks', 5))
        else:
            cb.locator = MaxNLocator(nbins=5)  # default 5 ticks
        if options.exist('colorbartickspacing'):
            locs = np.arange(lims[0], lims[1] + 1,
                             options.getfieldvalue('colorbartickspacing'))
            cb.set_ticks(locs)
        if options.exist('colorbarlines'):
            locs = np.arange(lims[0], lims[1] + 1,
                             options.getfieldvalue('colorbarlines'))
            cb.add_lines(locs, ['k' for i in range(len(locs))],
                         np.ones_like(locs))
        if options.exist('colorbarlineatvalue'):
            locs = options.getfieldvalue('colorbarlineatvalue')
            colors = options.getfieldvalue('colorbarlineatvaluecolor',
                                           ['k' for i in range(len(locs))])
            widths = options.getfieldvalue('colorbarlineatvaluewidth',
                                           np.ones_like(locs))
            cb.add_lines(locs, colors, widths)
        if options.exist('colorbartitle'):
            if options.exist('colorbartitlepad'):
                cb.set_label(
                    options.getfieldvalue('colorbartitle'),
                    labelpad=options.getfieldvalue('colorbartitlepad'),
                    fontsize=fontsize)
            else:
                cb.set_label(options.getfieldvalue('colorbartitle'),
                             fontsize=fontsize)
        cb.ax.tick_params(labelsize=fontsize)
        cb.solids.set_rasterized(True)
        cb.update_ticks()
        cb.set_alpha(1)
        cb.draw_all()
        if options.exist('colorbarfontsize'):
            colorbarfontsize = options.getfieldvalue('colorbarfontsize')
            cb.ax.tick_params(labelsize=colorbarfontsize)
            # cb.set_ticks([0,-10])
            # cb.set_ticklabels([-10,0,10])
        if options.exist('colorbarticks'):
            colorbarticks = options.getfieldvalue('colorbarticks')
            cb.set_ticks(colorbarticks)
        plt.sca(ax)  # return to original axes control
    # }}}
    # {{{ expdisp
    if options.exist('expdisp'):
        expdisp(ax, options)
    # }}}
    # {{{ area TODO
    # }}}
    # {{{ text
    if options.exist('text'):
        text = options.getfieldvalue('text')
        textx = options.getfieldvalue('textx')
        texty = options.getfieldvalue('texty')
        textcolor = options.getfieldvalue('textcolor')
        textweight = options.getfieldvalue('textweight')
        textrotation = options.getfieldvalue('textrotation')
        textfontsize = options.getfieldvalue('textfontsize')
        for label, x, y, size, color, weight, rotation in zip(
                text, textx, texty, textfontsize, textcolor, textweight,
                textrotation):
            ax.text(x,
                    y,
                    label,
                    transform=ax.transAxes,
                    fontsize=size,
                    color=color,
                    weight=weight,
                    rotation=rotation)
    # }}}
    # {{{ north arrow TODO
    # }}}
    # {{{ scale ruler TODO
    # }}}
    # {{{ streamlines TOFIX
    if options.exist('streamlines'):
        plot_streamlines(md, options, ax)
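For reference, the colorbar construction above follows a standard Matplotlib pattern; a minimal standalone sketch of it (not ISSM code; note that on recent Matplotlib versions `ColorbarBase` is deprecated in favor of `Colorbar`):

import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator

fig, ax = plt.subplots(figsize=(1.5, 4.0))
norm = mpl.colors.Normalize(vmin=0.0, vmax=1.0)
cb = mpl.colorbar.ColorbarBase(ax, cmap=plt.cm.viridis, norm=norm, extend='both')
cb.locator = MaxNLocator(nbins=5)  # at most 5 "nice" tick locations
cb.update_ticks()
plt.show()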
コード例 #42
0
def Sinb(x, t):
    tt = t * np.ones_like(x)
    xx = tt + x

    return np.sin(xx)
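Read together (an observation about the pair, not stated in either source file), `Sinf` and `Sinb` evaluate the forward- and backward-traveling components of a unit-speed 1-D wave, and their sum is a standing wave:

$$\sin(t - x) + \sin(t + x) = 2\,\sin t\,\cos x$$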
コード例 #43
0
ファイル: states.py プロジェクト: milainny/mannar
def newCircles(imgOrg, org, circles, accept):
    newCircles = []
    cir = []
    accept = set(accept)
    imgRes = copy.deepcopy(imgOrg)
    for i in range(len(circles)):
        x0 = circles[i][1][0]
        y0 = circles[i][1][1]
        radius = circles[i][1][2]
        name = (str(x0) + str(y0))
        acc = False
        if (name in accept):
            acc = True
        nimg = np.ones_like(imgOrg)*255
        nimg = np.where(imgOrg == 0, 0, nimg)
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
        nimg = cv2.morphologyEx(nimg, cv2.MORPH_ERODE, kernel)

        lin = 1
        if(nimg[x0][y0] == 0):
            lin +=10
            while True:
                if(lin > radius):
                    lin -= 1
                else:
                    break
            while True:
                if(nimg[x0+lin][y0] == 255):
                    break
                else:
                    lin -= 1
        else:
            per = radius*50//100
            while True:
                lin +=1
                r = x0 + lin
                if(nimg[r][y0] == 0):
                    break
                if(r >= (x0+radius-per)):
                    lin = 0
                    break
            while lin > 0:
                lin +=1
                r = x0 + lin
                if(nimg[r][y0] == 255):
                    break
                if(r >= (x0+radius-per)):
                    lin = 0
                    break

        point = (x0+lin,y0)
        label = 100

        nimg, counter = us.bfs4neig(nimg,point,label)
        imgcir = np.ones_like(imgOrg)*255

        img = np.ones_like(imgOrg)*0
        img = np.where(nimg==label,255,img)
        minX = np.min(np.where(nimg == label)[0])
        maxX = np.max(np.where(nimg == label)[0])
        minY = np.min(np.where(nimg == label)[1])
        maxY = np.max(np.where(nimg == label)[1])
        radX = (maxX - minX) // 2
        radY = (maxY - minY) // 2
        medX = radX + minX
        medY = radY + minY
        if (radX > radY):
            rad = radX
        else:
            rad = radY
        orgAr = radius**2 * 3.14
        newAr = rad**2 * 3.14

        imgRealSize = np.ones_like(imgOrg)*255

        imgRealSize = drawCircle(imgRealSize,medX,medY,rad)
        imgRealSize, counter = us.bfs4neig(imgRealSize,(medX,medY),label)
        sizeEll = us.sizeEllipse(newAr)

        sizeImg = imgOrg.shape[0]* imgOrg.shape[1]

        loop = False
        if (sizeImg < newAr):
            loop = True
        if (not loop):
            imgcir = np.where(nimg == label, 0, imgcir)
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(sizeEll,sizeEll))
            imgcir = cv2.morphologyEx(imgcir, cv2.MORPH_OPEN, kernel)

            imglabel = np.ones_like(imgOrg)*255
            imglabel = np.where(imgcir==0,org,imglabel)
            imglabel = np.where(imglabel >= 245, 255, imglabel)
            imglabel = cv2.threshold(imglabel,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)[1]
            text = ocr.labelToText(imglabel)

            sizeEdge,acc = discoverEdge(imgOrg,acc,minX,maxX,minY,maxY,medX,medY)

            rad = (sizeEdge//3) + rad
            imgcir = cv2.threshold(imgcir,0,255,cv2.THRESH_BINARY_INV)[1]
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(sizeEdge,sizeEdge))
            imgcir = cv2.morphologyEx(imgcir, cv2.MORPH_DILATE, kernel)
            imgRes = imgRes + imgcir

            imgcir = cv2.Canny(imgcir,100,200)
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(sizeEdge//3,sizeEdge//3))
            imgcir = cv2.morphologyEx(imgcir, cv2.MORPH_DILATE, kernel)
            label = 255
            minX = np.min(np.where(imgcir == label)[0])
            maxX = np.max(np.where(imgcir == label)[0])
            minY = np.min(np.where(imgcir == label)[1])
            maxY = np.max(np.where(imgcir == label)[1])
            point = (minX,minY)

            cir = set(cir)
            if(str(medX)+str(medY) not in cir):
                newCircles.append((circles[0],(medX,medY,rad),loop,acc,text,point,imgcir[minX:maxX,minY:maxY]))
            cir = list(cir)
            cir.append(str(medX)+str(medY))
            name = 'img'+str(x0)+str(y0)

        else:
            newCircles.append((circles[0],(x0,y0,radius),loop,acc,'',(0,0),[]))

    return newCircles,imgRes
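The example repeatedly binarizes an image and cleans it up with an elliptical structuring element; the core OpenCV pattern, isolated (synthetic image, kernel sizes assumed):

import cv2
import numpy as np

img = np.zeros((64, 64), dtype=np.uint8)
cv2.circle(img, (32, 32), 12, 255, -1)  # filled disc
img[20, 20] = 255                       # speck of noise

kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
opened = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)        # removes the speck
dilated = cv2.morphologyEx(opened, cv2.MORPH_DILATE, kernel)  # thickens the disc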
コード例 #44
0
    reg2 = LinearRegression().fit(W2, Y)
    print("linear regression: {:.3f}".format((int_x[1] - int_x[0]) * reg1.coef_[0] * reg2.coef_[0][0]))

    ####################### instrumental variable ##################
    X, Y, Z = data[:, 0][:], data[:, 1], data[:, 2]
    array_YZ = np.vstack((Y, Z))
    cov_YZ = np.cov(array_YZ)[0][1]
    array_XZ = np.vstack((X, Z))
    cov_XZ = np.cov(array_XZ)[0][1]
    print("instrumental variable: {:.3f}".format((int_x[1] - int_x[0]) * cov_YZ / cov_XZ))

    ###################### propensity score ########################
    X, Y, Z = data[:, 0][:, np.newaxis], data[:, 1][:, np.newaxis], data[:, 2][:, np.newaxis]
    W1, W2 = data[:, 3][:, np.newaxis], data[:, 4][:, np.newaxis]
    #### step 1 ####
    fan = np.hstack((np.ones_like(X), Z, W1, W2))
    reg1 = LinearRegression(fit_intercept=False).fit(fan, X)
    Beta = reg1.coef_.transpose()
    variance = np.mean((X - np.dot(fan, Beta))**2)
    R = normal(X, X - np.dot(fan, Beta), variance)
    #### step 2 ####
    inpt = np.hstack((np.ones_like(X), X,  R, X*R))
    reg2 = LinearRegression(fit_intercept=False).fit(inpt, Y)
    Alpha = reg2.coef_.transpose()
    #### step3 ####
    t0 = int_x[0] * np.ones_like(X)
    r0 = normal(t0, np.dot(fan, Beta), variance)
    inpt0 = np.hstack((np.ones_like(X), t0, r0, t0*r0))
    y0 = np.mean(np.dot(inpt0, Alpha))

    t1 = int_x[1] * np.ones_like(X)
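The instrumental-variable line above estimates the causal slope as cov(Y, Z) / cov(X, Z); a self-contained sketch with an assumed data-generating process shows why that works even under confounding:

import numpy as np

rng = np.random.default_rng(0)
n = 100_000
z = rng.normal(size=n)                       # instrument
u = rng.normal(size=n)                       # unobserved confounder
x = 0.8 * z + u + rng.normal(size=n)         # treatment, confounded by u
y = 2.0 * x + 3.0 * u + rng.normal(size=n)   # true causal effect of x is 2.0

beta_iv = np.cov(y, z)[0, 1] / np.cov(x, z)[0, 1]
print(f"IV estimate: {beta_iv:.3f}")         # close to 2.0; OLS would be biased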
コード例 #45
0
ファイル: scf.py プロジェクト: niyaa/TurbuStat
    def fit_plaw(self, xlow=None, xhigh=None, verbose=False, bootstrap=False,
                 **bootstrap_kwargs):
        '''
        Fit a power-law to the SCF spectrum.

        Parameters
        ----------
        xlow : `~astropy.units.Quantity`, optional
            Lower lag value limit to consider in the fit.
        xhigh : `~astropy.units.Quantity`, optional
            Upper lag value limit to consider in the fit.
        verbose : bool, optional
            Show the fit summary when enabled.
        bootstrap : bool, optional
            Use residual bootstrapping to estimate the slope standard error.
        **bootstrap_kwargs
            Passed to `residual_bootstrap`.
        '''

        pix_lags = self._to_pixel(self.lags)

        x = np.log10(pix_lags.value)
        y = np.log10(self.scf_spectrum)

        if xlow is not None:
            if not isinstance(xlow, u.Quantity):
                raise TypeError("xlow must be an astropy.units.Quantity.")

            # Convert xlow into the same units as the lags
            xlow = self._to_pixel(xlow)

            self._xlow = xlow

            lower_limit = x >= np.log10(xlow.value)
        else:
            lower_limit = \
                np.ones_like(self.scf_spectrum, dtype=bool)
            self._xlow = np.abs(self.lags).min()

        if xhigh is not None:
            if not isinstance(xhigh, u.Quantity):
                raise TypeError("xhigh must be an astropy.units.Quantity.")
            # Convert xhigh into the same units as the lags
            xhigh = self._to_pixel(xhigh)

            self._xhigh = xhigh

            upper_limit = x <= np.log10(xhigh.value)
        else:
            upper_limit = \
                np.ones_like(self.scf_spectrum, dtype=bool)
            self._xhigh = np.abs(self.lags).max()

        within_limits = np.logical_and(lower_limit, upper_limit)

        if not within_limits.any():
            raise ValueError("Limits have removed all lag values. Make xlow"
                             " and xhigh less restrictive.")

        y = y[within_limits]
        x = x[within_limits]

        x = sm.add_constant(x)

        # If the stddevs were computed, use them as weights.
        # Converting to the log stds doesn't matter since the weights
        # remain proportional to 1/sigma^2, and an overall normalization is
        # applied in the fitting routine.
        weights = self.scf_spectrum_stddev[within_limits] ** -2

        model = sm.WLS(y, x, missing='drop', weights=weights)

        self.fit = model.fit(cov_type='HC3')

        self._slope = self.fit.params[1]

        if bootstrap:
            stderrs = residual_bootstrap(self.fit,
                                         **bootstrap_kwargs)
            self._slope_err = stderrs[1]

        else:
            self._slope_err = self.fit.bse[1]

        self._bootstrap_flag = bootstrap

        if verbose:
            print(self.fit.summary())

            if self._bootstrap_flag:
                print("Bootstrapping used to find stderrs! "
                      "Errors may not equal those shown above.")
コード例 #46
0
ファイル: run_pipeline.py プロジェクト: mehdirezaie/LSSutils
    def read(self):
        
        if self.comm.rank==0:
            
            self.logger.info('reading the input files')
            
            # read galaxy map
            self.galmap = hp.read_map(self.args['galmap'], verbose=False)
            self.ranmap = hp.read_map(self.args['ranmap'], verbose=False)
            self.mask = hp.read_map(self.args['mask'], verbose=False).astype('bool')
            
            # read weight map
            if os.path.isfile(self.args['wmap']):
                self.wmap = hp.read_map(self.args['wmap'], verbose=False)
            else:
                self.logger.info('{} does not exist'.format(self.args['wmap']))
                self.wmap = np.ones_like(self.galmap)
                        
            # read the dataframe            
            if self.args['photattrs'].endswith('.fits'):
                raise RuntimeWarning('fix column slicing')                
                self.df = ft.read(self.args['photattrs'], lower=True)
                self.df = self.df[:, self.args['axfit']]
                self.args['columns'] = self.df.dtype.names
                
            elif self.args['photattrs'].endswith('.h5'):
                self.df = pd.read_hdf(self.args['photattrs'], key='templates', lower=True)
                self.df = self.df.iloc[:, self.args['axfit']] 
                self.args['columns'] = self.df.columns.values
                
            else:
                raise RuntimeError('{} unknown ext'.format(self.args['photattrs']))
            
            #self.logger.info('attributes : {}'.format(self.args['columns']))   
                        
            # check pixels for infinite galaxy, random, weight, or 
            # imaging attrs
            self.logger.info(f'# pixels : {self.mask.sum()}')
            if (~np.isfinite(self.galmap[self.mask])).sum()!=0:
                self.mask &= np.isfinite(self.galmap)
                self.logger.info(f'# pixels (inf galmap) : {self.mask.sum()}')
                
            if (~np.isfinite(self.ranmap[self.mask])).sum()!=0:
                self.mask &= np.isfinite(self.ranmap)
                self.logger.info(f'# pixels (inf ranmap) : {self.mask.sum()}')

            if (~np.isfinite(self.wmap[self.mask])).sum()!=0:
                self.mask &= np.isfinite(self.wmap)
                self.logger.info(f'# pixels (inf wmap) : {self.mask.sum()}')                
                
            for column in self.args['columns']:
                if (~np.isfinite(self.df[column][self.mask])).sum() !=0:
                    self.mask &= np.isfinite(self.df[column])
                    self.logger.info(f'# pixels (inf {column}) : {self.mask.sum()}')
                    
            self.args['npixels'] = self.mask.sum()
            
            # check galaxy and random on mask   
            self.logger.info(f'galmap : {np.percentile(self.galmap[self.mask], [0, 1, 99, 100])}')
            self.logger.info(f'ranmap : {np.percentile(self.ranmap[self.mask], [0, 1, 99, 100])}') 
            self.logger.info(f'wmap : {np.percentile(self.wmap[self.mask], [0, 1, 99, 100])}')
            for column in self.args['columns']:
                self.logger.info(f'{column} : {np.percentile(self.df[column][self.mask], [0, 1, 99, 100])}')      
            self.df = self.df.values 
        else:
            self.mask = None
            self.galmap = None
            self.ranmap = None
            self.df = None
            self.wmap = None
            self.args = None
            
        # bcast
        self.args = self.comm.bcast(self.args, root=0)
        self.mask = self.comm.bcast(self.mask, root=0)
        self.galmap = self.comm.bcast(self.galmap, root=0)
        self.ranmap = self.comm.bcast(self.ranmap, root=0)
        self.df = self.comm.bcast(self.df, root=0)
        self.wmap = self.comm.bcast(self.wmap, root=0)
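The rank-0-read / broadcast pattern used here is standard mpi4py; a minimal sketch (array contents assumed; run under e.g. `mpiexec -n 4 python script.py`):

from mpi4py import MPI
import numpy as np

comm = MPI.COMM_WORLD
if comm.rank == 0:
    data = np.arange(10.0)  # stand-in for the HEALPix maps read above
else:
    data = None             # placeholder on the other ranks
data = comm.bcast(data, root=0)  # every rank now holds its own copy
print(comm.rank, data.sum())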
コード例 #47
0
def plot_hist_distribution(distribution_data,
                           filename=None,
                           values_to_scatter=None,
                           n_bins=None,
                           use_log=False,
                           x_range=None,
                           labels=None,
                           scatter_shapes=None,
                           colors=None,
                           tight_x_range=False,
                           twice_more_bins=False,
                           scale_them_all=False,
                           background_color="black",
                           hist_facecolor="white",
                           hist_edgecolor="white",
                           axis_labels_color="white",
                           axis_color="white",
                           axis_label_font_size=20,
                           ticks_labels_color="white",
                           ticks_label_size=14,
                           xlabel=None,
                           ylabel=None,
                           fontweight=None,
                           fontfamily=None,
                           size_fig=None,
                           dpi=100,
                           path_results=None,
                           save_formats="pdf",
                           ax_to_use=None,
                           color_to_use=None,
                           legend_str=None,
                           density=False,
                           save_figure=False,
                           with_timestamp_in_file_name=True,
                           max_value=None):
    """
    Plot a distribution in the form of an histogram, with option for adding some scatter values
    :param distribution_data:
    :param description:
    :param param:
    :param values_to_scatter:
    :param labels:
    :param scatter_shapes:
    :param colors:
    :param tight_x_range:
    :param twice_more_bins:
    :param xlabel:
    :param ylabel:
    :param save_formats:
    :return:
    """
    distribution = np.array(distribution_data)

    if x_range is not None:
        min_range = x_range[0]
        max_range = x_range[1]
    elif tight_x_range:
        max_range = np.max(distribution)
        min_range = np.min(distribution)
    else:
        max_range = 100
        min_range = 0
    weights = (np.ones_like(distribution) / (len(distribution))) * 100
    # weights=None

    if ax_to_use is None:
        fig, ax1 = plt.subplots(nrows=1,
                                ncols=1,
                                gridspec_kw={'height_ratios': [1]},
                                figsize=size_fig,
                                dpi=dpi)
        ax1.set_facecolor(background_color)
        fig.patch.set_facecolor(background_color)
    else:
        ax1 = ax_to_use
    if n_bins is not None:
        bins = n_bins
    else:
        bins = int(np.sqrt(len(distribution)))
        if twice_more_bins:
            bins *= 2

    hist_color = hist_facecolor
    if bins > 100:
        edge_color = hist_color
    else:
        edge_color = hist_edgecolor
    ax1.spines['bottom'].set_color(axis_color)
    ax1.spines['left'].set_color(axis_color)

    hist_plt, edges_plt, patches_plt = ax1.hist(distribution,
                                                bins=bins,
                                                range=(min_range, max_range),
                                                facecolor=hist_color,
                                                log=use_log,
                                                edgecolor=edge_color,
                                                label=legend_str,
                                                weights=weights,
                                                density=density)
    if values_to_scatter is not None:
        scatter_bins = np.ones(len(values_to_scatter), dtype="int16")
        scatter_bins *= -1

        for i, edge in enumerate(edges_plt):
            # print(f"i {i}, edge {edge}")
            if i >= len(hist_plt):
                # means that scatter left are on the edge of the last bin
                scatter_bins[scatter_bins == -1] = i - 1
                break

            if len(values_to_scatter[values_to_scatter <= edge]) > 0:
                if (i + 1) < len(edges_plt):
                    bool_list = values_to_scatter < edge  # edges_plt[i + 1]
                    for i_bool, bool_value in enumerate(bool_list):
                        if bool_value:
                            if scatter_bins[i_bool] == -1:
                                new_i = max(0, i - 1)
                                scatter_bins[i_bool] = new_i
                else:
                    bool_list = values_to_scatter < edge
                    for i_bool, bool_value in enumerate(bool_list):
                        if bool_value:
                            if scatter_bins[i_bool] == -1:
                                scatter_bins[i_bool] = i

        decay = np.linspace(1.1, 1.15, len(values_to_scatter))
        for i, value_to_scatter in enumerate(values_to_scatter):
            if i < len(labels):
                ax1.scatter(x=value_to_scatter,
                            y=hist_plt[scatter_bins[i]] * decay[i],
                            marker=scatter_shapes[i],
                            color=colors[i],
                            s=60,
                            zorder=20,
                            label=labels[i])
            else:
                ax1.scatter(x=value_to_scatter,
                            y=hist_plt[scatter_bins[i]] * decay[i],
                            marker=scatter_shapes[i],
                            color=colors[i],
                            s=60,
                            zorder=20)
    ax1.legend()

    if tight_x_range:
        ax1.set_xlim(min_range, max_range)
    else:
        ax1.set_xlim(0, 100)
        xticks = np.arange(0, 110, 10)

        ax1.set_xticks(xticks)
        # sce clusters labels
        ax1.set_xticklabels(xticks)
    ax1.yaxis.set_tick_params(labelsize=ticks_label_size)
    ax1.xaxis.set_tick_params(labelsize=ticks_label_size)
    ax1.tick_params(axis='y', colors=axis_labels_color)
    ax1.tick_params(axis='x', colors=axis_labels_color)
    # TO remove the ticks but not the labels
    # ax1.xaxis.set_ticks_position('none')

    if ylabel is None:
        ax1.set_ylabel("Distribution (%)",
                       fontsize=axis_label_font_size,
                       labelpad=20,
                       fontweight=fontweight,
                       fontfamily=fontfamily)
    else:
        ax1.set_ylabel(ylabel,
                       fontsize=axis_label_font_size,
                       labelpad=20,
                       fontweight=fontweight,
                       fontfamily=fontfamily)
    ax1.set_xlabel(xlabel,
                   fontsize=axis_label_font_size,
                   labelpad=20,
                   fontweight=fontweight,
                   fontfamily=fontfamily)

    ax1.xaxis.label.set_color(axis_labels_color)
    ax1.yaxis.label.set_color(axis_labels_color)

    if ax_to_use is None:
        # padding between ticks label and  label axis
        # ax1.tick_params(axis='both', which='major', pad=15)
        fig.tight_layout()
        if save_figure and (path_results is not None):
            # transforming a string in a list
            if isinstance(save_formats, str):
                save_formats = [save_formats]
            time_str = ""
            if with_timestamp_in_file_name:
                time_str = datetime.now().strftime("%Y_%m_%d.%H-%M-%S")
            for save_format in save_formats:
                if not with_timestamp_in_file_name:
                    fig.savefig(os.path.join(f'{path_results}',
                                             f'{filename}.{save_format}'),
                                format=f"{save_format}",
                                facecolor=fig.get_facecolor())
                else:
                    fig.savefig(os.path.join(
                        f'{path_results}',
                        f'{filename}{time_str}.{save_format}'),
                                format=f"{save_format}",
                                facecolor=fig.get_facecolor())
        plt.close()
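The `weights = ones / len * 100` line near the top is what turns raw counts into percentages; isolated, the trick looks like this (synthetic data):

import numpy as np
import matplotlib.pyplot as plt

data = np.random.default_rng(0).normal(50, 10, 1000)
weights = np.ones_like(data) / len(data) * 100  # each sample contributes 100/N percent
plt.hist(data, bins=30, weights=weights)
plt.ylabel("Distribution (%)")                  # bar heights now sum to 100
plt.show()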
コード例 #48
0
ファイル: data.py プロジェクト: YXJiang1996/ColorMatching
def generate(total_dataset_size, model='km', ydim=31, info=info, prior_bound=[0, 1, 0, 1], seed=0):
    np.random.seed(seed)
    N = total_dataset_size

    # Extract the colorant information
    background = info[:ydim]  # background (substrate) reflectance
    colors = np.arange(0, (info.shape[-1] - ydim) // (ydim + 1), 1)  # indices 0 .. number of colorant types
    initial_concentration = np.zeros(colors.size * 1)  # initial concentrations, zeroed
    ingredients = np.zeros(colors.size * ydim).reshape(colors.size, ydim)  # spectral reflectances, zeroed
    # Fill in the per-colorant information
    for i, c in enumerate(colors):
        initial_concentration[i] = info[ydim + i * (ydim + 1)]
        ingredients[i] = info[ydim + i * (ydim + 1) + 1:ydim + (i + 1) * (ydim + 1)]
    # Sample concentrations uniformly from [0, 1]; shape: (num samples, num colorant types)
    concentrations = np.random.uniform(0, 1, size=(N, colors.size))
    # Rescale the concentrations into the given prior bounds
    for i in colors:
        concentrations[:, i] = prior_bound[0] + (prior_bound[1] - prior_bound[0]) * concentrations[:, i]

    # All combinations choosing 18 of the 21 colorants
    r = list(combinations(np.arange(0, colors.size, 1), 18))
    r_num = len(r)
    n = N // r_num
    # For each combination, zero out the 18 chosen concentrations (leaving 3 active colorants)
    for i in range(r_num - 1):
        concentrations[i * n:(i + 1) * n, r[i]] = 0.
    concentrations[(r_num - 1) * n:, r[r_num - 1]] = 0.
    # Wavelength values and their indices
    xvec = np.arange(400, 710, 10)
    xidx = np.arange(0, ydim, 1)
    # Repeat the (21, 1) initial concentrations ydim=31 times, giving shape (21, 31)
    initial_conc_array = np.repeat(initial_concentration.reshape(colors.size, 1), ydim).reshape(colors.size, ydim)

    # The Kubelka-Munk (KM) model case
    if model == 'km':
        # K/S of the substrate, Eq. 4-6a in the paper
        fsb = (np.ones_like(background) - background) ** 2 / (background * 2)
        # Unit K/S of each colorant, Eq. 4-6b
        fst = ((np.ones_like(ingredients) - ingredients) ** 2 / (ingredients * 2) - fsb) / initial_conc_array
        # Zeros of shape (ydim, N)
        fss = np.zeros(N * ydim).reshape(ydim, N)
        # K/S of the mixture, Eq. 4-6c
        for i in xidx:
            for j in colors:
                fss[i, :] += concentrations[:, j] * fst[j, i]
            fss[i, :] += np.ones(N) * fsb[i]
        # Spectral reflectance of the mixture, Eq. 4-6d
        reflectance = fss - ((fss + 1) ** 2 - 1) ** 0.5 + 1
        # Transpose to (N, ydim)
        reflectance = reflectance.transpose()
    else:
        print('Sorry no model of that name')
        exit(1)

    # Shuffle the data
    shuffling = np.random.permutation(N)
    concentrations = torch.tensor(concentrations[shuffling], dtype=torch.float)
    reflectance = torch.tensor(reflectance[shuffling], dtype=torch.float)
    # concentrations: colorant concentrations of each sample
    # reflectance: spectral reflectance of each recipe
    # xvec: wavelengths, 400-710 nm
    # info: base colorant information
    return concentrations, reflectance, xvec, info
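The four commented equations (4-6a through 4-6d) implemented in the KM branch, reconstructed from the code above (the notation is mine, not quoted from the cited paper):

$$\left(\tfrac{K}{S}\right)_b = \frac{(1 - R_b)^2}{2R_b} \qquad \text{(4-6a)}$$

$$\left(\tfrac{K}{S}\right)_{t,i} = \frac{1}{c_i^{0}}\left[\frac{(1 - R_i)^2}{2R_i} - \left(\tfrac{K}{S}\right)_b\right] \qquad \text{(4-6b)}$$

$$\left(\tfrac{K}{S}\right)_{\mathrm{mix}} = \left(\tfrac{K}{S}\right)_b + \sum_i c_i \left(\tfrac{K}{S}\right)_{t,i} \qquad \text{(4-6c)}$$

$$R = 1 + \left(\tfrac{K}{S}\right)_{\mathrm{mix}} - \sqrt{\left(\tfrac{K}{S}\right)_{\mathrm{mix}}^2 + 2\left(\tfrac{K}{S}\right)_{\mathrm{mix}}} \qquad \text{(4-6d)}$$

Here $R_b$ is the substrate reflectance, $R_i$ and $c_i^{0}$ are the reflectance and initial concentration of colorant $i$, and $c_i$ is the sampled concentration.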
コード例 #49
0
ファイル: gradient_check.py プロジェクト: omega1996/dlcourse
def helper_func(x):
    output = layer.forward(x)
    loss = np.sum(output * output_weight)
    d_out = np.ones_like(output) * output_weight
    grad = layer.backward(d_out)
    return loss, grad
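`helper_func` is the hook a numeric gradient checker perturbs; a self-contained central-difference checker that consumes such a function might look like this (a sketch; the course's actual checker is not shown):

import numpy as np

def check_gradient(f, x, delta=1e-5, tol=1e-4):
    """f maps x -> (loss, analytic_grad); compare grad to central differences."""
    _, analytic = f(x)
    numeric = np.zeros_like(x)
    it = np.nditer(x, flags=['multi_index'])
    while not it.finished:
        ix = it.multi_index
        orig = x[ix]
        x[ix] = orig + delta
        loss_plus, _ = f(x)
        x[ix] = orig - delta
        loss_minus, _ = f(x)
        x[ix] = orig  # restore
        numeric[ix] = (loss_plus - loss_minus) / (2 * delta)
        it.iternext()
    return np.allclose(numeric, analytic, atol=tol)

# e.g. for f(x) = sum(x**2), the analytic gradient is 2*x:
assert check_gradient(lambda x: (np.sum(x ** 2), 2 * x), np.random.randn(3, 4))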
コード例 #50
0
def ensemble_prediction(models_list, idate, datearray, adjClose, num_stocks, sort_mode='sharpe'):
    #--------------------------------------------------------------
    # loop through best models and pick companies from ensemble prediction
    #--------------------------------------------------------------

    ensemble_symbols = []
    ensemble_Ytrain = []
    ensemble_sharpe = []
    ensemble_recent_sharpe = []
    ensemble_equal = []
    ensemble_rank = []
    for iii,imodel in enumerate(models_list):

        # --------------------------------------------------
        # build DL model
        # --------------------------------------------------

        config_filename = os.path.join(models_folder, imodel).replace('.hdf','.txt')
        print(".", end='')
        model = build_model(config_filename, verbose=False)

        # collect meta data for weighting ensemble_symbols
        params = get_params(config_filename)

        num_periods_history = params['num_periods_history']
        increments = params['increments']

        symbols_predict = symbols
        Xpredict, Ypredict,\
                  dates_predict,\
                  companies_predict = generateExamples3layerForDate(idate,\
                                             datearray,\
                                             adjClose, \
                                             num_periods_history,\
                                             increments, \
                                             output_incr='monthly')

        dates_predict = np.array(dates_predict)
        companies_predict = np.array(companies_predict)

        # --------------------------------------------------
        # make predictions monthly for backtesting
        # - there might be some bias since the entire period
        #   has data used for training
        # --------------------------------------------------

        weights_filename = os.path.join(models_folder, imodel)
        try:
            model.load_weights(weights_filename)
        except Exception:
            pass  # fall back to the freshly built weights if loading fails

        # show predictions for (single) last date
        _Xtrain = Xpredict[dates_predict == idate]
        _Ytrain = Ypredict[dates_predict == idate][:,0]
        _dates = np.array(dates_predict[dates_predict == idate])
        _companies = np.array(companies_predict[dates_predict == idate])
        _forecast = model.predict(_Xtrain)[:, 0]
        _symbols = np.array(symbols_predict)[_companies]

        del model
        K.clear_session()

        forecast_indices = _forecast.argsort()[-num_stocks:]
        sorted_Xtrain = _Xtrain[forecast_indices,:,:,:]
        sorted_Ytrain = _Ytrain[forecast_indices]
        sorted_companies = _companies[forecast_indices]
        sorted_forecast = _forecast[forecast_indices]
        sorted_symbols = _symbols[forecast_indices]
        ensemble_sharpe_weights = np.ones(sorted_companies.shape, 'float')
        ensemble_recent_sharpe_weights = np.ones_like(ensemble_sharpe_weights)
        for icompany in range(sorted_companies.shape[0]):
            if sort_mode == 'sharpe':
                ensemble_sharpe_weights[icompany] = allstats((sorted_Xtrain[icompany,:,-1,0]+1.).cumprod()).sharpe(periods_per_year=252./increments[-1])
                ensemble_recent_sharpe_weights[icompany] = allstats((sorted_Xtrain[icompany,:,int(sorted_Xtrain.shape[2]/2),0]+1.).cumprod()).sharpe(periods_per_year=252./increments[0])
            elif sort_mode == 'sharpe_plus_sortino':
                ensemble_sharpe_weights[icompany] = allstats((sorted_Xtrain[icompany,:,-1,0]+1.).cumprod()).sharpe(periods_per_year=252./increments[-1]) + \
                                                    allstats((sorted_Xtrain[icompany,:,-1,0]+1.).cumprod()).sortino()
                ensemble_recent_sharpe_weights[icompany] = allstats((sorted_Xtrain[icompany,:,int(sorted_Xtrain.shape[2]/2),0]+1.).cumprod()).sharpe(periods_per_year=252./increments[0]) + \
                                                           allstats((sorted_Xtrain[icompany,:,int(sorted_Xtrain.shape[2]/2),0]+1.).cumprod()).sortino()
            elif sort_mode == 'sortino':
                ensemble_sharpe_weights[icompany] = allstats((sorted_Xtrain[icompany,:,-1,0]+1.).cumprod()).sortino()
                ensemble_recent_sharpe_weights[icompany] = allstats((sorted_Xtrain[icompany,:,int(sorted_Xtrain.shape[2]/2),0]+1.).cumprod()).sortino()
        ensemble_equal_weights = np.ones_like(ensemble_sharpe_weights)
        ensemble_rank_weights = np.arange(np.array(sorted_symbols[-num_stocks:]).shape[0])[::-1]

        ensemble_symbols.append(sorted_symbols[-num_stocks:])
        ensemble_Ytrain.append(sorted_Ytrain[-num_stocks:])
        ensemble_sharpe.append(ensemble_sharpe_weights)
        ensemble_recent_sharpe.append(ensemble_recent_sharpe_weights)
        ensemble_equal.append(ensemble_equal_weights)
        ensemble_rank.append(ensemble_rank_weights)

    # sift through ensemble symbols
    ensemble_symbols = np.array(ensemble_symbols).flatten()
    ensemble_Ytrain = np.array(ensemble_Ytrain).flatten()
    ensemble_sharpe = np.array(ensemble_sharpe).flatten()
    ensemble_recent_sharpe = np.array(ensemble_recent_sharpe).flatten()
    ensemble_equal = np.array(ensemble_equal).flatten()
    ensemble_rank = np.array(ensemble_rank).flatten()

    unique_symbols = list(set(list(np.array(ensemble_symbols).flatten())))
    unique_ensemble_symbols = []
    unique_ensemble_Ytrain = []
    unique_ensemble_sharpe = []
    unique_ensemble_recent_sharpe = []
    unique_ensemble_equal = []
    unique_ensemble_rank = []
    for k, ksymbol in enumerate(unique_symbols):
        unique_ensemble_symbols.append(np.array(ensemble_symbols)[ensemble_symbols == ksymbol][0])
        unique_ensemble_Ytrain.append(ensemble_Ytrain[ensemble_symbols == ksymbol].mean())
        unique_ensemble_sharpe.append(ensemble_sharpe[ensemble_symbols == ksymbol].sum())
        unique_ensemble_recent_sharpe.append(ensemble_recent_sharpe[ensemble_symbols == ksymbol].sum())
        unique_ensemble_equal.append(ensemble_equal[ensemble_symbols == ksymbol].sum())
        unique_ensemble_rank.append(ensemble_rank[ensemble_symbols == ksymbol].sum())

    indices_recent = np.argsort(unique_ensemble_recent_sharpe)[-num_stocks:]
    sorted_recent_sharpe = np.array(unique_ensemble_recent_sharpe)[indices_recent]
    sorted_recent_sharpe = np.array(sorted_recent_sharpe)

    unique_ensemble_sharpe = np.array(unique_ensemble_sharpe) + np.array(unique_ensemble_recent_sharpe)

    indices = np.argsort(unique_ensemble_sharpe)[-num_stocks:]
    sorted_sharpe = np.array(unique_ensemble_sharpe)[indices]
    sorted_sharpe = np.array(sorted_sharpe)
    sorted_symbols = np.array(unique_ensemble_symbols)[indices]
    sorted_Ytrain = np.array(unique_ensemble_Ytrain)[indices]

    try:
        _Ytrain = _Ytrain[dates_predict == idate]
        sorted_Ytrain = sorted_Ytrain[-num_stocks:]
        BH_gain = _Ytrain.mean()
    except:
        BH_gain = 0.0

    avg_gain = sorted_Ytrain.mean()

    return avg_gain, BH_gain, sorted_symbols
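The weighting above relies on `allstats(...).sharpe(...)` computed from a cumulative-product price series; since `allstats` isn't shown, here is a generic annualized-Sharpe sketch of what such a call typically computes (an assumption, not the library's actual code):

import numpy as np

def sharpe_ratio(returns, periods_per_year=252.0):
    """Annualized Sharpe ratio of per-period returns (risk-free rate = 0)."""
    returns = np.asarray(returns, dtype=float)
    return np.sqrt(periods_per_year) * returns.mean() / returns.std()

# e.g. daily returns with a small positive drift:
r = np.random.default_rng(0).normal(5e-4, 1e-2, 252)
print(sharpe_ratio(r))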
コード例 #51
0
vpvs_vm = vp_vm/vs_vm


de_pl = np.array([0,   -0.1,   0,    -0.05])
ep_pl = np.array([0,    0,    -0.1,  -0.05])
#ga_pl = np.array([-0.1,  0,     0,   -0.15])

#ga_vti_pl = -ga_pl/(1 + 2*ga_pl)
ga_vti_pl = np.array([0.1,  0,     0,   0.15])
ga_pl = -ga_vti_pl/(1 + 2*ga_vti_pl)
az0_pl = np.array([0,0,0,0])-90

Nmodels = len(de_pl)

depth = np.arange(dh, (H_layer + H_between)*Nmodels + H_between, dh)
vp = np.ones_like(depth).astype(float) * vp_vm
vs = np.ones_like(depth).astype(float) * vs_vm
dn = np.ones_like(depth).astype(float) * dn_vm
ep = np.zeros_like(depth).astype(float)
de = np.zeros_like(depth).astype(float)
ga = np.zeros_like(depth).astype(float)
az0 = np.zeros_like(depth).astype(float)


for i in range(Nmodels):
    i_start = np.floor((i*(H_between + H_layer) + H_between)/dh).astype(int)
    i_end = np.floor((i+1)*(H_between + H_layer)/dh).astype(int)
    
    vp[i_start:i_end] = vp_pl
    vs[i_start:i_end] = vs_pl
    dn[i_start:i_end] = dn_pl
コード例 #52
0
ファイル: plotPaper.py プロジェクト: cronburg/150-ppl
fig.set_figheight(15.0)
fig.set_figwidth(9.0)

c1 = tuple(np.array([255,165,0,125])/255.)
c2 = tuple(np.array([0,128,0,200])/255.)

for (t,i,ax1) in zip(idx,range(len(idx)),ax_array):
  
  #add_subplot(3, 1, i + 1) # draw the (i+1)th bar plot
  ax2 = ax1.twinx()
  ax2.yaxis.set_major_formatter(plt.FuncFormatter(to_percentage))
  #ax2.set_yticks([0,.02,.04,.06,.08])
  #ax2.set_ylim([0,.08])

  hist_data = param0[where(turn == t)[0]]
  weights = np.ones_like(hist_data) #/ len(hist_data)
  
  print((275.0 / len(hist_data)), ymax2, len(hist_data), len(data))
  ax2.set_yticks(np.linspace(0.0, ymax2, 6)) #[0,.02,.04,.06,.08])
  ax2.set_ylim([0, ymax2])

#  ax2.hist(hist_data, bins=30, color=c1, weights=weights/len(hist_data))

  ax1.hist(hist_data, bins=30, color=c2)
  ax1.set_xlim(0.0, 1.0)
  ax1.set_ylim(0, 275)
  #ax1.set_yticks([
  #ax2.set_title("P($p_0$ | $t_e$ = %d)"%(t,))
  ax1.text(.5, .88, "P($p_0$ | $t_e$ = %d)"%(t,),
           horizontalalignment='center',
           transform=ax1.transAxes,
コード例 #53
0
def plot3D_discrete(im):

	font = {'family' : 'normal',
		'weight' : 'normal',
		'size'   : 14}
	matplotlib.rc('font', **font)
	l = 20
	psf = im[64-l:64+l,64-l:64+l]

	fig = plt.figure(figsize=(10, 10))
	ax1 = fig.add_subplot(111, projection='3d')
	_x = np.arange(psf.shape[0])
	_y = np.arange(psf.shape[1])
	x, y = np.meshgrid(_x, _y)

	xpos = x.flatten()   # Convert positions to 1D array
	ypos = y.flatten()
	zpos = np.zeros(psf.shape[0]*psf.shape[1])

	dx = 1* np.ones_like(zpos)
	dy = dx.copy()
	dz = psf.flatten()

	# generate colors

	cmap = plt.cm.jet # Get desired colormap - you can change this!
	max_height = np.max(dz)   # get range of colorbars so we can normalize
	min_height = np.min(dz)
	# scale each z to [0,1], and get their rgb values
	rgba = [cmap((k-min_height)/max_height) for k in dz]
	c_id = dz.argsort()

	# Get the camera's location in Cartesian coordinates.
	ax1.view_init(30, -115)
	x1, y1, z1 = sph2cart(*sphview(ax1))
	camera = np.array((x1,y1,0))
	# Calculate the distance of each bar from the camera.
	z_order = getDistances(camera, xpos, ypos, dz)
	z_max = np.max(z_order)  # largest camera distance; avoids shadowing the builtin max

	n = psf.shape[0]*psf.shape[1]
	for i in range(n):
		pl = ax1.bar3d(xpos[i], ypos[i], zpos[i], dx[i], dy[i], dz[i],
		         color=rgba[i], alpha=1, zsort='max')
		# The z-order must be set explicitly.
		#
		# z-order values are somewhat backwards in magnitude, in that the largest
		# value is closest to the camera - unlike, in say, a coordinate system.
		# Therefore, subtracting the maximum distance from the calculated distance
		# inverts the z-order to the proper form.
		pl._sort_zpos = z_max - z_order[i]

	#ax1.bar3d(xpos,ypos,zpos, dx, dy, dz, color='0.85', zsort='max')
	plt.rcParams['grid.color'] = "lightgray"
	plt.rcParams['grid.linestyle'] = '--'
	ax1.set_xlabel("x")
	ax1.set_ylabel("y")
	ax1.w_xaxis.set_pane_color((1, 1, 1, 1.0))
	ax1.w_yaxis.set_pane_color((1, 1, 1, 1.0))
	ax1.w_zaxis.set_pane_color((1, 1, 1, 1.0))
	#ax1.set_xticks([])
	#ax1.set_yticks([])
	#ax1.set_zticks([])
	ax1.set_xticklabels([])
	ax1.set_yticklabels([])
	ax1.set_zticklabels([])
	#ax1.w_zaxis.line.set_lw(0.)
	plt.autoscale(enable=True, axis='both', tight=True)
	plt.show()
コード例 #54
0
def record(name):
    fig0 = plt.figure(figsize=(20, 10))
    fig0.tight_layout()
    fig0ax0 = fig0.add_subplot(3, 2, 1)
    fig0ax1 = fig0.add_subplot(3, 2, 2)
    fig0ax2 = fig0.add_subplot(3, 2, 3)
    fig0ax3 = fig0.add_subplot(3, 2, 4)
    fig0ax4 = fig0.add_subplot(3, 2, 5)
    fig0ax5 = fig0.add_subplot(3, 2, 6)

    fig1 = plt.figure(figsize=(20, 10))
    fig1.tight_layout()
    fig1ax0 = fig1.add_subplot(3, 2, 1)
    fig1ax1 = fig1.add_subplot(3, 2, 2)
    fig1ax2 = fig1.add_subplot(3, 2, 3)
    fig1ax3 = fig1.add_subplot(3, 2, 4)
    fig1ax4 = fig1.add_subplot(3, 2, 5)
    fig1ax5 = fig1.add_subplot(3, 2, 6)

    weight = params.mass * params.g * np.ones_like(t_s)
    fig0ax0 = utils.add_plots(fig0ax0, t_s, [F_t, weight], ["-", "--"],
                              ["r", "k"], ["F", "m*g"],
                              "Rotor Thrust -F- over time", 't {s}', 'F {N}')
    fig0ax0.legend(loc='lower right', shadow=True, fontsize='small')

    # Torques
    u2 = map(lambda a: a[0], M_t)  # extract ux for all points in time
    u3 = map(lambda a: a[1], M_t)
    u4 = map(lambda a: a[2], M_t)

    fig0ax1 = utils.add_plots(fig0ax1, t_s, [u2, u3, u4], ["-", "-", "-"],
                              ["r", "g", "b"], ["u2", "u3", "u4"],
                              "Components of torque vector M over time",
                              "t {s}", "{N*m}")
    fig0ax1.legend(loc='lower right', shadow=True, fontsize='small')

    # X position
    q_x = map(lambda a: a[0][0], q_s)  # get quad x position
    d_x = map(lambda a: a.pos[0], d_s)  # get desired x position
    x_e = map(lambda a, b: 10 * (a - b), d_x, q_x)  # compute error

    fig0ax2 = utils.add_plots(fig0ax2, t_s, [q_x, d_x, x_e], ["-", "--", "-"],
                              ["g", "r", "b"],
                              ["quad -x", "des x", "x error (x10)"],
                              "X - axis position of quadrotor", "t {s}",
                              "x {m}")
    fig0ax2.legend(loc='lower right', shadow=True, fontsize='small')

    # Y position
    q_y = map(lambda a: a[0][1], q_s)
    d_y = map(lambda a: a.pos[1], d_s)
    y_e = map(lambda a, b: 10 * (a - b), d_y, q_y)

    fig0ax3 = utils.add_plots(fig0ax3, t_s, [q_y, d_y, y_e], ["-", "--", "-"],
                              ["g", "r", "b"],
                              ["quad -y", "des y", "y error (x10)"],
                              "Y - axis position of quadrotor", "t {s}",
                              "y {m}")
    fig0ax3.legend(loc='lower right', shadow=True, fontsize='small')

    # Z position
    q_z = map(lambda a: a[0][2], q_s)
    d_z = map(lambda a: a.pos[2], d_s)
    z_e = map(lambda a, b: 10 * (a - b), d_z, q_z)

    fig0ax4 = utils.add_plots(fig0ax4, t_s, [q_z, d_z, z_e], ["-", "--", "-"],
                              ["g", "r", "b"],
                              ["quad z", "des z", "z error (x10)"],
                              "Z - axis position of quadrotor", "t {s}",
                              "z {m}")
    fig0ax4.legend(loc='lower right', shadow=True, fontsize='small')

    # Euler angles
    q_phi = map(lambda a: a[2][0] * 180.0 / np.pi, q_s)
    q_theta = map(lambda a: a[2][1] * 180.0 / np.pi, q_s)
    q_psi = map(lambda a: a[2][2] * 180.0 / np.pi, q_s)

    fig0ax5 = utils.add_plots(fig0ax5, t_s, [q_phi, q_theta, q_psi],
                              ["-", "-", "-"], ["r", "g", "b"],
                              ["phi", "theta", "psi"],
                              "Angular position of quadrotor", 't {s}',
                              'phi, theta, psi {degree}')
    fig0ax5.legend(loc='lower right', shadow=True, fontsize='small')

    #  X Linear velocity
    q_vx = map(lambda a: a[1][0], q_s)
    d_vx = map(lambda a: a.vel[0], d_s)
    vx_e = map(lambda a, b: 10 * (a - b), d_vx, q_vx)

    fig1ax0 = utils.add_plots(fig1ax0, t_s, [q_vx, d_vx, vx_e],
                              ["-", "--", "-"], ["g", "r", "b"],
                              ["quad Vx", "des Vx", "Vx error (x10)"],
                              "X axis linear Velocities of quadrotor", 't {s}',
                              'Vx {m/s}')
    fig1ax0.legend(loc='lower right', shadow=True, fontsize='small')

    #  Y Linear velocity
    q_vy = map(lambda a: a[1][1], q_s)
    d_vy = map(lambda a: a.vel[1], d_s)
    vy_e = map(lambda a, b: 10 * (a - b), d_vy, q_vy)

    fig1ax1 = utils.add_plots(fig1ax1, t_s, [q_vy, d_vy, vy_e],
                              ["-", "--", "-"], ["g", "r", "b"],
                              ["quad Vy", "des Vy", "Vy error (x10)"],
                              "Y axis linear Velocities of quadrotor", 't {s}',
                              'Vy {m/s}')
    fig1ax1.legend(loc='lower right', shadow=True, fontsize='small')

    #  Z Linear velocity
    q_vz = map(lambda a: a[1][2], q_s)
    d_vz = map(lambda a: a.vel[2], d_s)
    vz_e = map(lambda a, b: 10 * (a - b), d_vz, q_vz)

    fig1ax2 = utils.add_plots(fig1ax2, t_s, [q_vz, d_vz, vz_e],
                              ["-", "--", "-"], ["g", "r", "b"],
                              ["quad Vz", "des Vz", "Vz error (x10)"],
                              "Z axis linear Velocities of quadrotor", 't {s}',
                              'Vz {m/s}')
    fig1ax2.legend(loc='lower right', shadow=True, fontsize='small')

    # Angular velocities
    q_wx = map(lambda a: a[3][0] * 180.0 / np.pi, q_s)
    q_wy = map(lambda a: a[3][1] * 180.0 / np.pi, q_s)
    q_wz = map(lambda a: a[3][2] * 180.0 / np.pi, q_s)

    fig1ax3 = utils.add_plots(fig1ax3, t_s, [q_wx, q_wy, q_wz],
                              ["-", "-", "-"], ["r", "g", "b"],
                              ["wx", "wy", "wz"],
                              "Angular velocities of quadrotor", 't {s}',
                              'wx, wy, wz {degree/s}')
    fig1ax3.legend(loc='lower right', shadow=True, fontsize='small')

    # rotor speeds
    w_0 = map(
        lambda a: np.sqrt(a[0][0]) if a[0][0] > 0 else -np.sqrt(-a[0][0]), w_i)
    w_1 = map(
        lambda a: np.sqrt(a[1][0]) if a[1][0] > 0 else -np.sqrt(-a[1][0]), w_i)
    w_2 = map(
        lambda a: np.sqrt(a[2][0]) if a[2][0] > 0 else -np.sqrt(-a[2][0]), w_i)
    w_3 = map(
        lambda a: np.sqrt(a[3][0]) if a[3][0] > 0 else -np.sqrt(-a[3][0]), w_i)

    fig1ax4 = utils.add_plots(fig1ax4, t_s, [w_0, w_1, w_2, w_3],
                              ["-", "-", "-", "-"], ["r", "g", "b", "c"],
                              ["w0", "w1", "w2", "w3"], "Rotor Speeds",
                              't {s}', '{rpm}')
    fig1ax4.legend(loc='lower right', shadow=True, fontsize='small')
    # save
    fig0.savefig("t_" + name, dpi=300)  #translation variables
    fig1.savefig("r_" + name, dpi=300)  #rotation variables
    print("Saved t_{} and r_{}.".format(name, name))
コード例 #55
0
import numpy as np
import scipy.fftpack as spfft
import matplotlib.pyplot as plt

# reconstruct the signal from the OMP coefficients and compare to the data
plt.plot(np.dot(A, omp_solver.coef_))
plt.plot(y_thinned)
plt.show()

#%% estimate spin density by L1 minimization
# do L1 optimization
#vx = cvx.Variable(n)
#objective = cvx.Minimize(cvx.norm(vx, 1))
#constraints = [A*vx == y2]
#prob = cvx.Problem(objective, constraints)
#spins_l1 = prob.solve(verbose=True)


#%% estimate spin density by Bayesian analysis - does not work so far

p_r = np.ones_like(rs) / n  # prior: uniform estimated spin density in real space

p_fx = .5 * (1 + y / y[0])  # evidence: p(Sz=0) at each fx

# likelihood p(fx | r): rows of the inverse DCT, rescaled into [0, 1]
p_fx_bar_r = spfft.idct(np.identity(n))
p_fx_bar_r = .5 * (1 + p_fx_bar_r / p_fx_bar_r.max())

#plt.figure()
#plt.plot(fx, p_fx)
#plt.show()

plt.figure()
plt.imshow(p_fx_bar_r)
plt.colorbar()
plt.show()
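
For reference, the commented-out L1 step above maps onto the current cvxpy API roughly as sketched below. This is an assumption-laden standalone version: `n`, `A`, and `y2` stand in for the signal length, the subsampled DCT sensing matrix, and the measured data of the surrounding script, and are generated synthetically here so the block runs on its own.

import numpy as np
import scipy.fftpack as spfft
import cvxpy as cvx

n, m = 128, 40                                     # signal length, measurements
rng = np.random.default_rng(0)
x_true = np.zeros(n)
x_true[rng.choice(n, 5, replace=False)] = 1.0      # sparse "spin" density
Phi = spfft.idct(np.identity(n), norm='ortho')     # inverse-DCT dictionary
A = Phi[rng.choice(n, m, replace=False)]           # subsampled rows
y2 = A @ x_true                                    # synthetic measurements

vx = cvx.Variable(n)
prob = cvx.Problem(cvx.Minimize(cvx.norm(vx, 1)), [A @ vx == y2])
prob.solve()
spins_l1 = vx.value                                # recovered sparse density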
 
Code example #56
    def _pmf(self, k, low, high):
        # discrete uniform pmf: probability 1/(high - low) on [low, high)
        p = np.ones_like(k) / (high - low)
        return np.where((k >= low) & (k < high), p, 0.)
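
As a quick usage check (my own sketch, not from the source), the pmf evaluated over a grid of integers should sum to one for any `low < high`:

import numpy as np

low, high = 2, 7
k = np.arange(-5, 15)
p = np.where((k >= low) & (k < high), np.ones_like(k) / (high - low), 0.)
print(p.sum())  # -> 1.0: five integers in [2, 7), each with probability 0.2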
Code example #57
    def get_handvis2d(self, idx):
        # visibility mask of ones: every 2-D hand vertex is treated as visible
        handvis = np.ones_like(self.get_handverts2d(idx))
        return handvis
Code example #58
def best_in_sample_point(
    Xs: Union[List[torch.Tensor], List[np.ndarray]],
    model: Union[NumpyModel, TorchModel],
    bounds: List[Tuple[float, float]],
    objective_weights: Optional[Tensoray],
    outcome_constraints: Optional[Tuple[Tensoray, Tensoray]] = None,
    linear_constraints: Optional[Tuple[Tensoray, Tensoray]] = None,
    fixed_features: Optional[Dict[int, float]] = None,
    options: Optional[TConfig] = None,
) -> Optional[Tuple[Tensoray, float]]:
    """Select the best point that has been observed.

    Implements two approaches to selecting the best point.

    For both approaches, only points that satisfy parameter space constraints
    (bounds, linear_constraints, fixed_features) will be returned. Points must
    also be observed for all objective and constraint outcomes. Returned
    points may violate outcome constraints, depending on the method below.

    1: Select the point that maximizes the expected utility
    (objective_weights^T posterior_objective_means - baseline) * Prob(feasible).
    Here the baseline should be selected so that at least one point has
    positive utility. It can be specified in the options dict; otherwise
    min (objective_weights^T posterior_objective_means)
    will be used, where the min is taken over observed points.

    2: Select the best-objective point that is feasible with at least
    probability p.

    The following quantities may be specified in the options dict:

    - best_point_method: 'max_utility' (default) or 'feasible_threshold'
      to select between the two approaches described above.
    - utility_baseline: Value for the baseline used in max_utility approach. If
      not provided, defaults to min objective value.
    - probability_threshold: Threshold for the feasible_threshold approach.
      Defaults to p=0.95.
    - feasibility_mc_samples: Number of MC samples used for estimating the
      probability of feasibility (defaults to 10,000).

    Args:
        Xs: Training data for the points, among which to select the best.
        model: Numpy or Torch model.
        bounds: A list of (lower, upper) tuples for each feature.
        objective_weights: The objective is to maximize a weighted sum of
            the columns of f(x). These are the weights.
        outcome_constraints: A tuple of (A, b). For k outcome constraints
            and m outputs at f(x), A is (k x m) and b is (k x 1) such that
            A f(x) <= b.
        linear_constraints: A tuple of (A, b). For k linear constraints on
            d-dimensional x, A is (k x d) and b is (k x 1) such that
            A x <= b.
        fixed_features: A map {feature_index: value} for features that
            should be fixed to a particular value in the best point.
        options: A config dictionary with settings described above.

    Returns:
        A two-element tuple, or None if no feasible point exists. The tuple
        contains:
        - d-array of the best point,
        - utility at the best point.
    """
    # Parse options
    if options is None:
        options = {}
    # pyre-fixme[9]: method has type `str`; used as `Union[AcquisitionFunction,
    #  float, int, str]`.
    method: str = options.get("best_point_method", "max_utility")
    # pyre-fixme[9]: B has type `Optional[float]`; used as
    #  `Optional[Union[AcquisitionFunction, float, int, str]]`.
    B: Optional[float] = options.get("utility_baseline", None)
    # pyre-fixme[9]: threshold has type `float`; used as `Union[AcquisitionFunction,
    #  float, int, str]`.
    threshold: float = options.get("probability_threshold", 0.95)
    # pyre-fixme[9]: nsamp has type `int`; used as `Union[AcquisitionFunction,
    #  float, int, str]`.
    nsamp: int = options.get("feasibility_mc_samples", 10000)
    # Get points observed for all objective and constraint outcomes
    if objective_weights is None:
        return None  # pragma: no cover
    objective_weights_np = as_array(objective_weights)
    X_obs = get_observed(
        Xs=Xs,
        objective_weights=objective_weights,
        outcome_constraints=outcome_constraints,
    )
    # Filter to those that satisfy constraints.
    X_obs = filter_constraints_and_fixed_features(
        X=X_obs,
        bounds=bounds,
        linear_constraints=linear_constraints,
        fixed_features=fixed_features,
    )
    if len(X_obs) == 0:
        # No feasible points
        return None
    # Predict objective and P(feas) at these points for Torch models.
    if isinstance(Xs[0], torch.Tensor):
        X_obs = X_obs.detach().clone()
    f, cov = as_array(model.predict(X_obs))
    obj = objective_weights_np @ f.transpose()  # pyre-ignore
    pfeas = np.ones_like(obj)
    if outcome_constraints is not None:
        A, b = as_array(outcome_constraints)  # (m x j) and (m x 1)
        # Use Monte Carlo to compute pfeas, to properly handle covariance
        # across outcomes.
        for i, _ in enumerate(X_obs):
            z = np.random.multivariate_normal(mean=f[i, :],
                                              cov=cov[i, :, :],
                                              size=nsamp)  # (nsamp x j)
            pfeas[i] = (A @ z.transpose() <= b).all(axis=0).mean()
    # Identify best point
    if method == "feasible_threshold":
        utility = obj
        utility[pfeas < threshold] = -np.inf
    elif method == "max_utility":
        if B is None:
            B = obj.min()
        utility = (obj - B) * pfeas
    i = np.argmax(utility)
    if utility[i] == -np.inf:
        return None
    else:
        return X_obs[i, :], utility[i]
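
The Monte Carlo feasibility estimate inside the loop above is easy to isolate. The following self-contained sketch (sizes and values are hypothetical, not from Ax) estimates Prob(A f(x) <= b) for a single point whose outcomes are jointly Gaussian:

import numpy as np

rng = np.random.default_rng(0)
f_i = np.array([1.0, 2.0])                   # posterior means of j = 2 outcomes
cov_i = np.array([[0.5, 0.2],
                  [0.2, 0.3]])               # posterior covariance across outcomes
A = np.array([[1.0, 0.0]])                   # one constraint: outcome_0 <= 1.5
b = np.array([[1.5]])
nsamp = 10000

z = rng.multivariate_normal(mean=f_i, cov=cov_i, size=nsamp)  # (nsamp x j)
pfeas = (A @ z.transpose() <= b).all(axis=0).mean()
print(pfeas)  # ~0.76, i.e. Phi((1.5 - 1.0) / sqrt(0.5))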
Code example #59
def phase_diffs(phases: np.ndarray) -> np.ndarray:
    # express all phases relative to the first one
    base = phases[0]
    return phases - np.ones_like(phases) * base
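
A small note on the implementation: NumPy broadcasts scalars, so the `np.ones_like` factor is redundant; an equivalent form that skips the temporary array would be:

import numpy as np

def phase_diffs(phases: np.ndarray) -> np.ndarray:
    return phases - phases[0]  # broadcasting subtracts the scalar base phase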
Code example #60
    def get_objvis2d(self, idx):
        # visibility mask of ones: every 2-D object vertex is treated as visible
        objvis = np.ones_like(self.get_objverts2d(idx))
        return objvis