Example #1
def measure_image_moments(image):
    """Compute 0th, 1st and 2nd moments of an image.

    NaN values are ignored in the computation.

    Parameters
    ----------
    image : `astropy.io.fits.ImageHDU`
        Image to measure on.

    Returns
    -------
    image moments : list
        List of image moments:
        [A, x_cms, y_cms, x_sigma, y_sigma, sqrt(x_sigma * y_sigma)]
    """
    x, y = coordinates(image, lon_sym=True)
    A = image.data[np.isfinite(image.data)].sum()

    # Center of mass
    x_cms = (x * image.data)[np.isfinite(image.data)].sum() / A
    y_cms = (y * image.data)[np.isfinite(image.data)].sum() / A

    # Second moments
    x_var = ((x - x_cms) ** 2 * image.data)[np.isfinite(image.data)].sum() / A
    y_var = ((y - y_cms) ** 2 * image.data)[np.isfinite(image.data)].sum() / A
    x_sigma = np.sqrt(x_var)
    y_sigma = np.sqrt(y_var)

    return A, x_cms, y_cms, x_sigma, y_sigma, np.sqrt(x_sigma * y_sigma)
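The `coordinates` helper above comes from the surrounding library; below is a minimal, self-contained sketch of the same NaN-ignoring moment computation, using plain NumPy coordinate grids on a synthetic image:

import numpy as np

data = np.arange(25, dtype=float).reshape(5, 5)
data[0, 0] = np.nan                   # NaN pixels are excluded everywhere below
y, x = np.mgrid[0:5, 0:5]             # pixel coordinate grids

finite = np.isfinite(data)
A = data[finite].sum()                # 0th moment
x_cms = (x * data)[finite].sum() / A  # 1st moments (center of mass)
y_cms = (y * data)[finite].sum() / A
print(A, x_cms, y_cms)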
Example #2
File: f3.py Project: jpcoles/ZM
    def plot(self):
        ax = self.subplot

        xs = planets["Pl. Sol"]
        ys = planets["Pl. Grav"]
        self.active = w = logical_and(isfinite(xs), isfinite(ys)).nonzero()[0]
        xs, ys, clrs = xs[w], ys[w], colors[w]

        ax.set_xscale("log")
        ax.set_yscale("log")

        if not self.issmall:
            artist = ax.scatter(xs, ys, s=80, c=clrs, lw=1, picker=80.0, zorder=1000)
            ax.grid(True, which="both", ls="-", c="#222222")
            ax.axvspan(1.0 / 4, 1.0 / 0.56, facecolor="#111111", zorder=-1000)
            ax.set_xlabel(_(r"Sunlight strength (Earth units)"))
            ax.set_ylabel(_(r"Gravity strength (Earth units)"))
            # ax.xaxis.set_major_formatter(FormatStrFormatter(r'$%0.2f$'))
            # ax.yaxis.set_major_formatter(FormatStrFormatter(r'$%0.2f$'))
            ax.xaxis.set_major_formatter(EPFormatter(True))
            ax.yaxis.set_major_formatter(EPFormatter())
        else:
            artist = ax.scatter(xs, ys, s=40, c=clrs, lw=1, picker=80.0)
            ax.set_xlabel(_(r"Sunlight vs Gravity"))
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())

        # ax.axis('scaled')
        ax.set_xlim(1e-9, 2e4)
        ax.set_ylim(1e-1, 1e2)

        return artist
Example #3
    def test_missing(self):
        # Test missing data handling when calling from the API. Missing
        # data handling does not currently work for formulas.

        endog = np.random.normal(size=100)
        exog = np.random.normal(size=(100, 3))
        exog[:, 0] = 1
        groups = np.kron(lrange(20), np.ones(5))

        endog[0] = np.nan
        endog[5:7] = np.nan
        exog[10:12, 1] = np.nan

        mod1 = GEE(endog, exog, groups, missing='drop')
        rslt1 = mod1.fit()

        assert_almost_equal(len(mod1.endog), 95)
        assert_almost_equal(np.asarray(mod1.exog.shape), np.r_[95, 3])

        ii = np.isfinite(endog) & np.isfinite(exog).all(1)

        mod2 = GEE(endog[ii], exog[ii, :], groups[ii], missing='none')
        rslt2 = mod2.fit()

        assert_almost_equal(rslt1.params, rslt2.params)
        assert_almost_equal(rslt1.bse, rslt2.bse)
Example #4
    def transform(self, data):

        assert np.isfinite(data).all()

        ntest = len(data)

        data = data.copy()

        data.shape = ntest, -1

        assert np.isfinite(data).all()

        print ">>> Computing traintest linear kernel"
        start = time.time()
        kernel_traintest = np.dot(data,
                                  self._train_data.T)

        assert not np.isnan(kernel_traintest).any()
        assert not np.isinf(kernel_traintest).any()

        kernel_traintest /= self._ktrace

        assert not np.isnan(kernel_traintest).any()
        assert not np.isinf(kernel_traintest).any()

        end = time.time()
        print "Time: %s" % (end-start)

        return self._clf.decision_function(kernel_traintest).ravel()
Example #5
def censor_diagnosis(genotype_file, phenotype_file, final_pfile, final_gfile,
                     field='na', start_time=float('nan'), end_time=float('nan')):
    import pandas as pd
    import numpy as np
    genotypes = pd.read_csv(genotype_file)
    phenotypes = pd.read_csv(phenotype_file)
    mg = pd.merge(phenotypes, genotypes, on='id')
    if np.isnan(start_time) and np.isnan(end_time):
        print("Choose appropriate time period")
        return  # no usable time window was given
    if field == 'na':
        if np.isfinite(start_time) and np.isnan(end_time):
            final = mg[mg['AgeAtICD'] >= start_time]
        elif np.isnan(start_time) and np.isfinite(end_time):
            final = mg[mg['AgeAtICD'] <= end_time]
        else:
            final = mg[(mg['AgeAtICD'] >= start_time) & (mg['AgeAtICD'] <= end_time)]
    else:
        mg['diff'] = mg[field] - mg['AgeAtICD']
        if np.isfinite(start_time) and np.isnan(end_time):
            final = mg[(mg['diff'] >= start_time) | np.isnan(mg['diff'])]
        elif np.isnan(start_time) and np.isfinite(end_time):
            final = mg[(mg['diff'] <= end_time) | np.isnan(mg['diff'])]
        else:
            final = mg[((mg['diff'] >= start_time) & (mg['diff'] <= end_time))
                       | np.isnan(mg['diff'])]
    final[['id', 'icd9', 'AgeAtICD']].to_csv(final_pfile)
    final_gp = final.drop_duplicates('id')
    del final_gp['icd9']
    del final_gp['AgeAtICD']
    final_gp.to_csv(final_gfile)
Example #6
    def max_err(self, g_pt, abs_tol, rel_tol):
        """Find the biggest error between g_pt and self.gf.

        What is measured is the violation of relative and absolute errors,
        wrt the provided tolerances (abs_tol, rel_tol).
        A value > 1 means both tolerances are exceeded.

        Return the argmax of min(abs_err / abs_tol, rel_err / rel_tol) over
        g_pt, as well as abs_err and rel_err at this point.
        """
        pos = []
        errs = []
        abs_errs = []
        rel_errs = []

        abs_rel_errs = self.abs_rel_errors(g_pt)
        for abs_err, rel_err in abs_rel_errs:
            if not numpy.all(numpy.isfinite(abs_err)):
                raise ValueError('abs_err not finite', repr(abs_err))
            if not numpy.all(numpy.isfinite(rel_err)):
                raise ValueError('rel_err not finite', repr(rel_err))
            scaled_err = numpy.minimum(abs_err / abs_tol, rel_err / rel_tol)
            max_i = scaled_err.argmax()

            pos.append(max_i)
            errs.append(scaled_err.flatten()[max_i])
            abs_errs.append(abs_err.flatten()[max_i])
            rel_errs.append(rel_err.flatten()[max_i])

        # max over the arrays in g_pt
        max_arg = numpy.argmax(errs)
        max_pos = pos[max_arg]
        return (max_arg, max_pos, abs_errs[max_arg], rel_errs[max_arg])
Example #7
def _assert_all_finite(X):
    """Like assert_all_finite, but only for ndarray."""
    X = np.asanyarray(X)
    if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
            and not np.isfinite(X).all()):
        raise ValueError("Input contains NaN, infinity"
                         " or a value too large for %r." % X.dtype)
Example #8
def remove_unused_values(var, data):
    column_data = Table.from_table(
        Domain([var]),
        data
    )
    array = column_data.X.ravel()
    mask = np.isfinite(array)
    unique = np.array(np.unique(array[mask]), dtype=int)

    if len(unique) == len(var.values):
        return var

    used_values = [var.values[i] for i in unique]
    translation_table = np.array([np.nan] * len(var.values))
    translation_table[unique] = range(len(used_values))

    base_value = -1
    if 0 <= var.base_value < len(var.values):
        base = translation_table[var.base_value]
        if np.isfinite(base):
            base_value = int(base)

    return DiscreteVariable("{}".format(var.name),
                            values=used_values,
                            base_value=base_value,
                            compute_value=Lookup(var, translation_table)
                            )
Example #9
def _plot(x, mph, mpd, threshold, edge, valley, ax, ind):
    """Plot results of the detect_peaks function, see its help."""
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        print('matplotlib is not available.')
    else:
        if ax is None:
            _, ax = plt.subplots(1, 1, figsize=(8, 4))

        ax.plot(x, 'b', lw=1)
        if ind.size:
            label = 'valley' if valley else 'peak'
            label = label + 's' if ind.size > 1 else label
            ax.plot(ind, x[ind], '+', mfc=None, mec='r', mew=2, ms=8,
                    label='%d %s' % (ind.size, label))
            ax.legend(loc='best', framealpha=.5, numpoints=1)
        ax.set_xlim(-.02*x.size, x.size*1.02-1)
        ymin, ymax = x[np.isfinite(x)].min(), x[np.isfinite(x)].max()
        yrange = ymax - ymin if ymax > ymin else 1
        ax.set_ylim(ymin - 0.1*yrange, ymax + 0.1*yrange)
        ax.set_xlabel('Data #', fontsize=14)
        ax.set_ylabel('Amplitude', fontsize=14)
        mode = 'Valley detection' if valley else 'Peak detection'
        ax.set_title("%s (mph=%s, mpd=%d, threshold=%s, edge='%s')"
                     % (mode, str(mph), mpd, str(threshold), edge))
        # plt.grid()
        plt.show()
Example #10
File: f3.py Project: jpcoles/ZM
    def plot(self):
        ax = self.subplot

        xs = planets["Pl. Vref"]
        ys = planets["Pl. Mass"]
        self.active = w = logical_and(isfinite(xs), isfinite(ys)).nonzero()[0]
        xs, ys, clrs = xs[w], ys[w], colors[w]

        ax.set_xscale("log")
        ax.set_yscale("log")

        if not self.issmall:
            artist = ax.scatter(xs, ys, s=80, c=clrs, lw=1, picker=80.0, zorder=1000)
            ax.grid(True, which="both", ls="-", c="#222222")
            ax.set_xlabel(_(r"Reflex velocity (m/s)"))
            ax.set_ylabel(_(r"Mass (Multiples of Earth)"))
            ax.xaxis.set_major_formatter(EPFormatter())
            ax.yaxis.set_major_formatter(EPFormatter())
        else:
            artist = ax.scatter(xs, ys, s=40, c=clrs, lw=1, picker=80.0)
            ax.set_xlabel(_(r"Reflex velocity vs. Mass"))
            ax.xaxis.set_major_locator(NullLocator())
            ax.yaxis.set_major_locator(NullLocator())

        # ax.axis('scaled')
        ax.set_xlim(1e-3, 1e4)
        ax.set_ylim(1e-2, 1e4)

        return artist
Example #11
    def isclose(a, b, rtol=1e-05, atol=1e-08, equal_nan=False):
        def within_tol(x, y, atol, rtol):
            result = np.less_equal(np.abs(x-y), atol + rtol * np.abs(y))
            if np.isscalar(a) and np.isscalar(b):
                result = bool(result)
            return result

        x = np.array(a, copy=False, subok=True, ndmin=1)
        y = np.array(b, copy=False, subok=True, ndmin=1)
        xfin = np.isfinite(x)
        yfin = np.isfinite(y)
        if np.all(xfin) and np.all(yfin):
            return within_tol(x, y, atol, rtol)
        else:
            finite = xfin & yfin
            cond = np.zeros_like(finite, subok=True)
            # Because we're using boolean indexing, x & y must be the same shape.
            # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
            # lib.stride_tricks, though, so we can't import it here.
            x = x * np.ones_like(cond)
            y = y * np.ones_like(cond)
            # Avoid subtraction with infinite/nan values...
            cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
            # Check for equality of infinite values...
            cond[~finite] = (x[~finite] == y[~finite])
            if equal_nan:
                # Make NaN == NaN
                cond[np.isnan(x) & np.isnan(y)] = True
            return cond
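A quick usage check of this backport, assuming the function above is in scope; it mirrors `np.isclose`, including the `equal_nan` switch:

import numpy as np

a = np.array([1.0, np.inf, np.nan])
b = np.array([1.0 + 1e-9, np.inf, np.nan])
print(isclose(a, b))                  # [ True  True False]
print(isclose(a, b, equal_nan=True))  # [ True  True  True]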
Example #12
File: f3.py Project: jpcoles/ZM
    def plot(self):
        ax = self.subplot

        xs = planets["Pl. Semi-axis"]
        ys = planets["Pl. Mass"]
        self.active = w = logical_and(isfinite(xs), isfinite(ys)).nonzero()[0]
        xs, ys, clrs = xs[w], ys[w], colors[w]

        ax.set_xscale("log")
        ax.set_yscale("log")

        if not self.issmall:
            # artist = ax.scatter(xs, ys, s=80, c=clrs, lw=1, picker=80., zorder=1000)
            print "HERERERERER"
            # ax.grid(True, which='both', ls='-', c='#222222')
            ax.set_xlabel(_(r"Orbital radius (Astronomical Units)"))
            ax.set_ylabel(_(r"Mass (Multiples of Earth)"))
            # ax.xaxis.set_major_formatter(EPFormatter())
            # ax.yaxis.set_major_formatter(EPFormatter())
        else:
            # artist = ax.scatter(xs, ys, s=40, c=clrs, lw=1, picker=80.)
            ax.set_xlabel(_(r"Orbital radius vs. Mass"))
            # ax.xaxis.set_major_locator(NullLocator())
            # ax.yaxis.set_major_locator(NullLocator())

        # ax.axis('scaled')
        ax.set_xlim(1e-3, 1e3)
        ax.set_ylim(1e-2, 1e4)

        # print ax.get_position().bounds
        # ax.figure.canvas.blit(mpl.transforms.Bbox.from_bounds(0,0,1,1))
        ax.figure.canvas.draw()
        return None
Example #13
def central_ratio(num, dnm, centerfn=np.median, finite=True):
    """Computes the central tendency (median, by default) of the ratios
    between `num` and `dnm`.  By default, this function gives the
    "Turing ratio" used in the paper by Majaj, Hong, Solomon, and DiCarlo.

    Parameters
    ----------
    num: array-like
        Numerators of ratios

    dnm: array-like, shape = `num.shape`
        Denominators of ratios.  `num` and `dnm` must have the same shape.

    centerfn: function, optional (default=np.median)
        Function to compute the central tendency.

    finite: boolean, optional (default=True)
        If True, only finite numbers in `num` and `dnm` will be used for
        the computation of the central tendency.
    """

    num = np.array(num, dtype=DTYPE)
    dnm = np.array(dnm, dtype=DTYPE)
    assert num.shape == dnm.shape

    num = num.ravel()
    dnm = dnm.ravel()

    if finite:
        fi = np.isfinite(dnm) & np.isfinite(num)
        num = num[fi]
        dnm = dnm[fi]

    return centerfn(num / dnm)
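A small usage sketch, assuming the function above is in scope; `DTYPE` is a module-level constant in the original, assumed here to be a float dtype:

import numpy as np

DTYPE = np.float64  # assumption: the original module defines this constant

num = np.array([1.0, 2.0, np.nan, 4.0])
dnm = np.array([2.0, 2.0, 1.0, np.inf])
print(central_ratio(num, dnm))  # median of [0.5, 1.0] -> 0.75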
Example #14
def fom_to_hl(fom, phi):
    '''Convert FOMs to HLA and HLB - Kevin Cowtan - www.ysbl.york.ac.uk/~cowtan/clipper'''
    x0 = np.abs(fom)
    a0 = -7.107935 * x0
    a1 = 3.553967 - 3.524142 * x0
    a2 = 1.639294 - 2.228716 * x0
    a3 = 1.0 - x0
    w = a2 / (3.0 * a3)
    p = a1 / (3.0 * a3) - w * w
    q = -w * w * w + 0.5 * (a1 * w - a0) / a3
    d = np.sqrt(q * q + p * p * p)
    q1 = q + d
    q2 = q - d
    r1 = np.power(np.abs(q1), 1.0 / 3.0)
    r2 = np.power(np.abs(q2), 1.0 / 3.0)
    if q1 <= 0.0:
        r1 = -r1
    if q2 <= 0.0:
        r2 = -r2
    x = r1 + r2 - w
    HLA = x * np.cos(phi)
    HLB = x * np.sin(phi)
    if np.isfinite(HLA) and np.isfinite(HLB):
        return HLA, HLB
    else:
        print(" Error determining HL coefficients for FOM = "+str(fom)+' and phase = '+str(phi))
        return None, None
Example #15
    def _set_minmax(self):
        data = self._get_fast_data()
        try:
            self.maxval = numpy.nanmax(data)
            self.minval = numpy.nanmin(data)
        except Exception:
            self.maxval = 0
            self.minval = 0

        # TODO: see if there is a faster way to ignore infinity
        try:
            if numpy.isfinite(self.maxval):
                self.maxval_noinf = self.maxval
            else:
                self.maxval_noinf = numpy.nanmax(data[numpy.isfinite(data)])
        except Exception:
            self.maxval_noinf = self.maxval

        try:
            if numpy.isfinite(self.minval):
                self.minval_noinf = self.minval
            else:
                self.minval_noinf = numpy.nanmin(data[numpy.isfinite(data)])
        except Exception:
            self.minval_noinf = self.minval
Example #16
    def __init__(self, meshsize, cellsize=None, origin=(0, 0, 0),
                 array_order=ZYX, size=None):
        if cellsize is None and size is not None:
            cellsize = np.array(
                size, dtype=float) / np.array(meshsize, dtype=float)
        self.mesh_size = np.array(meshsize, dtype=int)
        self.cell_size = np.array(cellsize, dtype=float)
        self.origin = np.array(origin, dtype=float)
        self.array_order = array_order
        self.n = np.prod(self.mesh_size)
        self.mesh_size_ao = self.mesh_size[list(array_order)]
        self.cell_size_ao = self.cell_size[list(array_order)]

        # Check validity
        assert self.mesh_size.shape == (3,)
        assert self.cell_size.shape == (3,)
        assert self.origin.shape == (3,)
        assert len(array_order) == 3

        assert self.cell_size[0] > 0 and self.cell_size[1] > 0 \
            and self.cell_size[2] > 0
        assert self.mesh_size[0] > 0 and self.mesh_size[1] > 0 \
            and self.mesh_size[2] > 0
        assert all(np.isfinite(self.cell_size)) and all(np.isfinite(self.origin))
        assert sorted(array_order) == [0, 1, 2]
Example #17
def _extrapolate_out_mask(data, mask, iterations=1):
    """ Extrapolate values outside of the mask.
    """
    if iterations > 1:
        data, mask = _extrapolate_out_mask(data, mask,
                                          iterations=iterations - 1)
    new_mask = ndimage.binary_dilation(mask)
    larger_mask = np.zeros(np.array(mask.shape) + 2, dtype=bool)
    larger_mask[1:-1, 1:-1, 1:-1] = mask
    # Use nans as missing value: ugly
    masked_data = np.zeros(larger_mask.shape + data.shape[3:])
    masked_data[1:-1, 1:-1, 1:-1] = data.copy()
    masked_data[np.logical_not(larger_mask)] = np.nan
    outer_shell = larger_mask.copy()
    outer_shell[1:-1, 1:-1, 1:-1] = np.logical_xor(new_mask, mask)
    outer_shell_x, outer_shell_y, outer_shell_z = np.where(outer_shell)
    extrapolation = list()
    for i, j, k in [(1, 0, 0), (-1, 0, 0), 
                    (0, 1, 0), (0, -1, 0),
                    (0, 0, 1), (0, 0, -1)]:
        this_x = outer_shell_x + i
        this_y = outer_shell_y + j
        this_z = outer_shell_z + k
        extrapolation.append(masked_data[this_x, this_y, this_z])

    extrapolation = np.array(extrapolation)
    extrapolation = (np.nansum(extrapolation, axis=0)
                     / np.sum(np.isfinite(extrapolation), axis=0))
    extrapolation[np.logical_not(np.isfinite(extrapolation))] = 0
    new_data = np.zeros_like(masked_data)
    new_data[outer_shell] = extrapolation
    new_data[larger_mask] = masked_data[larger_mask]
    return new_data[1:-1, 1:-1, 1:-1], new_mask
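A minimal check of the behaviour, assuming the function above and its `scipy.ndimage` dependency are in scope: with one iteration, a single masked-in voxel is propagated to its six face neighbours:

import numpy as np
from scipy import ndimage

data = np.zeros((3, 3, 3))
data[1, 1, 1] = 8.0
mask = np.zeros((3, 3, 3), dtype=bool)
mask[1, 1, 1] = True

new_data, new_mask = _extrapolate_out_mask(data, mask)
print(new_data[0, 1, 1])  # 8.0: averaged from its single masked-in neighbour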
Example #18
def _check_maxexp(np_type, maxexp):
    """ True if fp type `np_type` seems to have `maxexp` maximum exponent

    We're testing "maxexp" as returned by numpy. This value is set to one
    greater than the maximum power of 2 that `np_type` can represent.

    Assumes base 2 representation.  Very crude check

    Parameters
    ----------
    np_type : numpy type specifier
        Any specifier for a numpy dtype
    maxexp : int
        Maximum exponent to test against

    Returns
    -------
    tf : bool
        True if `maxexp` is the correct maximum exponent, False otherwise.
    """
    dt = np.dtype(np_type)
    np_type = dt.type
    two = np_type(2).reshape((1,))  # to avoid upcasting
    return (np.isfinite(two ** (maxexp - 1)) and
            not np.isfinite(two ** maxexp))
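Cross-checking against `np.finfo`, assuming the function above is in scope: `maxexp` is one greater than the largest power of 2 the type can hold, so 2**(maxexp - 1) is finite while 2**maxexp overflows to inf:

import numpy as np

maxexp = np.finfo(np.float32).maxexp           # 128 for float32
print(_check_maxexp(np.float32, maxexp))       # True
print(_check_maxexp(np.float32, maxexp + 1))   # [False] -> falsy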
Example #19
def shifted_corr(reference, image, displacement):
    """Calculate the correlation between the reference and the image shifted
    by the given displacement.

    Parameters
    ----------
    reference : np.ndarray
    image : np.ndarray
    displacement : np.ndarray

    Returns
    -------
    correlation : float

    """

    ref_cuts = np.maximum(0, displacement)
    ref = reference[ref_cuts[0]:, ref_cuts[1]:, ref_cuts[2]:]
    im_cuts = np.maximum(0, -displacement)
    im = image[im_cuts[0]:, im_cuts[1]:, im_cuts[2]:]
    s = np.minimum(im.shape, ref.shape)
    ref = ref[:s[0], :s[1], :s[2]]
    im = im[:s[0], :s[1], :s[2]]
    ref -= nanmean(ref.reshape(-1, ref.shape[-1]), axis=0)
    ref = np.nan_to_num(ref)
    im -= nanmean(im.reshape(-1, im.shape[-1]), axis=0)
    im = np.nan_to_num(im)
    assert np.all(np.isfinite(ref)) and np.all(np.isfinite(im))
    corr = nanmean(
        [old_div(np.sum(i * r), np.sqrt(np.sum(i * i) * np.sum(r * r))) for
         i, r in zip(np.rollaxis(im, -1), np.rollaxis(ref, -1))])
    return corr
Example #20
    def start(self, f, a, b, args=()):
        r"""Prepare for the iterations."""
        self.function_calls = 0
        self.iterations = 0

        self.f = f
        self.args = args
        self.ab[:] = [a, b]
        if not np.isfinite(a) or np.imag(a) != 0:
            raise ValueError("Invalid x value: %s " % (a))
        if not np.isfinite(b) or np.imag(b) != 0:
            raise ValueError("Invalid x value: %s " % (b))

        fa = self._callf(a)
        if not np.isfinite(fa) or np.imag(fa) != 0:
            raise ValueError("Invalid function value: f(%f) -> %s " % (a, fa))
        if fa == 0:
            return _ECONVERGED, a
        fb = self._callf(b)
        if not np.isfinite(fb) or np.imag(fb) != 0:
            raise ValueError("Invalid function value: f(%f) -> %s " % (b, fb))
        if fb == 0:
            return _ECONVERGED, b

        if np.sign(fb) * np.sign(fa) > 0:
            raise ValueError("a, b must bracket a root f(%e)=%e, f(%e)=%e " %
                             (a, fa, b, fb))
        self.fab[:] = [fa, fb]

        return _EINPROGRESS, sum(self.ab) / 2.0
Example #21
def reflective_transformation(y, lb, ub):
    """Compute reflective transformation and its gradient."""
    if in_bounds(y, lb, ub):
        return y, np.ones_like(y)

    lb_finite = np.isfinite(lb)
    ub_finite = np.isfinite(ub)

    x = y.copy()
    g_negative = np.zeros_like(y, dtype=bool)

    mask = lb_finite & ~ub_finite
    x[mask] = np.maximum(y[mask], 2 * lb[mask] - y[mask])
    g_negative[mask] = y[mask] < lb[mask]

    mask = ~lb_finite & ub_finite
    x[mask] = np.minimum(y[mask], 2 * ub[mask] - y[mask])
    g_negative[mask] = y[mask] > ub[mask]

    mask = lb_finite & ub_finite
    d = ub - lb
    t = np.remainder(y[mask] - lb[mask], 2 * d[mask])
    x[mask] = lb[mask] + np.minimum(t, 2 * d[mask] - t)
    g_negative[mask] = t > d[mask]

    g = np.ones_like(y)
    g[g_negative] = -1

    return x, g
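A quick sanity check, assuming the function above is in scope; `in_bounds` lives alongside it in SciPy's least-squares helpers and is re-sketched here as a stand-in:

import numpy as np

def in_bounds(y, lb, ub):  # minimal stand-in for scipy's helper
    return np.all((y >= lb) & (y <= ub))

lb, ub = np.array([0.0]), np.array([1.0])
x, g = reflective_transformation(np.array([1.25]), lb, ub)
print(x, g)  # [0.75] [-1.]: the point is folded back off the upper bound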
Example #22
def plot(state,splits,merges):
    plt.clf()
    plt.axis([0, lifetime, -(maxl/2), maxl/2])
    for track in range(elements):
        newplot = []
        gaps=[]
        gapst=[]
        start = find_first(state[track])
        end = find_last(state[track])

        if np.isfinite(splits[track]):
            parent = splits[track]
            plt.plot([start-1,start],[state[parent][start-1][0],state[track][start][0]],'--')


        if np.isfinite(start) and np.isfinite(end):
            for t in range(start,end+1):
                newplot.append(state[track][t][0])
                if np.isfinite(state[track][t][0]): # to plot dotted lines between the gaps
                    gapst.append(t)
                    gaps.append(state[track][t][0])


            plt.plot(gapst, gaps, ':')
            plt.draw()
            plt.plot(range(start, end+1), newplot, '.-')
            plt.draw()
    plt.show()
Example #23
def test_underflow_or_overlow():
    with np.errstate(all="raise"):
        # Generate some weird data with hugely unscaled features
        rng = np.random.RandomState(0)
        n_samples = 100
        n_features = 10

        X = rng.normal(size=(n_samples, n_features))
        X[:, :2] *= 1e300
        assert_true(np.isfinite(X).all())

        # Use MinMaxScaler to scale the data without introducing a numerical
        # instability (computing the standard deviation naively is not possible
        # on this data)
        X_scaled = MinMaxScaler().fit_transform(X)
        assert_true(np.isfinite(X_scaled).all())

        # Define a ground truth on the scaled data
        ground_truth = rng.normal(size=n_features)
        y = (np.dot(X_scaled, ground_truth) > 0.0).astype(np.int32)
        assert_array_equal(np.unique(y), [0, 1])

        model = SGDClassifier(alpha=0.1, loss="squared_hinge", n_iter=500)

        # smoke test: model is stable on scaled data
        model.fit(X_scaled, y)
        assert_true(np.isfinite(model.coef_).all())

        # model is numerically unstable on unscaled data
        msg_regxp = (
            r"Floating-point under-/overflow occurred at epoch #.*"
            " Scaling input data with StandardScaler or MinMaxScaler"
            " might help."
        )
        assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
Example #24
def prepare_logged(x, y):
    """
    Transform `x` and `y` to a log scale while dealing with zeros.

    This function scales `x` and `y` such that the points that are zero in one
    array are set to the min of the other array.

    When plotting expression data, frequently one sample will have reads in
    a particular feature but the other sample will not.  Expression data also
    tends to look better on a log scale, but log(0) is undefined and therefore
    cannot be shown on a plot.  This function allows these points to be shown,
    piled up along one side of the plot.

    :param x,y: NumPy arrays
    """
    xi = np.log2(x)
    yi = np.log2(y)

    xv = np.isfinite(xi)
    yv = np.isfinite(yi)

    global_min = min(xi[xv].min(), yi[yv].min())
    global_max = max(xi[xv].max(), yi[yv].max())

    xi[~xv] = global_min
    yi[~yv] = global_min

    return xi, yi
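A small usage sketch, assuming the function above is in scope: zeros, which would map to log2(0) = -inf, are pinned to the global minimum of the finite values:

import numpy as np

x = np.array([0.0, 1.0, 4.0])
y = np.array([2.0, 0.0, 8.0])
xi, yi = prepare_logged(x, y)
print(xi)  # [0. 0. 2.]: the zero in x is pinned to the global min, log2(1) = 0
print(yi)  # [1. 0. 3.]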
Example #25
    def regularization(self, regularization):
        if regularization is None:
            self._regularization = None
            return None
        
        # Can be positive float, or positive values for all pixels.
        try:
            regularization = float(regularization)
        except (TypeError, ValueError):
            regularization = np.array(regularization).flatten()

            if regularization.size != len(self.dispersion):
                raise ValueError("regularization must be a positive value or "
                                 "an array of positive values for each pixel "
                                 "({0} != {1})".format(
                                    regularization.size,
                                    len(self.dispersion)))

            if any(0 > regularization) \
            or not np.all(np.isfinite(regularization)):
                raise ValueError("regularization terms must be "
                                 "positive and finite")
        else:
            if 0 > regularization or not np.isfinite(regularization):
                raise ValueError("regularization term must be "
                                 "positive and finite")
            regularization = np.ones_like(self.dispersion) * regularization
        self._regularization = regularization
        return None
Example #26
def fft_to_hkl(h, k, l, val, coeffs, fsc_curve, resolution, full_size, flag_frac):
    '''Reformat fft record as hkl record'''
    if h or k or l:
        res = full_size / (np.linalg.norm(np.asarray([h, k, l])))
    else:
        res = 0.0

    if res < resolution or not np.isfinite(res):
        return None, None

    mag = np.abs(val)
    angle = np.angle(val, deg = True)

    if angle < 0:
        angle += 360.0

    fsc = curve_function((1. / res), coeffs, fsc_curve)
    sig = fsc_to_sigf(mag, fsc)
    fom = fsc_to_fom(fsc)
    hla, hlb = fom_to_hl(fom, np.angle(val))
    rf = bernoulli.rvs(flag_frac)
    record = np.array([h, k, l, mag, sig, angle, fom, hla, hlb, 0.0, 0.0, rf], dtype = np.float32)
    
    if not np.all(np.isfinite(record)):
        print("Skipping record %i %i %i - " %(h, k, l)),
        print(record)
        return None, None

    return record, res
Example #27
def nextpow2(n):
    """Return the next power of 2 such as 2^p >= n.

    Notes
    -----

    Infinite and nan are left untouched, negative values are not allowed."""
    if np.any(n < 0):
        raise ValueError("n should be > 0")

    if np.isscalar(n):
        f, p = np.frexp(n)
        if f == 0.5:
            return p-1
        elif np.isfinite(f):
            return p
        else:
            return f
    else:
        f, p = np.frexp(n)
        res = f
        bet = np.isfinite(f)
        exa = (f == 0.5)
        res[bet] = p[bet]
        res[exa] = p[exa] - 1
        return res
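A quick usage check, assuming the function above is in scope; exact powers of two map to their own exponent:

import numpy as np

print(nextpow2(17))                     # 5, since 2**5 = 32 >= 17
print(nextpow2(16))                     # 4, since 16 is already 2**4
print(nextpow2(np.array([3.0, 16.0])))  # [2. 4.]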
Example #28
    def calculateLevels(self):
        """Calculate contour levels from data and settings.

        Returns levels as 1d numpy
        """

        # get dataset
        s = self.settings
        d = self.document

        minval, maxval = 0., 1.
        if s.data in d.data:
            # scan data
            data = d.data[s.data].data
            minval, maxval = N.nanmin(data), N.nanmax(data)
            if not N.isfinite(minval):
                minval = 0.
            if not N.isfinite(maxval):
                maxval = 1.

        # override if not auto
        if s.min != 'Auto':
            minval = s.min
        if s.max != 'Auto':
            maxval = s.max

        numlevels = s.numLevels
        scaling = s.scaling

        if numlevels == 1 and scaling != 'manual':
            # calculations below assume numlevels > 1
            levels = N.array([minval,])
        else:
            # trap out silly cases
            if minval == maxval:
                minval = 0.
                maxval = 1.
                
            # calculate levels for each scaling
            if scaling == 'linear':
                delta = (maxval - minval) / (numlevels-1)
                levels = minval + N.arange(numlevels)*delta
            elif scaling == 'sqrt':
                delta = N.sqrt(maxval - minval) / (numlevels-1)
                levels = minval + (N.arange(numlevels)*delta)**2
            elif scaling == 'log':
                delta = N.log(maxval/minval) / (numlevels-1)
                levels = N.exp(N.arange(numlevels)*delta)*minval
            elif scaling == 'squared':
                delta = (maxval - minval)**2 / (numlevels-1)
                levels = minval + N.sqrt(N.arange(numlevels)*delta)
            else:
                # manual
                levels = N.array(s.manualLevels)

        # for the user later
        # we do this to convert array to list of floats
        s.levelsOut = [float(i) for i in levels]

        return minval, maxval, levels
Example #29
def maybe_expand_bounds(bounds):
    minval, maxval = bounds
    if not (np.isfinite(minval) and np.isfinite(maxval)):
        minval, maxval = -1.0, 1.0
    elif minval == maxval:
        minval, maxval = minval - 1, minval + 1
    return minval, maxval
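A quick usage check, assuming the function above is in scope:

import numpy as np

print(maybe_expand_bounds((np.nan, np.nan)))  # (-1.0, 1.0): no finite data
print(maybe_expand_bounds((3.0, 3.0)))        # (2.0, 4.0): degenerate range padded
print(maybe_expand_bounds((0.0, 5.0)))        # (0.0, 5.0): left untouched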
Example #30
    def getChiImage(self, imgi=-1, img=None, srcs=None, minsb=0.):
        if img is None:
            img = self.getImage(imgi)

        # print('getChiImage:', img, ':', img.shape)
        # if srcs is None:
        #     print('Sources:')
        #     for src in self.catalog:
        #         print('  ', src)
        # else:
        #     print('Sources:', srcs)
        # print('LogPriorDerivatives:', self.getLogPriorDerivatives())
            
        mod = self.getModelImage(img, srcs=srcs, minsb=minsb)
        #print('mod:', mod.shape)
        chi = (img.getImage() - mod) * img.getInvError()
        if not np.all(np.isfinite(chi)):
            print('Chi not finite')
            print('Image finite?', np.all(np.isfinite(img.getImage())))
            print('Mod finite?', np.all(np.isfinite(mod)))
            print('InvErr finite?', np.all(np.isfinite(img.getInvError())))
            print('Current thawed parameters:')
            self.printThawedParams()
            print('Current sources:')
            for src in self.getCatalog():
                print('  ', src)
            print('Image:', img)
            print('sky:', img.getSky())
            print('psf:', img.getPsf())
        return chi
Example #31
    def __call__(self, name):
        mjd = utils.ctime2mjd(self.data["t"])
        mjd[~np.isfinite(mjd)] = 0
        pos = coordinates.ephem_pos(name, mjd)
        pos /= utils.degree
        return pos
Example #32
def patch_image(img,
                mask,
                dxdy=[(-1, 0), (1, 0), (0, -1), (0, 1)],
                required=None):
    '''
    Patch masked pixels by iteratively averaging non-masked neighboring pixels.

    WARNING: this modifies BOTH the "img" and "mask" arrays!

    mask: True for good pixels
    required: if non-None: True for pixels you want to be patched.
    dxdy: Pixels to average in, relative to pixels to be patched.

    Returns True if patching was successful.
    '''
    assert (img.shape == mask.shape)
    assert (len(img.shape) == 2)
    h, w = img.shape
    Nlast = -1
    while True:
        needpatching = np.logical_not(mask)
        if required is not None:
            needpatching *= required
        I = np.flatnonzero(needpatching)
        if len(I) == 0:
            break
        if len(I) == Nlast:
            return False
        #print 'Patching', len(I), 'pixels'
        Nlast = len(I)
        iy, ix = np.unravel_index(I, img.shape)
        psum = np.zeros(len(I), img.dtype)
        pn = np.zeros(len(I), int)

        for dx, dy in dxdy:
            ok = True
            if dx < 0:
                ok = ok * (ix >= (-dx))
            if dx > 0:
                ok = ok * (ix <= (w - 1 - dx))
            if dy < 0:
                ok = ok * (iy >= (-dy))
            if dy > 0:
                ok = ok * (iy <= (h - 1 - dy))

            # darn, NaN * False = NaN, not zero.
            finite = np.isfinite(img[iy[ok] + dy, ix[ok] + dx])
            ok[ok] *= finite

            psum[ok] += (img[iy[ok] + dy, ix[ok] + dx] *
                         mask[iy[ok] + dy, ix[ok] + dx])
            pn[ok] += mask[iy[ok] + dy, ix[ok] + dx]

            # print 'ix', ix
            # print 'iy', iy
            # print 'dx,dy', dx,dy
            # print 'ok', ok
            # print 'psum', psum
            # print 'pn', pn

        img.flat[I] = (psum / np.maximum(pn, 1)).astype(img.dtype)
        mask.flat[I] = (pn > 0)
        #print 'Patched', np.sum(pn > 0)
    return True
Example #33
def lnprob(theta, x, y, yerr):
    lp = lnprior(theta)
    if not np.isfinite(lp):
        return -np.inf
    return lp + lnlike(theta, x, y, yerr)
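`lnprior` and `lnlike` are not part of this example; the sketch below shows hypothetical companions in the usual emcee pattern, for illustration only:

import numpy as np

def lnprior(theta):  # hypothetical: flat prior on a single slope parameter
    m, = theta
    return 0.0 if -5.0 < m < 5.0 else -np.inf

def lnlike(theta, x, y, yerr):  # hypothetical: Gaussian log-likelihood for y = m*x
    m, = theta
    return -0.5 * np.sum(((y - m * x) / yerr) ** 2)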
Example #34
    def test_daofind_flux_negative(self):
        """Test handling of negative flux (here created by large sky)."""
        data = np.ones((5, 5))
        data[2, 2] = 10.
        t = daofind(data, threshold=0.1, fwhm=1.0, sky=10)
        assert not np.isfinite(t['mag'])
Example #35
xc = np.unique(xc_list)
yc = np.unique(yc_list)

nr, nc = yc.size, xc.size

plot_helper = [(betaN, 'beta'), (ztN, 'zt'), (dzN, 'dz'), (CN, 'C')]

fig, (ax1,ax2,ax3,ax4) = plt.subplots(1, 4, sharey=True, figsize=(16,3.5))
for i, ax in enumerate([ax1, ax2, ax3, ax4]):
    title = plot_helper[i][1]
    data  = plot_helper[i][0]
    mask  = np.isfinite(data)
    
    ax.set_title(title)
    sci = ax.scatter(xc_list[mask]/1e3, yc_list[mask]/1e3, c=data[mask])
    fig.colorbar(sci, ax=ax)
    fig.savefig('CPD_initial.png', bbox_inches='tight')
    
    print("{:5} mean={:.2f} std={:.2f}".format(title, data.mean(), np.std(data)))

"""
**Second pass**

Take the mean and standard deviation of all parameters and add them
as priors within the misfit function, then run the optimisation again.

beta, C, and z_t have the lowest standard deviation therefore the
Example #36
def classify(p_frame, gcdfile=None, surface=None):
    if is_data(p_frame):
        return True
    pclass = 101  # fallback value; should always be overwritten below
    if surface is None:
        if gcdfile is None:
            surface = icecube.MuonGun.ExtrudedPolygon.from_I3Geometry(p_frame['I3Geometry'])
        else:
            surface = icecube.MuonGun.ExtrudedPolygon.from_file(gcdfile, padding=0)
    I3Tree = p_frame['I3MCTree']
    neutrino = find_all_neutrinos(p_frame)
    children = I3Tree.children(neutrino)
    p_types = [np.abs(child.pdg_encoding) for child in children]
    p_strings = [child.type_string for child in children]
    p_frame.Put("visible_nu", neutrino)
    IC_hit = np.any([((has_signature(tp, surface) != -1) & np.isfinite(tp.length)) for tp in children])
    if p_frame['I3MCWeightDict']['InteractionType'] == 3 and (len(p_types) == 1 and p_strings[0] == 'Hadrons'):
        pclass = 7  # Glashow Cascade
    else:
        if (11 in p_types) or (p_frame['I3MCWeightDict']['InteractionType'] == 2):
            if IC_hit:
                pclass = 1  # Cascade
            else:
                pclass = 0  # Uncontained Cascade
        elif (13 in p_types):
            mu_ind = p_types.index(13)
            p_frame.Put("visible_track", children[mu_ind])
            if not IC_hit:
                pclass = 11 # Passing Track
            elif p_frame['I3MCWeightDict']['InteractionType'] == 3:
                if has_signature(children[mu_ind], surface) == 0:
                    pclass = 8  # Glashow Track
            elif has_signature(children[mu_ind], surface) == 0:
                pclass = 3  # Starting Track
            elif has_signature(children[mu_ind], surface) == 1:
                pclass = 2  # Through Going Track
            elif has_signature(children[mu_ind], surface) == 2:
                pclass = 4  # Stopping Track
        elif (15 in p_types):
            tau_ind = p_types.index(15)
            p_frame.Put("visible_track", children[tau_ind])
            if not IC_hit:
                pclass = 12 # uncontained tau something...
            else:
                # consider to use the interactiontype here...
                if p_frame['I3MCWeightDict']['InteractionType'] == 3:
                    pclass =  9  # Glashow Tau
                else:
                    had_ind = p_strings.index('Hadrons')
                    try:
                        tau_child = I3Tree.children(children[tau_ind])[-1]
                    except Exception:
                        tau_child = None
                    if tau_child:
                        if np.abs(tau_child.pdg_encoding) == 13:
                            if has_signature(tau_child, surface) == 0:
                                pclass = 3  # Starting Track
                            if has_signature(tau_child, surface) == 1:
                                pclass = 2  # Through Going Track
                            if has_signature(tau_child, surface) == 2:
                                pclass = 4  # Stopping Track
                        else:
                            if has_signature(children[tau_ind], surface) == 0 and has_signature(tau_child, surface) == 0:
                                pclass = 5  # Double Bang
                            if has_signature(children[tau_ind], surface) == 0 and has_signature(tau_child, surface) == -1:
                                pclass = 3  # Starting Track
                            if has_signature(children[tau_ind], surface) == 2 and has_signature(tau_child, surface) == 0:
                                pclass = 6  # Stopping Tau
                            if has_signature(children[tau_ind], surface) == 1:
                                pclass = 2  # Through Going Track
                    else:  # tau decay length too large, so no children are simulated
                        if has_signature(children[tau_ind], surface) == 0:
                            pclass = 3 # Starting Track
                        if has_signature(children[tau_ind], surface) == 1:
                            pclass = 2  # Through Going Track
                        if has_signature(children[tau_ind], surface) == 2:
                            pclass = 4  # Stopping Track
        else:
            pclass = 100 # unclassified
    #print('Classification: {}'.format(pclass))
    p_frame.Put("classification", icetray.I3Int(pclass))
    return
Example #37
def lnlike(p):

    # Deal with uniform priors
    if not np.all((uni_pri[:, 0] <= p) & (p <= uni_pri[:, 1])):
        return bad_res

    # Deal with Gaussian priors
    lnp = 0.
    if len(ini['gauss_priors']) > 0:
        lnp = np.sum(
            -0.5 * (p[ix_gauss_pri] - gauss_pri[:, 0])**2.
            / gauss_pri[:, 1]**2.)

    # Create parameters dictionary for class and likelihoods
    class_input = ini['base_par_class'].copy()
    likes_input = ini['base_par_likes'].copy()

    # Loop over parameters
    for i, par in enumerate(ini['var_par']):
        if par[0] == 'var_class':
            class_input[par[1]] = p[i]
        else:
            likes_input[par[1]] = p[i]

    # Deal with constraints
    for cst in ini['constraints']:
        exec(cst)

    # Deal with parameter arrays
    final_class_input = class_input.copy()
    for n in ini['array_var'].keys():
        all_val = []
        for i in range(ini['array_var'][n]):
            all_val.append(str(final_class_input['%s_val_%s' % (n, i)]))
            final_class_input.pop('%s_val_%s' % (n, i))
        final_class_input[n] = ','.join(all_val)

    # Run class
    class_run = classy.Class()
    class_run.set(final_class_input)
    try:
        class_run.compute()
    except Exception as e:
        if ini['debug_mode']:
            print(e)
        class_run.struct_cleanup()
        class_run.empty()
        return bad_res

    # Compute likelihoods
    lnls = [0.]*len(likes)
    for i, like in enumerate(likes):
        try:
            lnls[i] = float(like(class_input, likes_input, class_run))
        except Exception as e:
            if ini['debug_mode']:
                print(e)
            class_run.struct_cleanup()
            class_run.empty()
            return bad_res
        if not np.isfinite(lnls[i]):
            if ini['debug_mode']:
                print("The likelihood '%s' is not finite." % ini['likelihoods'][i])
            class_run.struct_cleanup()
            class_run.empty()
            return bad_res

    # Compute derived parameters if requested
    derivs = []
    if len(ini['derivs']) > 0:
        bg = class_run.get_background() # in case it's needed
        for deriv in ini['derivs']:
            exec('derivs.append(%s)' % deriv[1])
            # Compute prior on derived parameter if requested
            if deriv[0] in drv_gauss_pri:
                ix = drv_gauss_pri.index(deriv[0])
                pri = ini['drv_gauss_priors'][ix]
                lnp += -0.5 * (derivs[-1] - pri[1])**2. / pri[2]**2.
            if deriv[0] in drv_uni_pri:
                ix = drv_uni_pri.index(deriv[0])
                pri = ini['drv_uni_priors'][ix]
                test_uni_pri = pri[1] < derivs[-1] < pri[2]
                if not test_uni_pri:
                    class_run.struct_cleanup()
                    class_run.empty()
                    return bad_res

    # Clean up after CLASS
    class_run.struct_cleanup()
    class_run.empty()

    # Return log(likes*prior)/T, log(prior), log(likes), derivs
    res = [(sum(lnls) + lnp) / ini['temperature'], lnp] + lnls + derivs
    return tuple(res)
Example #38
if (__name__ == "__main__") & (not ini['debug_mode']):

    sampler = emcee.EnsembleSampler(
        n_walkers,
        n_dim,
        lnlike,
        moves=emcee.moves.StretchMove(a=ini['stretch']),
        pool=pool,
        backend=backend,
        blobs_dtype=blobs_dtype,
    )
    ct = 0
    for result in sampler.sample(p_start, iterations=n_steps, thin_by=thin_by):
        # One-time check for infinities
        if ct == 0:
            n_finite = np.isfinite(result.log_prob).sum()
            if n_finite < 2:
                raise ValueError(
                    "Your chain cannot progress: "
                    "less than 2 of your walkers are starting at a finite value of the posterior. "
                    "Please check if your starting positions are correct, and/or use "
                    "debug mode to check your likelihoods."
                )
            elif n_finite < (0.5 * n_walkers):
                print(
                    "Warning, your chain will take time to converge: "
                    "only %s%% of your walkers are starting at a finite value of the posterior. "
                    "Please check if your starting positions are correct, and/or use "
                    "debug mode to check your likelihoods." % (n_finite * 100. / n_walkers)
                )
        # Always save the last MCMC step as input file for future chain
Example #39
def _fast_kde(x, cumulative=False, bw=4.5, xmin=None, xmax=None):
    """Fast Fourier transform-based Gaussian kernel density estimate (KDE).

    The code was adapted from https://github.com/mfouesneau/faststats

    Parameters
    ----------
    x : Numpy array or list
    cumulative : bool
        If true, estimate the cdf instead of the pdf
    bw : float
        Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the
        smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule
        of thumb (the default rule used by SciPy).
    xmin : float
        Manually set lower limit.
    xmax : float
        Manually set upper limit.

    Returns
    -------
    density: A gridded 1D KDE of the input points (x)
    xmin: minimum value of x
    xmax: maximum value of x
    """
    x = np.asarray(x, dtype=float)
    x = x[np.isfinite(x)]
    if x.size == 0:
        warnings.warn("kde plot failed, you may want to check your data")
        return np.array([np.nan]), np.nan, np.nan

    len_x = len(x)
    n_points = 200 if (xmin or xmax) is None else 500

    if xmin is None:
        xmin = np.min(x)
    if xmax is None:
        xmax = np.max(x)

    assert np.min(x) >= xmin
    assert np.max(x) <= xmax

    log_len_x = np.log(len_x) * bw

    n_bins = min(int(len_x**(1 / 3) * log_len_x * 2), n_points)
    if n_bins < 2:
        warnings.warn("kde plot failed, you may want to check your data")
        return np.array([np.nan]), np.nan, np.nan

    hist, bin_edges = np.histogram(x, bins=n_bins, range=(xmin, xmax))
    grid = hist / (hist.sum() * np.diff(bin_edges))

    # _, grid, _ = histogram(x, n_bins, range_hist=(xmin, xmax))

    scotts_factor = len_x**(-0.2)
    kern_nx = int(scotts_factor * 2 * np.pi * log_len_x)
    kernel = gaussian(kern_nx, scotts_factor * log_len_x)

    npad = min(n_bins, 2 * kern_nx)
    grid = np.concatenate(
        [grid[npad:0:-1], grid, grid[n_bins:n_bins - npad:-1]])
    density = convolve(grid, kernel, mode="same",
                       method="direct")[npad:npad + n_bins]
    norm_factor = (2 * np.pi * log_len_x**2 * scotts_factor**2)**0.5

    density /= norm_factor

    if cumulative:
        density = density.cumsum() / density.sum()

    return density, xmin, xmax
Example #40
def _fast_kde_2d(x, y, gridsize=(128, 128), circular=False):
    """
    2D fft-based Gaussian kernel density estimate (KDE).

    The code was adapted from https://github.com/mfouesneau/faststats

    Parameters
    ----------
    x : Numpy array or list
    y : Numpy array or list
    gridsize : tuple
        Number of points used to discretize data. Use powers of 2 for fft optimization
    circular: bool
        If True, use circular boundaries. Defaults to False
    Returns
    -------
    grid: A gridded 2D KDE of the input points (x, y)
    xmin: minimum value of x
    xmax: maximum value of x
    ymin: minimum value of y
    ymax: maximum value of y
    """
    x = np.asarray(x, dtype=float)
    x = x[np.isfinite(x)]
    y = np.asarray(y, dtype=float)
    y = y[np.isfinite(y)]

    xmin, xmax = x.min(), x.max()
    ymin, ymax = y.min(), y.max()

    len_x = len(x)
    weights = np.ones(len_x)
    n_x, n_y = gridsize

    d_x = (xmax - xmin) / (n_x - 1)
    d_y = (ymax - ymin) / (n_y - 1)

    xyi = _stack(x, y).T
    xyi -= [xmin, ymin]
    xyi /= [d_x, d_y]
    xyi = np.floor(xyi, xyi).T

    scotts_factor = len_x**(-1 / 6)
    cov = _cov(xyi)
    std_devs = np.diag(cov**0.5)
    kern_nx, kern_ny = np.round(scotts_factor * 2 * np.pi * std_devs)

    inv_cov = np.linalg.inv(cov * scotts_factor**2)

    x_x = np.arange(kern_nx) - kern_nx / 2
    y_y = np.arange(kern_ny) - kern_ny / 2
    x_x, y_y = np.meshgrid(x_x, y_y)

    kernel = _stack(x_x.flatten(), y_y.flatten())
    kernel = _dot(inv_cov, kernel) * kernel
    kernel = np.exp(-kernel.sum(axis=0) / 2)
    kernel = kernel.reshape((int(kern_ny), int(kern_nx)))

    boundary = "wrap" if circular else "symm"

    grid = coo_matrix((weights, xyi), shape=(n_x, n_y)).toarray()
    grid = convolve2d(grid, kernel, mode="same", boundary=boundary)

    norm_factor = np.linalg.det(2 * np.pi * cov * scotts_factor**2)
    norm_factor = len_x * d_x * d_y * norm_factor**0.5

    grid /= norm_factor

    return grid, xmin, xmax, ymin, ymax
Example #41
def main():
    render = False

    # Parameters
    total_repeats = 50


    for env_name in ['SawyerSlide', 'SawyerReach', 'SawyerPush']:  # , 'SawyerReach', 'SawyerSlide'
        for use_white_noise in [False]:  # True
            if(not use_white_noise):
                parameter_predictions = []
                oracle_parameters = []

            if(env_name == 'SawyerReach'):
                state_dim = 6
                action_dim = 3
                horizon = 50
                task = 'reaching'
            elif(env_name == 'SawyerPush'):
                horizon = 80
                state_dim = 10
                action_dim = 2
                task = 'pushing'
            elif(env_name == 'SawyerSlide'):
                horizon = 60
                state_dim = 12
                action_dim = 2
                task = 'sliding'

            env = make_env(task,render)
            env.reset()

            osi_l = 5

            method = 'UPOSI'

            RANDOMISZED_ONLY = True
            DYNAMICS_ONLY = True
            CAT_INTERNAL = True
            if (task == 'sliding'):
                CAT_INTERNAL = False

            params = query_params(env, randomised_only=RANDOMISZED_ONLY, dynamics_only=DYNAMICS_ONLY)
            params_dim = params.shape[0]  # dimension of parameters for prediction
            if CAT_INTERNAL:
                internal_state_dim = env.get_internal_state_dimension()
                _, _, _, info = env.step(np.zeros(action_dim))
                internal_action_dim = np.array(info["joint_velocities"]).shape[0]
                osi_input_dim = osi_l * (state_dim + action_dim + internal_state_dim + internal_action_dim)
            else:
                osi_input_dim = osi_l * (state_dim + action_dim)

            state_dim += params_dim

            alg_name = 'uposi_td3'

            if(task == 'reaching'):
                GOALS = REACH_GOALS
            elif(task == 'pushing'):
                GOALS = PUSH_GOALS
            elif(task == 'sliding'):
                GOALS = SLIDE_GOALS

            for goal_idx, goal_pos in enumerate(GOALS):
                goal_idx = goal_idx+1
                ###### SETTING UP THE ENVIRONMENT ######
                if(task == 'reaching'):
                    env._set_goal_neutral_offset(*goal_pos)
                elif(task == 'pushing'):
                    start_pos = np.array([0., -16.75e-2])
                    env._set_gripper_neutral_offset(*start_pos)
                    env._set_goal_neutral_offset(*goal_pos)

                env.reset()

                if (render):
                    env.viewer.set_camera(camera_id=0)
                    env.render()

                base_pos_in_world = env.sim.data.get_body_xpos("base")
                base_rot_in_world = env.sim.data.get_body_xmat("base").reshape((3, 3))
                base_pose_in_world = T.make_pose(base_pos_in_world, base_rot_in_world)
                world_pose_in_base = T.pose_inv(base_pose_in_world)

                # choose which randomisation is applied
                randomisation_type = 'reach_full-randomisation'
                if (task == 'reaching'):
                    number_random_params = 14
                elif (task == 'pushing'):
                    number_random_params = 23
                elif (task == 'sliding'):
                    number_random_params = 22

                path = '../../../../sawyer/src/sim2real_dynamics_sawyer/assets/rl/'+method +'/' + alg_name + '/model/' + env_name + str(
                    number_random_params) + '_' + alg_name
                try:
                    policy = load(path=path, alg=alg_name, state_dim=state_dim,
                                  action_dim=action_dim)

                    if(not use_white_noise):
                        osi_model = load_model(model_name='osi', path=path, input_dim=osi_input_dim,
                                               output_dim=params_dim)
                except :
                    print(method,',',randomisation_type)

                    continue

                log_save_name = '{}_{}_{}_{}'.format(method, alg_name, randomisation_type, number_random_params)

                for repeat in range(total_repeats):
                    #Reset environment
                    obs = env.reset()

                    #Establish extra frame transforms
                    if(task != 'sliding'):
                        base_rot_in_eef = env.init_right_hand_orn.T

                    #Setup logger
                    if(use_white_noise):
                        noise_folder = 'noise'
                    else:
                        noise_folder = 'normal'

                    log_path = '../../../../data/uposi_ablation/{}/{}/{}/goal_{}/trajectory_log_{}.csv'.format(task, noise_folder,  log_save_name,goal_idx,repeat)

                    if (task == 'reaching'):
                        log_list = ["step", "time",
                                    "cmd_eef_vx", "cmd_eef_vy", "cmd_eef_vz",
                                    "eef_x", "eef_y", "eef_z",
                                    "eef_vx", "eef_vy", "eef_vz",
                                    "goal_x", "goal_y", "goal_z",
                                    "obs_0", "obs_1", "obs_2",
                                    "obs_3", "obs_4", "obs_5"
                                    ]

                    elif (task == 'pushing'):
                        log_list = ["step", "time",
                                    "cmd_eef_vx", "cmd_eef_vy",
                                    "goal_x", "goal_y", "goal_z",
                                    "eef_x", "eef_y", "eef_z",
                                    "eef_vx", "eef_vy", "eef_vz",
                                    "object_x", "object_y", "object_z",
                                    "object_vx", "object_vy", "object_vz",
                                    "z_angle",
                                    "obs_0", "obs_1", "obs_2",
                                    "obs_3", "obs_4", "obs_5",
                                    "obs_6", "obs_7", "obs_8", "obs_9"]
                    elif (task == 'sliding'):
                        log_list = ["step", "time",
                                    "cmd_j5", "cmd_j6",
                                    "obj_x", "obj_y", "obj_z",
                                    "sin_z", "cos_z",
                                    "obj_vx", "obj_vy", "obj_vz",
                                    "a_j5", "a_j6",
                                    "v_j5", "v_j6",
                                    ]

                    logger = Logger(log_list, log_path, verbatim=render)

                    i = 0
                    mujoco_start_time = env.sim.data.time

                    if(use_white_noise):
                        params = np.random.uniform(-1.,1.,size =(params_dim,))
                        params_state = np.concatenate((params, obs))
                    else:
                        epi_traj = []
                        params = query_params(env, randomised_only=RANDOMISZED_ONLY, dynamics_only=DYNAMICS_ONLY)
                        zero_osi_input = np.zeros(osi_input_dim)
                        pre_params = osi_model(zero_osi_input).detach().numpy()

                        oracle_parameters.append(params)
                        parameter_predictions.append(pre_params)

                        params_state = np.concatenate((pre_params, obs))


                    while (True):

                        mujoco_elapsed = env.sim.data.time - mujoco_start_time

                        #### CHOOSING THE ACTION #####

                        if CAT_INTERNAL:
                            internal_state = env.get_internal_state()
                            full_state = np.concatenate([obs, internal_state])
                        else:
                            full_state = obs

                        action = policy.get_action(params_state, noise_scale=0.0)

                        ##############################

                        try:
                            next_obs, reward, done, info = env.step(action)
                        except MujocoException:
                            print('Mujoco exception, aborting episode')
                            break  # next_obs would be undefined after a failed step

                        if (use_white_noise):
                            params = np.random.uniform(-1., 1., size=(params_dim,))
                            next_params_state = np.concatenate((params, next_obs))
                            params_state = next_params_state
                        else:
                            if CAT_INTERNAL:
                                target_joint_action = info["joint_velocities"]
                                full_action = np.concatenate([action, target_joint_action])
                            else:
                                full_action = action
                            epi_traj.append(np.concatenate((full_state, full_action)))

                            if len(epi_traj)>=osi_l:
                                osi_input = stack_data(epi_traj, osi_l)
                                pre_params = osi_model(osi_input).detach().numpy()
                            else:
                                zero_osi_input = np.zeros(osi_input_dim)
                                pre_params = osi_model(zero_osi_input).detach().numpy()

                            oracle_parameters.append(params)
                            parameter_predictions.append(pre_params)

                            next_params_state = np.concatenate((pre_params, next_obs))
                            params_state = next_params_state



                        if(task == 'reaching'):
                            # Grab logging data
                            eef_pos_in_base, eef_vel_in_base, goal_pos_in_base = grab_reach_data(info, world_pose_in_base)
                            action_in_base = base_rot_in_eef.dot(action)
                            logger.log(i, mujoco_elapsed,
                                       action_in_base[0], action_in_base[1], action_in_base[2],
                                       eef_pos_in_base[0], eef_pos_in_base[1], eef_pos_in_base[2],
                                       eef_vel_in_base[0], eef_vel_in_base[1], eef_vel_in_base[2],
                                       goal_pos_in_base[0], goal_pos_in_base[1], goal_pos_in_base[2],
                                       obs[0], obs[1], obs[2],
                                       obs[3], obs[4], obs[5],
                                       )
                        elif (task == 'pushing'):
                            goal_pos_in_base, eef_pos_in_base, eef_vel_in_base, \
                            object_pos_in_base, object_vel_in_base, z_angle, = grab_push_data(info, world_pose_in_base)

                            action_3d = np.concatenate([action, [0.0]])
                            action_3d_in_base = base_rot_in_eef.dot(action_3d)

                            logger.log(i, mujoco_elapsed,
                                       action_3d_in_base[0], action_3d_in_base[1],
                                       goal_pos_in_base[0], goal_pos_in_base[1], goal_pos_in_base[2],
                                       eef_pos_in_base[0], eef_pos_in_base[1], eef_pos_in_base[2],
                                       eef_vel_in_base[0], eef_vel_in_base[1], eef_vel_in_base[2],
                                       object_pos_in_base[0], object_pos_in_base[1], object_pos_in_base[2],
                                       object_vel_in_base[0], object_vel_in_base[1], object_vel_in_base[2],
                                       z_angle[0],
                                       obs[0], obs[1], obs[2],
                                       obs[3], obs[4], obs[5],
                                       obs[6], obs[7], obs[8],
                                       obs[9],
                                       )
                        elif (task == 'sliding'):
                            logger.log(i, mujoco_elapsed,
                                       action[0], action[1],
                                       obs[0], obs[1], obs[2],
                                       obs[3], obs[4], obs[5], obs[6],
                                       obs[7], obs[8], obs[9],
                                       obs[10], obs[11],
                                       )

                        obs = next_obs

                        if(render):
                            env.render()

                        i += 1
                        if (i >= horizon):
                            break


            if(not use_white_noise):
                parameter_predictions = np.array(parameter_predictions)
                oracle_parameters = np.array(oracle_parameters)
                percent_diff = np.abs((parameter_predictions-oracle_parameters)/2.)*100

                average_percent_diff = np.nanmean(percent_diff[np.isfinite(percent_diff)])
                print('{} percent diff :{}'.format(task, average_percent_diff))
                save_path = '../../../../data/uposi_ablation/{}/{}/'.format(task, noise_folder)
                # Create the directory if needed, then append so repeated runs accumulate.
                os.makedirs(save_path, exist_ok=True)
                with open(os.path.join(save_path, 'percent_diff.txt'), 'a') as file:
                    file.write('{} percent diff :{}\n'.format(task, average_percent_diff))

    env.close()
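
A minimal sketch of the per-parameter error metric used above, assuming (as in the loop) that both the predictions and the oracle values are normalised to [-1, 1], so the full parameter range is 2:

import numpy as np

# Hypothetical stand-ins for parameter_predictions / oracle_parameters.
pred = np.array([[0.10, -0.50], [0.95, 0.20]])
oracle = np.array([[0.00, -0.40], [1.00, np.nan]])

# Absolute error as a percentage of the parameter range (2.0); NaNs are dropped.
percent_diff = np.abs((pred - oracle) / 2.0) * 100.0
average = np.nanmean(percent_diff[np.isfinite(percent_diff)])
print(average)  # mean over the three finite entries
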
                                      datasource["fn_pattern"], datasource["fn_ext"],
                                      datasource["timestep"], num_prev_files=9)

        R,_,metadata = io.readers.read_timeseries(fns, importer,
                                                  **datasource["importer_kwargs"])
        if domain == "fmi":
            R, metadata = conversion.to_rainrate(R, metadata, a=223.0, b=1.53)
        if upscale_factor > 1:
            R_ = []
            for i in range(R.shape[0]):
                R_.append(upscale_precip_field(R[i, :, :], upscale_factor))
            R = np.stack(R_)

        missing_data = False
        for i in range(R.shape[0]):
            if not np.any(np.isfinite(R[i, :, :])):
                print("Skipping, no finite values found for time step %d" % (i+1))
                missing_data = True
                break

        if missing_data:
            curdate += timedelta(minutes=timestep)
            continue

        R[~np.isfinite(R)] = metadata["zerovalue"]
        R = transformation.dB_transform(R, metadata=metadata)[0]

        obs_fns = io.archive.find_by_date(curdate, root_path, datasource["path_fmt"],
                                          datasource["fn_pattern"], datasource["fn_ext"],
                                          datasource["timestep"],
                                          num_next_files=num_timesteps)
Example No. 43
    def process(args, request):
        origin, timedelta, bands, value, src_projection = args
        if origin is None or timedelta is None or bands is None:
            return
        td_seconds = timedelta.total_seconds()
        lo = origin
        start = request.get("start", None)
        stop = request.get("stop", None)

        if start is None:
            # take the latest
            bands_lo = bands - 1
            bands_hi = bands
        elif stop is None:
            # take the nearest to start
            start_band = (request["start"] - lo).total_seconds() / td_seconds
            bands_lo = min(max(int(round(start_band)), 0), bands - 1)
            bands_hi = bands_lo + 1
        else:
            bands_lo = (request["start"] - lo).total_seconds() / td_seconds
            bands_hi = (request["stop"] - lo).total_seconds() / td_seconds
            bands_lo = max(int(math.ceil(bands_lo)), 0)
            bands_hi = min(int(math.floor(bands_hi)) + 1, bands)

        depth = bands_hi - bands_lo

        if depth <= 0:
            return

        if request["mode"] == "time":
            return {"time": [origin + i * timedelta for i in range(bands_lo, bands_hi)]}
        if request["mode"] == "meta":
            return {
                "meta": [
                    "Testmeta for band {}".format(i) for i in range(bands_lo, bands_hi)
                ]
            }
        if request["mode"] != "vals":
            raise ValueError('Invalid mode "{}"'.format(request["mode"]))

        height = request.get("height", 1)
        width = request.get("width", 1)
        shape = (depth, height, width)

        # simple mode: return a filled value with type uint8
        if not hasattr(value, "shape"):
            fillvalue = 255
            result = np.full(shape, value, dtype=np.uint8)
            return {"values": result, "no_data_value": fillvalue}

        # there is an actual data array
        fillvalue = get_dtype_max(value.dtype)
        bbox = request.get("bbox", (0, 0, width, height))
        projection = request.get("projection", "EPSG:3857")
        if projection != src_projection:
            extent = Extent(bbox, get_sr(projection))
            bbox = extent.transformed(get_sr(src_projection)).bbox
        x1, y1, x2, y2 = [int(round(x)) for x in bbox]

        if x1 == x2 or y1 == y2:  # point request
            if x1 < 0 or x1 >= value.shape[1] or y1 < 0 or y1 >= value.shape[0]:
                result = np.array([[255]], dtype=np.uint8)
            else:
                result = value[y1 : y1 + 1, x1 : x1 + 1]
        else:
            _x1 = max(x1, 0)
            _y1 = max(y1, 0)
            _x2 = min(x2, value.shape[1])
            _y2 = min(y2, value.shape[0])
            result = value[_y1:_y2, _x1:_x2]
            result = np.pad(
                result,
                ((_y1 - y1, y2 - _y2), (_x1 - x1, x2 - _x2)),
                mode="constant",
                constant_values=fillvalue,
            )
            if result.shape != (height, width):
                zoom = (height / result.shape[0], width / result.shape[1])
                mask = ndimage.zoom((result == fillvalue).astype(float), zoom) > 0.5
                result[result == fillvalue] = 0
                result = ndimage.zoom(result, zoom)
                result[mask] = fillvalue
        result = np.repeat(result[np.newaxis], depth, axis=0)

        # fill nan values
        result[~np.isfinite(result)] = fillvalue
        return {"values": result, "no_data_value": fillvalue}
Example No. 44
# end=datetime.datetime.utcnow() #noaa.time[-1]
# hp.plot_insitu(noaa, start, end,'NOAA_RTSW','/home/cmoestl/pycode/heliocats')

####################################### OMNI2
fileomni = "omni_1963_now.p"
if get_new_data: hd.save_omni_data(data_path, fileomni)
[o, ho] = pickle.load(open(data_path + fileomni, "rb"))

start = datetime.datetime.utcnow() - datetime.timedelta(days=365)
end = datetime.datetime.utcnow()
hp.plot_insitu_update(o, start, end, 'OMNI2', plot_path, now=True)

########################## add NOAA RTSW to OMNI data and make combined plot

#get index of last OMNI data
last_omni_index = np.where(np.isfinite(o.bt))[0][-1]
#get time for this index
last_omni_time = o.time[last_omni_index]
#add utc timezone awareness
last_omni_time_utc = last_omni_time.astimezone(tz=datetime.timezone.utc)
#get index where omni ends in noaa data
noaa_omni_end = np.where(noaa.time > last_omni_time_utc)[0][0]

#length of NOAA data
size_noaa = np.size(noaa) - noaa_omni_end

combi_omni_noaa=np.zeros(last_omni_index+size_noaa,dtype=[('time',object),('bx', float),('by', float),\
                ('bz', float),('bt', float),('vt', float),('np', float),('tp', float),\
                ('x', float),('y', float),('z', float),\
                ('r', float),('lat', float),('lon', float)])
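
The combined record above is allocated as a NumPy structured array; a minimal sketch of the same pattern with a hypothetical two-field dtype:

import numpy as np

combi = np.zeros(5, dtype=[('time', object), ('bt', float)])
combi['bt'] = np.arange(5.0)             # fill one field by name
print(combi['bt'][np.isfinite(combi['bt'])])
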
Example No. 45
        f=Dataset(k,'r')
        lv_HYBL1_a, lv_HYBL1_b=f.variables['lv_HYBL1_a'][:],f.variables['lv_HYBL1_b'][:]
        Ttemp,qtemp=f.variables['T'][:],f.variables['q'][:]
        Ttemp=np.swapaxes(Ttemp,0,1)#*mask_land 
        qtemp=np.swapaxes(qtemp,0,1)#*mask_land 
        f.close()

        f=Dataset(m,'r')
        surfp=f.variables['pres'][:]
        f.close()

        ### Slice oceans and preserve vertical structure information ###
        
        prc_temp=prc[i1:i2,...]*mask_land
        
        mask_ind=np.where(np.isfinite(prc_temp))
        prc_slice=prc[mask_ind[0],mask_ind[1],mask_ind[2]]
        T_slice=Ttemp[:,mask_ind[0],mask_ind[1],mask_ind[2]]
        q_slice=qtemp[:,mask_ind[0],mask_ind[1],mask_ind[2]]
        surfp_slice=surfp[mask_ind]
        
        print('SAVING FILE')
        fout='/glade/p/univ/p35681102/fiaz/erai_data/cwv_that_erai_trmm_ocn/daily_files/'
        filo=fout+'Tqvert_ocns_'+str(k[-13:-3])+'.nc'

        # Close any previously opened output file before creating a new one.
        try:
            ncfile.close()
        except Exception:
            pass
    
        ncfile = Dataset(filo, mode='w', format='NETCDF4')
    
        ncfile.createDimension('lev',lv_HYBL1_a.size)
Example No. 46
def mstamp(seq, sub_len, return_dimension=False):
    """ multidimensional matrix profile with mSTAMP (stamp based)

    Parameters
    ----------
    seq : numpy matrix, shape (n_dim, seq_len)
        input sequence
    sub_len : int
        subsequence length
    return_dimension : bool
        if True, also return the matrix profile dimension. It takes O(d^2 n)
        to store and O(d^2 n^2) to compute. (default is False)

    Returns
    -------
    matrix_profile : numpy matrix, shape (n_dim, sub_num)
        matrix profile
    profile_index : numpy matrix, shape (n_dim, sub_num)
        matrix profile index
    profile_dimension : list, optional, shape (n_dim)
        matrix profile dimension, this is only returned when return_dimension
        is True

    Notes
    -----
    C.-C. M. Yeh, N. Kavantzas, and E. Keogh, "Matrix Profile VI: Meaningful
    Multidimensional Motif Discovery," IEEE ICDM 2017.
    https://sites.google.com/view/mstamp/
    http://www.cs.ucr.edu/~eamonn/MatrixProfile.html
    """
    if sub_len < 4:
        raise RuntimeError('Subsequence length (sub_len) must be at least 4')
    exc_zone = sub_len // 2
    seq = np.array(seq, dtype=float, copy=True)

    if seq.ndim == 1:
        seq = np.expand_dims(seq, axis=0)

    seq_len = seq.shape[1]
    sub_num = seq.shape[1] - sub_len + 1
    n_dim = seq.shape[0]
    skip_loc = np.zeros(sub_num, dtype=bool)
    for i in range(sub_num):
        if not np.all(np.isfinite(seq[:, i:i + sub_len])):
            skip_loc[i] = True
    seq[~np.isfinite(seq)] = 0

    matrix_profile = np.empty((n_dim, sub_num))
    matrix_profile[:] = np.inf
    profile_index = -np.ones((n_dim, sub_num), dtype=int)
    seq_freq = np.empty((n_dim, seq_len * 2), dtype=np.complex128)
    seq_mu = np.empty((n_dim, sub_num))
    seq_sig = np.empty((n_dim, sub_num))
    if return_dimension:
        profile_dimension = []
        for i in range(n_dim):
            profile_dimension.append(np.empty((i + 1, sub_num), dtype=int))
    for i in range(n_dim):
        seq_freq[i, :], seq_mu[i, :], seq_sig[i, :] = \
            _mass_pre(seq[i, :], sub_len)

    dist_profile = np.empty((n_dim, sub_num))
    que_sig = np.empty(n_dim)
    tic = time.time()
    for i in range(sub_num):
        cur_prog = (i + 1) / sub_num
        time_left = ((time.time() - tic) / (i + 1)) * (sub_num - i - 1)
        print('\rProgress [{0:<50s}] {1:5.1f}% {2:8.1f} sec'.format(
            '#' * int(cur_prog * 50), cur_prog * 100, time_left),
              end="")
        for j in range(n_dim):
            que = seq[j, i:i + sub_len]
            dist_profile[j, :], que_sig[j] = _mass(seq_freq[j, :], que,
                                                   seq_len, sub_len,
                                                   seq_mu[j, :], seq_sig[j, :])

        if skip_loc[i] or np.any(que_sig < _EPS):
            continue

        exc_zone_st = max(0, i - exc_zone)
        exc_zone_ed = min(sub_num, i + exc_zone)
        dist_profile[:, exc_zone_st:exc_zone_ed] = np.inf
        dist_profile[:, skip_loc] = np.inf
        dist_profile[seq_sig < _EPS] = np.inf
        dist_profile = np.sqrt(dist_profile)

        dist_profile_dim = np.argsort(dist_profile, axis=0)
        dist_profile_sort = np.sort(dist_profile, axis=0)
        dist_profile_cumsum = np.zeros(sub_num)
        for j in range(n_dim):
            dist_profile_cumsum += dist_profile_sort[j, :]
            dist_profile_mean = dist_profile_cumsum / (j + 1)
            update_pos = dist_profile_mean < matrix_profile[j, :]
            profile_index[j, update_pos] = i
            matrix_profile[j, update_pos] = dist_profile_mean[update_pos]
            if return_dimension:
                profile_dimension[j][:, update_pos] = \
                    dist_profile_dim[:j + 1, update_pos]

    # matrix_profile = np.sqrt(matrix_profile)
    if return_dimension:
        return matrix_profile, profile_index, profile_dimension
    else:
        return matrix_profile, profile_index
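
A minimal usage sketch for the function above: the input is a (n_dim, seq_len) matrix, sub_len must be at least 4, and windows containing NaN are excluded via skip_loc:

import numpy as np

seq = np.random.randn(2, 200)     # two-dimensional toy series
seq[0, 50] = np.nan               # one missing value; affected windows are skipped
mp, mp_index = mstamp(seq, sub_len=16)
print(mp.shape)                   # (2, 185) == (n_dim, seq_len - sub_len + 1)
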
Example No. 47
def _assert_quats(actual,
                  desired,
                  dist_tol=0.003,
                  angle_tol=5.,
                  err_rtol=0.5,
                  gof_rtol=0.001,
                  vel_atol=2e-3):  # 2 mm/s
    """Compare estimated cHPI positions."""
    __tracebackhide__ = True
    trans_est, rot_est, t_est = head_pos_to_trans_rot_t(actual)
    trans, rot, t = head_pos_to_trans_rot_t(desired)
    quats_est = rot_to_quat(rot_est)
    gofs, errs, vels = desired[:, 7:].T
    gofs_est, errs_est, vels_est = actual[:, 7:].T
    del actual, desired

    # maxfilter produces some times that are implausibly large (weird)
    if not np.isclose(t[0], t_est[0], atol=1e-1):  # within 100 ms
        raise AssertionError('Start times not within 100 ms: %0.3f != %0.3f' %
                             (t[0], t_est[0]))
    use_mask = (t >= t_est[0]) & (t <= t_est[-1])
    t = t[use_mask]
    trans = trans[use_mask]
    quats = rot_to_quat(rot)
    quats = quats[use_mask]
    gofs, errs, vels = gofs[use_mask], errs[use_mask], vels[use_mask]

    # double-check our angle function
    for q in (quats, quats_est):
        angles = _angle_between_quats(q, q)
        assert_allclose(angles, 0., atol=1e-5)

    # limit translation difference between MF and our estimation
    trans_est_interp = interp1d(t_est, trans_est, axis=0)(t)
    distances = np.sqrt(np.sum((trans - trans_est_interp)**2, axis=1))
    assert np.isfinite(distances).all()
    arg_worst = np.argmax(distances)
    assert distances[arg_worst] <= dist_tol, (
        '@ %0.3f seconds: %0.3f > %0.3f mm' %
        (t[arg_worst], 1000 * distances[arg_worst], 1000 * dist_tol))

    # limit rotation difference between MF and our estimation
    # (note that the interpolation will make this slightly worse)
    quats_est_interp = interp1d(t_est, quats_est, axis=0)(t)
    angles = 180 * _angle_between_quats(quats_est_interp, quats) / np.pi
    arg_worst = np.argmax(angles)
    assert angles[arg_worst] <= angle_tol, (
        '@ %0.3f seconds: %0.3f > %0.3f deg' %
        (t[arg_worst], angles[arg_worst], angle_tol))

    # error calculation difference
    errs_est_interp = interp1d(t_est, errs_est)(t)
    assert_allclose(errs_est_interp,
                    errs,
                    rtol=err_rtol,
                    atol=1e-3,
                    err_msg='err')  # 1 mm

    # gof calculation difference
    gof_est_interp = interp1d(t_est, gofs_est)(t)
    assert_allclose(gof_est_interp,
                    gofs,
                    rtol=gof_rtol,
                    atol=1e-7,
                    err_msg='gof')

    # velocity calculation difference
    vel_est_interp = interp1d(t_est, vels_est)(t)
    assert_allclose(vel_est_interp, vels, atol=vel_atol, err_msg='velocity')
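
A standalone illustration of the sanity check in the middle of the helper, using the same quaternion utilities this test module already imports: the angle between any quaternion and itself must be (numerically) zero.

import numpy as np

quats = rot_to_quat(np.eye(3)[np.newaxis, :, :])   # identity rotation
angles = _angle_between_quats(quats, quats)
assert np.allclose(angles, 0.0, atol=1e-5)
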
Example No. 48
def isfinite(_array):
    """are any of the values finite?"""
    return _array is not None and np.any(np.isfinite(_array))
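
A short demonstration of the guard above, which treats None and all-NaN arrays the same way:

import numpy as np

print(isfinite(None))                      # False: no array at all
print(isfinite(np.array([np.nan])))        # False: nothing finite
print(isfinite(np.array([np.nan, 1.0])))   # True: at least one finite value
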
Example No. 49
def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
    """Compute the log of the sum of exponentials of input elements.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes over which the sum is taken. By default `axis` is None,
        and all elements are summed.

        .. versionadded:: 0.11.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the original array.

        .. versionadded:: 0.15.0
    b : array-like, optional
        Scaling factor for exp(`a`); must be of the same shape as `a` or
        broadcastable to `a`. These values may be negative in order to
        implement subtraction.

        .. versionadded:: 0.12.0
    return_sign : bool, optional
        If this is set to True, the result will be a pair containing sign
        information; if False, results that are negative will be returned
        as NaN. Default is False (no sign information).

        .. versionadded:: 0.16.0

    Returns
    -------
    res : ndarray
        The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
        more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
        is returned.
    sgn : ndarray
        If return_sign is True, this will be an array of floating-point
        numbers matching res and +1, 0, or -1 depending on the sign
        of the result. If False, only one result is returned.

    See Also
    --------
    numpy.logaddexp, numpy.logaddexp2

    Notes
    -----
    Numpy has a logaddexp function which is very similar to `logsumexp`, but
    only handles two arguments. `logaddexp.reduce` is similar to this
    function, but may be less stable.

    Examples
    --------
    >>> from scipy.misc import logsumexp
    >>> a = np.arange(10)
    >>> np.log(np.sum(np.exp(a)))
    9.4586297444267107
    >>> logsumexp(a)
    9.4586297444267107

    With weights

    >>> a = np.arange(10)
    >>> b = np.arange(10, 0, -1)
    >>> logsumexp(a, b=b)
    9.9170178533034665
    >>> np.log(np.sum(b*np.exp(a)))
    9.9170178533034647

    Returning a sign flag

    >>> logsumexp([1,2],b=[1,-1],return_sign=True)
    (1.5413248546129181, -1.0)

    Notice that `logsumexp` does not directly support masked arrays. To use it
    on a masked array, convert the mask into zero weights:

    >>> a = np.ma.array([np.log(2), 2, np.log(3)],
    ...                  mask=[False, True, False])
    >>> b = (~a.mask).astype(int)
    >>> logsumexp(a.data, b=b), np.log(5)
    (1.6094379124341005, 1.6094379124341005)

    """
    a = _asarray_validated(a, check_finite=False)
    if b is not None:
        a, b = broadcast_arrays(a, b)
        if np.any(b == 0):
            a = a + 0.  # promote to at least float
            a[b == 0] = -np.inf

    a_max = amax(a, axis=axis, keepdims=True)

    if a_max.ndim > 0:
        a_max[~isfinite(a_max)] = 0
    elif not isfinite(a_max):
        a_max = 0

    if b is not None:
        b = asarray(b)
        tmp = b * exp(a - a_max)
    else:
        tmp = exp(a - a_max)

    # suppress warnings about log of zero
    with np.errstate(divide='ignore'):
        s = np.sum(tmp, axis=axis, keepdims=keepdims)
        if return_sign:
            sgn = sign(s)
            s *= sgn  # /= makes more sense but we need zero -> zero
        out = log(s)

    if not keepdims:
        a_max = squeeze(a_max, axis=axis)
    out += a_max

    if return_sign:
        return out, sgn
    else:
        return out
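
The reason for subtracting a_max before exponentiating is numerical stability: the naive evaluation overflows for large inputs while the shifted form stays exact. A small illustration:

import numpy as np

a = np.array([1000.0, 1000.0])
print(np.log(np.sum(np.exp(a))))                  # inf, with an overflow warning
a_max = a.max()                                   # shift so the largest exponent is 0
print(a_max + np.log(np.sum(np.exp(a - a_max))))  # 1000.693... == 1000 + log(2)
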
Example No. 50
def unwise_forcedphot(cat,
                      tiles,
                      band=1,
                      roiradecbox=None,
                      use_ceres=True,
                      ceres_block=8,
                      save_fits=False,
                      get_models=False,
                      ps=None,
                      psf_broadening=None,
                      pixelized_psf=False,
                      get_masks=None,
                      move_crpix=False,
                      modelsky_dir=None):
    '''
    Given a list of tractor sources *cat*
    and a list of unWISE tiles *tiles* (a fits_table with RA,Dec,coadd_id)
    runs forced photometry, returning a FITS table the same length as *cat*.

    *get_masks*: the WCS to resample mask bits into.
    '''
    from tractor import PointSource, Tractor, ExpGalaxy, DevGalaxy
    from tractor.sersic import SersicGalaxy

    if not pixelized_psf and psf_broadening is None:
        # PSF broadening in post-reactivation data, by band.
        # Newer version from Aaron's email to decam-chatter, 2018-06-14.
        broadening = {1: 1.0405, 2: 1.0346, 3: None, 4: None}
        psf_broadening = broadening[band]

    if False:
        from astrometry.util.plotutils import PlotSequence
        ps = PlotSequence('wise-forced-w%i' % band)
    plots = (ps is not None)
    if plots:
        import pylab as plt

    wantims = (plots or save_fits or get_models)
    wanyband = 'w'
    if get_models:
        models = []

    wband = 'w%i' % band

    Nsrcs = len(cat)
    phot = fits_table()
    # Filled in based on unique tile overlap
    phot.wise_coadd_id = np.array(['        '] * Nsrcs, dtype='U8')
    phot.wise_x = np.zeros(Nsrcs, np.float32)
    phot.wise_y = np.zeros(Nsrcs, np.float32)
    phot.set('psfdepth_%s' % wband, np.zeros(Nsrcs, np.float32))
    nexp = np.zeros(Nsrcs, np.int16)
    mjd = np.zeros(Nsrcs, np.float64)
    central_flux = np.zeros(Nsrcs, np.float32)

    ra = np.array([src.getPosition().ra for src in cat])
    dec = np.array([src.getPosition().dec for src in cat])

    fskeys = ['prochi2', 'profracflux']
    fitstats = {}

    if get_masks:
        mh, mw = get_masks.shape
        maskmap = np.zeros((mh, mw), np.uint32)

    tims = []
    for tile in tiles:
        info('Reading WISE tile', tile.coadd_id, 'band', band)
        tim = get_unwise_tractor_image(tile.unwise_dir,
                                       tile.coadd_id,
                                       band,
                                       bandname=wanyband,
                                       roiradecbox=roiradecbox)
        if tim is None:
            debug('Actually, no overlap with WISE coadd tile', tile.coadd_id)
            continue

        if plots:
            sig1 = tim.sig1
            plt.clf()
            plt.imshow(tim.getImage(),
                       interpolation='nearest',
                       origin='lower',
                       cmap='gray',
                       vmin=-3 * sig1,
                       vmax=10 * sig1)
            plt.colorbar()
            tag = '%s W%i' % (tile.coadd_id, band)
            plt.title('%s: tim data' % tag)
            ps.savefig()
            plt.clf()
            plt.hist((tim.getImage() * tim.inverr)[tim.inverr > 0].ravel(),
                     range=(-5, 10),
                     bins=100)
            plt.xlabel('Per-pixel intensity (Sigma)')
            plt.title(tag)
            ps.savefig()

        if move_crpix and band in [1, 2]:
            realwcs = tim.wcs.wcs
            x, y = realwcs.crpix
            tile_crpix = tile.get('crpix_w%i' % band)
            dx = tile_crpix[0] - 1024.5
            dy = tile_crpix[1] - 1024.5
            realwcs.set_crpix(x + dx, y + dy)
            debug('unWISE', tile.coadd_id, 'band', band, 'CRPIX', x, y,
                  'shift by', dx, dy, 'to', realwcs.crpix)

        if modelsky_dir and band in [1, 2]:
            fn = os.path.join(modelsky_dir,
                              '%s.%i.mod.fits' % (tile.coadd_id, band))
            if not os.path.exists(fn):
                raise RuntimeError('WARNING: does not exist: %s' % fn)
            x0, x1, y0, y1 = tim.roi
            bg = fitsio.FITS(fn)[2][y0:y1, x0:x1]
            assert (bg.shape == tim.shape)

            if plots:
                plt.clf()
                plt.subplot(1, 2, 1)
                plt.imshow(tim.getImage(),
                           interpolation='nearest',
                           origin='lower',
                           cmap='gray',
                           vmin=-3 * sig1,
                           vmax=5 * sig1)
                plt.subplot(1, 2, 2)
                plt.imshow(bg,
                           interpolation='nearest',
                           origin='lower',
                           cmap='gray',
                           vmin=-3 * sig1,
                           vmax=5 * sig1)
                tag = '%s W%i' % (tile.coadd_id, band)
                plt.suptitle(tag)
                ps.savefig()
                plt.clf()
                ha = dict(range=(-5, 10), bins=100, histtype='step')
                plt.hist((tim.getImage() * tim.inverr)[tim.inverr > 0].ravel(),
                         color='b',
                         label='Original',
                         **ha)
                plt.hist(((tim.getImage() - bg) *
                          tim.inverr)[tim.inverr > 0].ravel(),
                         color='g',
                         label='Minus Background',
                         **ha)
                plt.axvline(0, color='k', alpha=0.5)
                plt.xlabel('Per-pixel intensity (Sigma)')
                plt.legend()
                plt.title(tag + ': background')
                ps.savefig()

            # Actually subtract the background!
            tim.data -= bg

        # Floor the per-pixel variances,
        # and add Poisson contribution from sources
        if band in [1, 2]:
            # in Vega nanomaggies per pixel
            floor_sigma = {1: 0.5, 2: 2.0}
            poissons = {1: 0.15, 2: 0.3}
            with np.errstate(divide='ignore'):
                new_ie = 1. / np.sqrt(
                    (1. / tim.inverr)**2 + floor_sigma[band] +
                    poissons[band]**2 * np.maximum(0., tim.data))
            new_ie[tim.inverr == 0] = 0.

            if plots:
                plt.clf()
                plt.plot((1. / tim.inverr[tim.inverr > 0]).ravel(),
                         (1. / new_ie[tim.inverr > 0]).ravel(), 'b.')
                plt.title('unWISE per-pixel error: %s band %i' %
                          (tile.coadd_id, band))
                plt.xlabel('original')
                plt.ylabel('floored')
                ps.savefig()

            assert (np.all(np.isfinite(new_ie)))
            assert (np.all(new_ie >= 0.))
            tim.inverr = new_ie

            # Expand a 3-pixel radius around weight=0 (saturated) pixels
            # from Eddie via crowdsource
            # https://github.com/schlafly/crowdsource/blob/7069da3e7d9d3124be1cbbe1d21ffeb63fc36dcc/python/wise_proc.py#L74
            ## FIXME -- W3/W4 ??
            satlimit = 85000
            msat = ((tim.data > satlimit) | ((tim.nims == 0) &
                                             (tim.nuims > 1)))
            from scipy.ndimage.morphology import binary_dilation
            xx, yy = np.mgrid[-3:3 + 1, -3:3 + 1]
            dilate = xx**2 + yy**2 <= 3**2
            msat = binary_dilation(msat, dilate)
            nbefore = np.sum(tim.inverr == 0)
            tim.inverr[msat] = 0
            nafter = np.sum(tim.inverr == 0)
            debug('Masking an additional', (nafter - nbefore),
                  'near-saturated pixels in unWISE', tile.coadd_id, 'band',
                  band)

        # Read mask file?
        if get_masks:
            from astrometry.util.resample import resample_with_wcs, OverlapError
            # unwise_dir can be a colon-separated list of paths
            tilemask = None
            for d in tile.unwise_dir.split(':'):
                fn = os.path.join(d, tile.coadd_id[:3], tile.coadd_id,
                                  'unwise-%s-msk.fits.gz' % tile.coadd_id)
                if os.path.exists(fn):
                    debug('Reading unWISE mask file', fn)
                    x0, x1, y0, y1 = tim.roi
                    tilemask = fitsio.FITS(fn)[0][y0:y1, x0:x1]
                    break
            if tilemask is None:
                info('unWISE mask file for tile', tile.coadd_id,
                     'does not exist')
            else:
                try:
                    tanwcs = tim.wcs.wcs
                    assert (tanwcs.shape == tilemask.shape)
                    Yo, Xo, Yi, Xi, _ = resample_with_wcs(get_masks,
                                                          tanwcs,
                                                          intType=np.int16)
                    # Only deal with mask pixels that are set.
                    I, = np.nonzero(tilemask[Yi, Xi] > 0)
                    # Trim to unique area for this tile
                    rr, dd = get_masks.pixelxy2radec(Xo[I] + 1, Yo[I] + 1)
                    good = radec_in_unique_area(rr, dd, tile.ra1, tile.ra2,
                                                tile.dec1, tile.dec2)
                    I = I[good]
                    maskmap[Yo[I], Xo[I]] = tilemask[Yi[I], Xi[I]]
                except OverlapError:
                    # Shouldn't happen by this point
                    print('Warning: no overlap between WISE tile',
                          tile.coadd_id, 'and brick')

            if plots:
                plt.clf()
                plt.imshow(tilemask, interpolation='nearest', origin='lower')
                plt.title('Tile %s: mask' % tile.coadd_id)
                ps.savefig()
                plt.clf()
                plt.imshow(maskmap, interpolation='nearest', origin='lower')
                plt.title('Tile %s: accumulated maskmap' % tile.coadd_id)
                ps.savefig()

        # The tiles have some overlap, so zero out pixels outside the
        # tile's unique area.
        th, tw = tim.shape
        xx, yy = np.meshgrid(np.arange(tw), np.arange(th))
        rr, dd = tim.wcs.wcs.pixelxy2radec(xx + 1, yy + 1)
        unique = radec_in_unique_area(rr, dd, tile.ra1, tile.ra2, tile.dec1,
                                      tile.dec2)
        debug('Tile', tile.coadd_id, '- total of', np.sum(unique),
              'unique pixels out of', len(unique.flat), 'total pixels')
        if get_models:
            # Save the inverr before blanking out non-unique pixels, for making coadds with no gaps!
            # (actually, slightly more subtly, expand unique area by 1 pixel)
            from scipy.ndimage.morphology import binary_dilation
            du = binary_dilation(unique)
            tim.coadd_inverr = tim.inverr * du
        tim.inverr[unique == False] = 0.
        del xx, yy, rr, dd, unique

        if plots:
            sig1 = tim.sig1
            plt.clf()
            plt.imshow(tim.getImage() * (tim.inverr > 0),
                       interpolation='nearest',
                       origin='lower',
                       cmap='gray',
                       vmin=-3 * sig1,
                       vmax=10 * sig1)
            plt.colorbar()
            tag = '%s W%i' % (tile.coadd_id, band)
            plt.title('%s: tim data (unique)' % tag)
            ps.savefig()

        if pixelized_psf:
            from unwise_psf import unwise_psf
            if (band == 1) or (band == 2):
                # we only have updated PSFs for W1 and W2
                psfimg = unwise_psf.get_unwise_psf(band,
                                                   tile.coadd_id,
                                                   modelname='neo6_unwisecat')
            else:
                psfimg = unwise_psf.get_unwise_psf(band, tile.coadd_id)

            if band == 4:
                # oversample (the unwise_psf models are at native W4 5.5"/pix,
                # while the unWISE coadds are made at 2.75"/pix).
                ph, pw = psfimg.shape
                subpsf = np.zeros((ph * 2 - 1, pw * 2 - 1), np.float32)
                from astrometry.util.util import lanczos3_interpolate
                xx, yy = np.meshgrid(
                    np.arange(0., pw - 0.51, 0.5, dtype=np.float32),
                    np.arange(0., ph - 0.51, 0.5, dtype=np.float32))
                xx = xx.ravel()
                yy = yy.ravel()
                ix = xx.astype(np.int32)
                iy = yy.astype(np.int32)
                dx = (xx - ix).astype(np.float32)
                dy = (yy - iy).astype(np.float32)
                psfimg = psfimg.astype(np.float32)
                rtn = lanczos3_interpolate(ix, iy, dx, dy, [subpsf.flat],
                                           [psfimg])

                if plots:
                    plt.clf()
                    plt.imshow(psfimg, interpolation='nearest', origin='lower')
                    plt.title('Original PSF model')
                    ps.savefig()
                    plt.clf()
                    plt.imshow(subpsf, interpolation='nearest', origin='lower')
                    plt.title('Subsampled PSF model')
                    ps.savefig()

                psfimg = subpsf
                del xx, yy, ix, iy, dx, dy

            from tractor.psf import PixelizedPSF
            psfimg /= psfimg.sum()
            fluxrescales = {1: 1.04, 2: 1.005, 3: 1.0, 4: 1.0}
            psfimg *= fluxrescales[band]
            tim.psf = PixelizedPSF(psfimg)

        if psf_broadening is not None and not pixelized_psf:
            # psf_broadening is a factor by which the PSF FWHMs
            # should be scaled; the PSF is a little wider
            # post-reactivation.
            psf = tim.getPsf()
            from tractor import GaussianMixturePSF
            if isinstance(psf, GaussianMixturePSF):
                debug('Broadening PSF: from', psf)
                p0 = psf.getParams()
                pnames = psf.getParamNames()
                p1 = [
                    p * psf_broadening**2 if 'var' in name else p
                    for (p, name) in zip(p0, pnames)
                ]
                psf.setParams(p1)
                debug('Broadened PSF:', psf)
            else:
                print(
                    'WARNING: cannot apply psf_broadening to WISE PSF of type',
                    type(psf))

        wcs = tim.wcs.wcs
        _, fx, fy = wcs.radec2pixelxy(ra, dec)
        x = np.round(fx - 1.).astype(int)
        y = np.round(fy - 1.).astype(int)
        good = (x >= 0) * (x < tw) * (y >= 0) * (y < th)
        # Which sources are in this brick's unique area?
        usrc = radec_in_unique_area(ra, dec, tile.ra1, tile.ra2, tile.dec1,
                                    tile.dec2)
        I, = np.nonzero(good * usrc)

        nexp[I] = tim.nuims[y[I], x[I]]
        if hasattr(tim, 'mjdmin') and hasattr(tim, 'mjdmax'):
            mjd[I] = (tim.mjdmin + tim.mjdmax) / 2.
        phot.wise_coadd_id[I] = tile.coadd_id
        phot.wise_x[I] = fx[I] - 1.
        phot.wise_y[I] = fy[I] - 1.

        central_flux[I] = tim.getImage()[y[I], x[I]]
        del x, y, good, usrc

        # PSF norm for depth
        psf = tim.getPsf()
        h, w = tim.shape
        patch = psf.getPointSourcePatch(h // 2, w // 2).patch
        psfnorm = np.sqrt(np.sum(patch**2))
        # To handle zero-depth, we return 1/nanomaggies^2 units rather than mags.
        # In the small empty patches of the sky (eg W4 in 0922p702), we get sig1 = NaN
        if np.isfinite(tim.sig1):
            phot.get('psfdepth_%s' % wband)[I] = 1. / (tim.sig1 / psfnorm)**2

        tim.tile = tile
        tims.append(tim)

    if plots:
        plt.clf()
        mn, mx = 0.1, 20000
        plt.hist(np.log10(np.clip(central_flux, mn, mx)),
                 bins=100,
                 range=(np.log10(mn), np.log10(mx)))
        logt = np.arange(0, 5)
        plt.xticks(logt, ['%i' % i for i in 10.**logt])
        plt.title('Central fluxes (W%i)' % band)
        plt.axvline(np.log10(20000), color='k')
        plt.axvline(np.log10(1000), color='k')
        ps.savefig()

    # Eddie's non-secret recipe:
    #- central pixel <= 1000: 19x19 pix box size
    #- central pixel in 1000 - 20000: 59x59 box size
    #- central pixel > 20000 or saturated: 149x149 box size
    #- object near "bright star": 299x299 box size
    nbig = nmedium = nsmall = 0
    for src, cflux in zip(cat, central_flux):
        if cflux > 20000:
            R = 100
            nbig += 1
        elif cflux > 1000:
            R = 30
            nmedium += 1
        else:
            R = 15
            nsmall += 1
        if isinstance(src, PointSource):
            src.fixedRadius = R
        else:
            ### FIXME -- sizes for galaxies..... can we set PSF size separately?
            galrad = 0
            # RexGalaxy is a subclass of ExpGalaxy
            if isinstance(src, (ExpGalaxy, DevGalaxy, SersicGalaxy)):
                galrad = src.shape.re
            pixscale = 2.75
            src.halfsize = int(np.hypot(R, galrad * 5 / pixscale))
    debug('Set WISE source sizes:', nbig, 'big', nmedium, 'medium', nsmall,
          'small')

    tractor = Tractor(tims, cat)
    if use_ceres:
        from tractor.ceres_optimizer import CeresOptimizer
        tractor.optimizer = CeresOptimizer(BW=ceres_block, BH=ceres_block)
    tractor.freezeParamsRecursive('*')
    tractor.thawPathsTo(wanyband)

    t0 = Time()
    R = tractor.optimize_forced_photometry(fitstats=True,
                                           variance=True,
                                           shared_params=False,
                                           wantims=wantims)
    info('unWISE forced photometry took', Time() - t0)

    if use_ceres:
        term = R.ceres_status['termination']
        # Running out of memory can cause failure to converge and term
        # status = 2.  Fail completely in this case.
        if term != 0:
            info('Ceres termination status:', term)
            raise RuntimeError('Ceres terminated with status %i' % term)

    if wantims:
        ims1 = R.ims1
        # can happen if empty source list (we still want to generate coadds)
        if ims1 is None:
            ims1 = R.ims0

    flux_invvars = R.IV
    if R.fitstats is not None:
        for k in fskeys:
            x = getattr(R.fitstats, k)
            fitstats[k] = np.array(x).astype(np.float32)

    if save_fits:
        for i, tim in enumerate(tims):
            tile = tim.tile
            (dat, mod, _, chi, _) = ims1[i]
            wcshdr = fitsio.FITSHDR()
            tim.wcs.wcs.add_to_header(wcshdr)
            tag = 'fit-%s-w%i' % (tile.coadd_id, band)
            fitsio.write('%s-data.fits' % tag,
                         dat,
                         clobber=True,
                         header=wcshdr)
            fitsio.write('%s-mod.fits' % tag, mod, clobber=True, header=wcshdr)
            fitsio.write('%s-chi.fits' % tag, chi, clobber=True, header=wcshdr)

    if plots:
        # Create models for just the brightest sources
        bright_cat = [
            src for src in cat if src.getBrightness().getBand(wanyband) > 1000
        ]
        debug('Bright sources:', len(bright_cat))
        btr = Tractor(tims, bright_cat)
        for tim in tims:
            mod = btr.getModelImage(tim)
            tile = tim.tile
            tag = '%s W%i' % (tile.coadd_id, band)
            sig1 = tim.sig1
            plt.clf()
            plt.imshow(mod,
                       interpolation='nearest',
                       origin='lower',
                       cmap='gray',
                       vmin=-3 * sig1,
                       vmax=25 * sig1)
            plt.colorbar()
            plt.title('%s: bright-star models' % tag)
            ps.savefig()

    if get_models:
        for i, tim in enumerate(tims):
            tile = tim.tile
            (dat, mod, _, _, _) = ims1[i]
            models.append(
                (tile.coadd_id, band, tim.wcs.wcs, dat, mod, tim.coadd_inverr))

    if plots:
        for i, tim in enumerate(tims):
            tile = tim.tile
            tag = '%s W%i' % (tile.coadd_id, band)
            (dat, mod, _, chi, _) = ims1[i]
            sig1 = tim.sig1
            plt.clf()
            plt.imshow(dat,
                       interpolation='nearest',
                       origin='lower',
                       cmap='gray',
                       vmin=-3 * sig1,
                       vmax=25 * sig1)
            plt.colorbar()
            plt.title('%s: data' % tag)
            ps.savefig()
            plt.clf()
            plt.imshow(mod,
                       interpolation='nearest',
                       origin='lower',
                       cmap='gray',
                       vmin=-3 * sig1,
                       vmax=25 * sig1)
            plt.colorbar()
            plt.title('%s: model' % tag)
            ps.savefig()

            plt.clf()
            plt.imshow(chi,
                       interpolation='nearest',
                       origin='lower',
                       cmap='gray',
                       vmin=-5,
                       vmax=+5)
            plt.colorbar()
            plt.title('%s: chi' % tag)
            ps.savefig()

    nm = np.array([src.getBrightness().getBand(wanyband) for src in cat])
    nm_ivar = flux_invvars
    # Sources out of bounds, eg, never change from their initial
    # fluxes.  Zero them out instead.
    nm[nm_ivar == 0] = 0.

    phot.set('flux_%s' % wband, nm.astype(np.float32))
    phot.set('flux_ivar_%s' % wband, nm_ivar.astype(np.float32))
    for k in fskeys:
        phot.set(k + '_' + wband,
                 fitstats.get(k, np.zeros(len(phot), np.float32)))
    phot.set('nobs_%s' % wband, nexp)
    phot.set('mjd_%s' % wband, mjd)

    rtn = wphotduck()
    rtn.phot = phot
    rtn.models = None
    rtn.maskmap = None
    if get_models:
        rtn.models = models
    if get_masks:
        rtn.maskmap = maskmap
    return rtn
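
A hedged sketch of a call site for the function above, assuming `cat` is a list of tractor sources and `tiles` is a fits_table with RA, Dec and coadd_id columns as the docstring states; everything else is left at its defaults:

ret = unwise_forcedphot(cat, tiles, band=1, use_ceres=False)
phot = ret.phot                     # FITS table, same length as cat
w1_flux = phot.get('flux_w1')       # forced W1 fluxes (Vega nanomaggies)
w1_ivar = phot.get('flux_ivar_w1')  # and their inverse variances
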
Example No. 51
    def dailyreturns(self):
        # Parameters
        #startdate_string = '2004-12-31'
        #symbol = '^GSPC ^OEX ^VIX ^OEX ^MID ^RUT'
        symbol = self.Symbol
        #startdate_string = self.StartDateString
        # ##########
        # Date setup
        import datetime
        #today_datetime = datetime.datetime.today()
        today_date = datetime.date.today()
        #yesterday_date = datetime.date.fromordinal(datetime.date.today().toordinal()-1)
        #print str(today_date)

        import pullprices as pp
        #df_00 = pp.stockhistorybackfilledtodatframeofstockhistoryinstancesusingcache(symbol,startdate_string,str(yesterday_date)) #,str(today_date))
        df_00 = self.StockHistoryDataframe
        #print list(df_00)#['Close','Adj Close']
        #print df_00[['Close','Adj Close']]

        import pandas as pd
        dates1 = pd.date_range('1910-01-01', str(today_date), freq='D')

        dummy_date = datetime.datetime.strptime("1801-01-01", "%Y-%m-%d")
        prev_date = dummy_date
        prev_value = float('Nan')

        rows_dailyreturns = []
        #rows_optionpricescurrent.append(['optionsymbol','stockprice','strike','pdeltapct_to_sell_price','cumprob_to_sell_price','bid','ask','last'])
        rows_dailyreturns.append(
            ['a_symbol', 'b_monthend', 'e_pctchange', 'd_end'])

        for dt in dates1:
            if str(dt.date()) in df_00.index:
                #print dt.date()
                myobj = df_00.loc[str(dt.date())]
                #print myobj
                curr_date = dt.date()
                curr_value = myobj['Adj Close']
                if prev_date != dummy_date:
                    #print 'pullreturns curr_value,prev_value',curr_value,prev_value
                    if is_number(curr_value) and is_number(prev_value):
                        change_pct = (float(curr_value) -
                                      float(prev_value)) / float(prev_value)
                    else:
                        change_pct = float('NaN')

                    #print symbol,prev_date,prev_value,curr_date,curr_value,change_pct
                    rows_dailyreturns.append(
                        [symbol, curr_date, change_pct, curr_value])
                    #'{percent:.2%}'.format(percent=pdeltapct_to_sell_price)
                    #print symbol,curr_date,change_pct

                prev_date = dt.date()
                prev_value = myobj['Adj Close']

        headers = rows_dailyreturns.pop(0)
        df_dailyreturns = pd.DataFrame(rows_dailyreturns, columns=headers)
        import numpy as np
        df_dailyreturnsfinite = df_dailyreturns[np.isfinite(
            df_dailyreturns['e_pctchange'])]
        #print df_dailyreturnsfinite
        #print df_00

        stock_dataframe = pp.stock_dataframe(symbol)
        myobj = df_00.loc[str(prev_date)]
        prev_ending = myobj['Adj Close']
        curr_price = stock_dataframe['last'][0]
        if is_number(curr_price) and is_number(prev_ending):
            curr_pctchange = (float(curr_price) -
                              float(prev_ending)) / float(prev_ending)
        else:
            curr_pctchange = float('NaN')

        #df_curr = pd.DataFrame([symbol,today_date,curr_pctchange], columns=['a_symbol','b_monthend','e_pctchange'])
        #newrow = np.array([symbol,today_date,curr_pctchange])
        #columns=['a_symbol','b_monthend','e_pctchange','d_end']

        #import numpy as np

        mydict = {}
        mydict[0] = {
            'a_symbol': symbol,
            'b_monthend': str(today_date),
            'e_pctchange': curr_pctchange,
            'd_end': curr_price
        }
        df_curr = pd.DataFrame(mydict).T
        #print df_curr.T

        df_dailyreturnstotoday = pd.concat(
            [df_dailyreturnsfinite, df_curr], ignore_index=True)
        #print df_dailyreturns
        #print str(today_date)[:7]
        return df_dailyreturnstotoday
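
The day-over-day loop above can be written directly with pandas; a minimal sketch, assuming a DataFrame with an 'Adj Close' column indexed by date:

import numpy as np
import pandas as pd

df = pd.DataFrame({'Adj Close': [100.0, 101.0, np.nan, 103.0]})
pct = df['Adj Close'].pct_change(fill_method=None)   # NaN where an endpoint is missing
finite = pct[np.isfinite(pct)]                       # keep only computable returns
print(finite)
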
Example No. 52
def make_comparison_image(filename1,
                          filename2,
                          title1='bsens',
                          title2='cleanest',
                          writediff=False,
                          allow_reproj=False):
    #fh_pre = fits.open()
    #fh_post = fits.open()
    cube_pre = SpectralCube.read(filename1,
                                 format='fits' if 'fits' in filename1 else
                                 'casa_image').with_spectral_unit(u.GHz)
    cube_post = SpectralCube.read(filename2,
                                  format='fits' if 'fits' in filename2 else
                                  'casa_image').with_spectral_unit(u.GHz)

    if 'pbcor' in filename1:
        assert 'pbcor' in filename2
    if 'pbcor' in filename2:
        assert 'pbcor' in filename1

    if allow_reproj:
        if cube_pre.shape != cube_post.shape or (
                cube_post.wcs != cube_pre.wcs
                and cube_post.wcs.wcs != cube_pre.wcs.wcs):
            cube_post = cube_post.reproject(cube_pre.header)

    cube_pre = cube_pre.with_mask(cube_pre != 0 * cube_pre.unit)
    cube_post = cube_post.with_mask(cube_post != 0 * cube_post.unit)
    slices = cube_pre.subcube_slices_from_mask(cube_pre.mask & cube_post.mask,
                                               spatial_only=True)[1:]

    # make the cubes match the data; needed for later WCS cutouts
    cube_pre = cube_pre[:, slices[0], slices[1]]
    cube_post = cube_post[:, slices[0], slices[1]]

    #cube_pre = cube_pre.minimal_subcube()
    #cube_post = cube_post.minimal_subcube()
    data_pre = cube_pre[0].value
    data_post = cube_post[0].value

    data_pre[np.abs(data_pre) < 1e-7] = np.nan
    data_post[np.abs(data_post) < 1e-7] = np.nan

    try:
        diff = (data_post - data_pre)
    except Exception as ex:
        print(filename1, filename2, cube_pre.shape, cube_post.shape)
        raise ex

    ww = cube_post.wcs
    beam = cube_post.beam
    pixscale = wcs.utils.proj_plane_pixel_area(ww) * u.deg**2
    ppbeam = (beam.sr / pixscale).decompose()
    assert ppbeam.unit.is_equivalent(u.dimensionless_unscaled)
    ppbeam = ppbeam.value

    if writediff:
        fits.PrimaryHDU(data=diff, header=cube_post.header).writeto(
            filename2.split(".fits")[0] + ".preselfcal-diff.fits",
            overwrite=True)
    fig = pl.figure(1, figsize=(14, 6))
    fig.clf()

    if fig.get_figheight() != 6:
        fig.set_figheight(6)
    if fig.get_figwidth() != 14:
        fig.set_figwidth(14)

    minv = np.nanpercentile(data_pre, 0.05)
    maxv = np.nanpercentile(data_pre, 99.5)
    if np.abs(minv) > maxv:
        minv = -maxv

    norm = visualization.simple_norm(
        data=diff.squeeze(),
        stretch='asinh',
        #min_percent=0.05, max_percent=99.995,)
        min_cut=minv,
        max_cut=maxv)
    if norm.vmax < 0.001:
        norm.vmax = 0.001

    cm = pl.matplotlib.cm.gray
    cm.set_bad('white', 0)

    ax1 = pl.subplot(1, 3, 1)
    ax2 = pl.subplot(1, 3, 2)
    ax3 = pl.subplot(1, 3, 3)
    for ax in (ax1, ax2, ax3):
        ax.cla()

    ax1.imshow(data_pre,
               norm=norm,
               origin='lower',
               interpolation='nearest',
               cmap=cm)
    ax1.set_title(title1)

    ax2.imshow(data_post,
               norm=norm,
               origin='lower',
               interpolation='nearest',
               cmap=cm)
    ax2.set_title(title2)

    im = ax3.imshow(diff.squeeze(),
                    norm=norm,
                    origin='lower',
                    interpolation='nearest',
                    cmap=cm)
    ax3.set_title(f"{title2} - {title1}")

    for ax in (ax1, ax2, ax3):
        ax.set_xticks([])
        ax.set_yticks([])

    pl.subplots_adjust(wspace=0.0)

    cbax = fig.add_axes([0.91, 0.18, 0.03, 0.64])
    fig.colorbar(cax=cbax, mappable=im)

    meta = parse_fn(filename1)

    reg = get_noise_region(meta['region'], meta['band'])

    if reg is not None:
        reglist = regions.read_ds9(reg)
        composite_region = reduce(operator.or_, reglist)

        if hasattr(composite_region, 'to_mask'):
            msk = composite_region.to_mask()
        else:
            preg = composite_region.to_pixel(cube_pre.wcs.celestial)
            msk = preg.to_mask()

        cutout_pixels_pre = msk.cutout(
            data_pre, fill_value=np.nan)[msk.data.astype('bool')]

        mad_sample_pre = mad_std(cutout_pixels_pre, ignore_nan=True)
        std_sample_pre = np.nanstd(cutout_pixels_pre)

        if hasattr(composite_region, 'to_mask'):
            msk = composite_region.to_mask()
        else:
            preg = composite_region.to_pixel(cube_post.wcs.celestial)
            msk = preg.to_mask()
        cutout_pixels_post = msk.cutout(
            data_post, fill_value=np.nan)[msk.data.astype('bool')]

        mad_sample_post = mad_std(cutout_pixels_post, ignore_nan=True)
        std_sample_post = np.nanstd(cutout_pixels_post)

        if np.any(np.isnan(mad_sample_pre)):
            log.warning("mad_sample_pre contains some NaN values")
        if np.any(np.isnan(mad_sample_post)):
            log.warning("mad_sample_post contains some NaN values")

        if len(cutout_pixels_post) != len(cutout_pixels_pre):
            log.warning(
                f"cutout pixels are different size in pre vs post ({filename1} : {filename2})"
            )
        if (cube_pre.wcs.celestial != cube_post.wcs.celestial) and (
                cube_pre.wcs.celestial.wcs != cube_post.wcs.celestial.wcs):
            # wcs comparisons stopped working sometime in 2019-2020 - wcs.wcs comparisons appear to work?
            log.warning(
                f"post and pre have different celestial WCSes ({filename1} : {filename2})"
            )

        if not np.isfinite(mad_sample_pre):
            raise ValueError

    mad_pre = mad_std(data_pre, ignore_nan=True)
    mad_post = mad_std(data_post, ignore_nan=True)

    mad_diff = mad_std(diff, ignore_nan=True)
    diffmask = np.abs(diff) > 3 * mad_diff

    diffstats = {
        'mean': np.nanmean(diff),
        'max': np.nanmax(diff),
        'shape': diff.shape[0],
        'ppbeam': ppbeam,
        'sum': np.nansum(diff),
        'masksum': diff[diffmask].sum(),
        'min': np.nanmin(diff),
        'median': np.nanmedian(diff),
        'mad': mad_diff,
        'dr_pre': np.nanmax(data_pre) / mad_std(data_pre, ignore_nan=True),
        'dr_post': np.nanmax(data_post) / mad_std(data_post, ignore_nan=True),
        'min_pre': np.nanmin(data_pre),
        'min_post': np.nanmin(data_post),
        'max_pre': np.nanmax(data_pre),
        'max_post': np.nanmax(data_post),
        'sum_pre': np.nansum(data_pre),
        'sum_post': np.nansum(data_post),
        'masksum_pre': (data_pre[data_pre > mad_pre * 3]).sum(),
        'masksum_post': (data_post[data_post > mad_post * 3]).sum(),
        'mad_pre': mad_pre,
        'mad_post': mad_post,
        'mad_sample_pre': np.nan,
        'mad_sample_post': np.nan,
        'std_sample_pre': np.nan,
        'std_sample_post': np.nan,
    }
    if reg is not None:
        diffstats.update({
            'mad_sample_pre': mad_sample_pre,
            'mad_sample_post': mad_sample_post,
            'std_sample_pre': std_sample_pre,
            'std_sample_post': std_sample_post,
        })

    return ax1, ax2, ax3, fig, diffstats
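A minimal sketch of how the returned values might be consumed; the enclosing function's name is not visible in this excerpt, so `compare_pre_post_images` is a hypothetical stand-in:

# Hypothetical usage; `compare_pre_post_images` stands in for the
# (unnamed here) enclosing function.
ax1, ax2, ax3, fig, diffstats = compare_pre_post_images(filename1, filename2)
print("MAD pre/post: {mad_pre:.3g} / {mad_post:.3g}".format(**diffstats))
print("Dynamic range pre/post: {dr_pre:.1f} / {dr_post:.1f}".format(**diffstats))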
Example No. 53
    def plot_bolometric_qlf(self, redshift: float, gals: np.ndarray = None):
        """Plot the bolometric quasar luminosity function.

        Parameters
        ----------
        redshift : float
            The redshift of interest
        gals : np.ndarray, optional
            The galaxies (already read in with correct Hubble corrections applied). If not supplied, the necessary
            galaxy properties will be read in.

        Returns
        -------
        fig : matplotlib.Figure
            The matplotlib figure
        ax : matplotlib.Axes
            The matplotlib axis
        """
        snap, z = check_for_redshift(self.fname, redshift)

        logger.info(f"Plotting z={redshift:.2f} bolometric QLF.")
        logger.warning("This plotting routine is under construction and should not be trusted!")

        required_props = [
            "BlackHoleMass",
            "BlackHoleAccretedHotMass",
            "BlackHoleAccretedColdMass",
            "dt",
        ]
        if gals is None:
            try:
                gals = read_gals(self.fname, snap, props=required_props)
            except ValueError:
                logger.warning(f"Unable to read required properties: {required_props}")
                return []
        else:
            if not all([prop in gals.dtype.names for prop in required_props]):
                logger.warning(f"Unable to read required properties: {required_props}")
                return []

        mags = bh_bolometric_mags(gals, self.params)
        lum = (4.74 - mags[np.isfinite(mags)]) / 2.5
        lf = munge.mass_function(lum, self.params["Volume"], 30)

        #  lf[:, 0] *= 1.0 - np.cos(np.deg2rad(self.params['quasar_open_angle']) / 2.0)  # normalized to 2pi

        obs = number_density(feature="QLF_bolometric", z_target=redshift, h=self.params["Hubble_h"], quiet=True)

        fig, ax = plt.subplots(1, 1, tight_layout=True)
        alpha = 0.6
        props = cycler.cycler(marker=("o", "s", "H", "P", "*", "^", "v", "<", ">"))
        for ii, prop in zip(range(obs.n_target_observation), props):
            data = obs.target_observation["Data"][ii]
            label = obs.target_observation.index[ii]
            datatype = obs.target_observation["DataType"][ii]
            data[:, 1:] = np.log10(data[:, 1:])
            if datatype == "data":
                ax.errorbar(
                    data[:, 0],
                    data[:, 1],
                    yerr=[data[:, 1] - data[:, 3], data[:, 2] - data[:, 1]],
                    label=label,
                    ls="",
                    mec="w",
                    alpha=alpha,
                    **prop,
                )
            elif datatype == "dataULimit":
                ax.errorbar(
                    data[:, 0],
                    data[:, 1],
                    yerr=-0.2 * data[:, 1],
                    uplims=True,
                    label=label,
                    mec="w",
                    alpha=alpha,
                    **prop,
                )
            else:
                ax.plot(data[:, 0], data[:, 1], label=label, lw=3, alpha=alpha)
                ax.fill_between(data[:, 0], data[:, 2], data[:, 3], alpha=0.4)

        ax.plot(lf[:, 0], np.log10(lf[:, 1]), ls="-", color="k", lw=4, label="Meraxes run")

        ax.legend(loc="lower left", fontsize="xx-small", ncol=2)
        ax.text(0.95, 0.95, f"z={z:.2f}", ha="right", va="top", transform=ax.transAxes)

        ax.set(
            xlim=(8, 18),
            ylim=(-14, -1),
            xlabel=r"$\log_{10}(L/{\rm L_{\odot}})$",
            ylabel=r"$\log_{10}(\phi\ [{\rm Mpc^{-1}}])$",
        )

        if self.save:
            self.plot_dir.mkdir(exist_ok=True)
            sns.despine(ax=ax)
            fname = self.plot_dir / f"bolometric_qlf_z{redshift:.2f}.pdf"
            plt.savefig(fname)

        return fig, ax
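The `lum` line above converts bolometric magnitudes to log-luminosities via log10(L/Lsun) = (4.74 - M_bol) / 2.5, using a solar bolometric magnitude of 4.74. A usage sketch, assuming `plotter` is an instance of the (unnamed here) enclosing plotting class:

# Sketch only: `plotter` is a hypothetical instance of the enclosing class.
fig, ax = plotter.plot_bolometric_qlf(redshift=6.0)
fig.savefig("qlf_check.png")  # in addition to the class's own save logic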
Example No. 54
def temporal_fill_func(sub_array,
                       sub_i_array,
                       block_mask,
                       fill_method='linear'):
    """Single core temporal fill function

    Fill Landsat scene dates so that the interpolator only runs between known dates.

    Parameters
    ----------
    sub_array : ndarray
    sub_i_array : ndarray
    block_mask : ndarray
    fill_method : {'linear', 'cubicspline', 'nearest'}
        Interpolation method (the default is 'linear').

    Returns
    -------
    ndarray

    """
    # Skip block if the mask contains no valid pixels
    if not np.any(block_mask):
        return sub_array

    # Begin interpolating scene days with missing values
    # for interp_i, interp_doy in enumerate(sub_i_array):
    for interp_sub_i, interp_full_i in enumerate(sub_i_array):
        # Interp mask is False where pixels have data
        # (i.e. True for pixels that will be interpolated)
        interp_mask = np.isnan(sub_array[interp_sub_i, :, :])
        interp_mask &= block_mask
        if not np.any(interp_mask):
            continue
        # logging.info('    INTERP {} {}'.format(
        #     interp_sub_i, interp_full_i))

        # list of subsequent days
        for anchor_sub_i, anchor_full_i in enumerate(sub_i_array):
            if anchor_sub_i <= interp_sub_i:
                continue
            # Interpolate when next DOY has data
            anchor_mask = np.copy(interp_mask)
            anchor_mask &= np.isfinite(sub_array[anchor_sub_i, :, :])
            if not np.any(anchor_mask):
                continue
            # logging.info('      ANCHOR {} {}'.format(
            #     anchor_sub_i, anchor_full_i))
            if fill_method == 'cubicspline':
                for cubic_sub_i, cubic_full_i in enumerate(sub_i_array):
                    if cubic_sub_i <= anchor_sub_i:
                        continue
                    cubic_mask = np.copy(anchor_mask)
                    cubic_mask &= np.isfinite(sub_array[cubic_sub_i, :, :])
                    if not np.any(cubic_mask):
                        continue
                    # logging.info('      CUBIC {} {}'.format(
                    #     cubic_sub_i, cubic_full_i))
                    interp_i_array = np.array([
                        sub_i_array[interp_sub_i - 2],
                        sub_i_array[interp_sub_i - 1],
                        sub_i_array[anchor_sub_i], sub_i_array[cubic_sub_i]
                    ])
                    interp_i_mask = np.in1d(sub_i_array, interp_i_array)
                    interp_array = sub_array[interp_i_mask, :, :][:,
                                                                  cubic_mask]
                    f = interpolate.interp1d(interp_i_array,
                                             interp_array,
                                             axis=0,
                                             kind=3)
                    sub_array[interp_sub_i, :, :][cubic_mask] = f(
                        interp_full_i)
                    # sub_array[interp_sub_i,:,:][anchor_mask] = f(interp_full_i).astype(np.float32)
                    interp_mask[cubic_mask] = False
                    anchor_mask[cubic_mask] = False
                    del f, interp_i_array, interp_i_mask
                    del cubic_mask, interp_array
                    if not np.any(interp_mask):
                        break
            elif fill_method == 'linear':
                interp_i_array = np.array(
                    [sub_i_array[interp_sub_i - 1], sub_i_array[anchor_sub_i]])
                interp_i_mask = np.in1d(sub_i_array, interp_i_array)
                interp_array = sub_array[interp_i_mask, :, :][:, anchor_mask]
                f = interpolate.interp1d(interp_i_array,
                                         interp_array,
                                         axis=0,
                                         kind=fill_method)
                sub_array[interp_sub_i, :, :][anchor_mask] = f(interp_full_i)
                # sub_array[interp_sub_i,:,:][anchor_mask] = f(interp_full_i).astype(np.float32)
                interp_mask[anchor_mask] = False
                del f, interp_i_array, interp_i_mask, interp_array
                if not np.any(interp_mask):
                    break
            elif fill_method == 'nearest':
                pass
            # There is a memory leak with f/interp1d
            # gc.collect()
        del interp_mask
    return sub_array
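A worked call under stated assumptions (a 3-date, single-pixel stack; numpy imported, and the helper defined as above):

import numpy as np

# Illustrative stack: the middle date is missing and gets filled linearly
# between its two finite neighbours.
stack = np.array([[[1.0]], [[np.nan]], [[3.0]]])
dates = np.array([0, 5, 10])             # full-array day indices
mask = np.isfinite(stack).any(axis=0)    # pixels that have any valid data
filled = temporal_fill_func(stack, dates, mask, fill_method='linear')
print(filled[1, 0, 0])                   # 2.0, halfway between 1.0 and 3.0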
Example No. 55
    def plot_HImf(self, redshift: float, gals: np.ndarray = None):
        """Plot the HI mass function.

        Parameters
        ----------
        redshift : float
            The redshift of interest
        gals : np.ndarray, optional
            The galaxies (already read in with correct Hubble corrections applied). If not supplied, the necessary
            galaxy properties will be read in.

        Returns
        -------
        fig : matplotlib.Figure
            The matplotlib figure
        ax : matplotlib.Axes
            The matplotlib axis
        """

        snap, z = check_for_redshift(self.fname, redshift)
        logger.info(f"Plotting z={redshift:.2f} HImf")

        plot_obs = False
        if not 0.0 <= redshift <= 0.05:
            logger.warning("Currently only have HImf data for z=0.")
        else:
            plot_obs = True

        fig, ax = plt.subplots(1, 1, tight_layout=True)
        props = cycler.cycler(marker=("o", "s", "H", "P", "*", "^", "v", "<", ">"))()
        alpha = 0.6

        if plot_obs:
            # ALFALFA-Martin et al. 2010 (h=0.7)
            # Values provided by H. Kim.
            obs_hubble = 0.7
            _raw = dedent(
                """\
             6.3  -0.743   0.366
             6.5  -0.839   0.259
             6.7  -0.875   0.191
             6.9  -0.935   0.153
             7.1  -1.065   0.154
             7.3  -1.130   0.114
             7.5  -1.163   0.082
             7.7  -1.224   0.070
             7.9  -1.363   0.061
             8.1  -1.460   0.054
             8.3  -1.493   0.046
             8.5  -1.573   0.043
             8.7  -1.664   0.038
             8.9  -1.689   0.029
             9.1  -1.673   0.023
             9.3  -1.740   0.021
             9.5  -1.893   0.021
             9.7  -2.061   0.018
             9.9  -2.288   0.017
            10.1  -2.596   0.017
            10.3  -3.006   0.024
            10.5  -3.641   0.057
            10.7  -4.428   0.131
            10.9  -5.320   0.376
            """
            )
            data = np.fromstring(_raw, sep=" ").reshape(-1, 3)
            data[:, 0] -= 2 * np.log10(self.params["Hubble_h"] / obs_hubble)
            data[:, 1] += 3 * np.log10(self.params["Hubble_h"] / obs_hubble)

            ax.errorbar(
                data[:, 0],
                data[:, 1],
                yerr=data[:, 2],
                label="Martin et al. (2010)",
                ls="",
                mec="w",
                alpha=alpha,
                **next(props),
            )

            # HIPASS-Zwaan et al. 2005 (h=0.75)
            # Values provided by H. Kim.
            obs_hubble = 0.75
            _raw = dedent(
                """\
            7.186 -0.733 0.397 0.2039
            7.3345 -0.8838 0.3179 0.1816
            7.483 -1.1 0.301 0.1761
            7.6315 -1.056 0.1955 0.1343
            7.78 -1.207 0.1992 0.136
            7.9285 -1.35 0.1374 0.1042
            8.077 -1.315 0.08988 0.07443
            8.2255 -1.331 0.07159 0.06144
            8.374 -1.308 0.05789 0.05108
            8.5225 -1.31 0.04438 0.04027
            8.671 -1.455 0.04284 0.03899
            8.8195 -1.555 0.03725 0.03431
            8.968 -1.55 0.03187 0.02969
            9.1165 -1.69 0.03179 0.02962
            9.265 -1.735 0.02666 0.02512
            9.4135 -1.843 0.02456 0.02324
            9.562 -1.974 0.02352 0.02231
            9.7105 -2.166 0.02506 0.0237
            9.859 -2.401 0.02768 0.02602
            10.0075 -2.785 0.03275 0.03045
            10.156 -3.013 0.03628 0.03348
            10.3045 -3.417 0.05028 0.04506
            10.453 -4.044 0.07708 0.06544
            10.6015 -4.83 0.1562 0.1147
            10.75 -5.451 0.2567 0.1602
            """
            )
            data = np.fromstring(_raw, sep=" ").reshape(-1, 4)
            data[:, 0] -= 2 * np.log10(self.params["Hubble_h"] / obs_hubble)
            data[:, 1] += 3 * np.log10(self.params["Hubble_h"] / obs_hubble)

            ax.errorbar(
                data[:, 0],
                data[:, 1],
                yerr=[data[:, 2], data[:, 3]],
                label="Zwaan et al. (2005)",
                ls="",
                mec="w",
                alpha=alpha,
                **next(props),
            )

        if gals is None:
            HImass = np.log10(read_gals(self.fname, snap, props=["HIMass"])["HIMass"]) + 10.0
        else:
            HImass = np.log10(gals["HIMass"]) + 10.0
        HImass = HImass[np.isfinite(HImass)]
        mf = munge.mass_function(HImass, self.params["Volume"], 30)

        ax.plot(
            mf[:, 0], np.log10(mf[:, 1]), ls="-", color="k", lw=4, zorder=10, label="Meraxes run",
        )

        ax.legend(loc="lower left", fontsize="xx-small", ncol=2)
        ax.text(0.95, 0.95, f"z={z:.2f}", ha="right", va="top", transform=ax.transAxes)

        ax.set(
            xlim=(7, 11),
            ylim=(-6, 0),
            xlabel=r"$\log_{10}(M_{\rm HI}\ [{\rm M_{\odot}}])$",
            ylabel=r"$\log_{10}(\phi\ [{\rm Mpc^{-1}}])$",
        )

        if self.save:
            self.plot_dir.mkdir(exist_ok=True)
            sns.despine(ax=ax)
            fname = self.plot_dir / f"HImf_z{redshift:.2f}.pdf"
            plt.savefig(fname)

        return fig, ax
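The two in-place shifts applied to `data` above encode the usual Hubble-constant scalings: HI masses inferred from 21 cm flux scale as h^-2 and number densities as h^3. A standalone sketch with illustrative h values:

import numpy as np

h_sim, h_obs = 0.678, 0.70                 # illustrative values only
dlogM = -2.0 * np.log10(h_sim / h_obs)     # shift applied to log10(M_HI)
dlogPhi = 3.0 * np.log10(h_sim / h_obs)    # shift applied to log10(phi)
print(dlogM, dlogPhi)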
Example No. 56
    def plot_smf(self, redshift: float, imfscaling: float = 1.0, gals: np.ndarray = None):
        """Plot the stellar mass function for a given redshift.

        Parameters
        ----------
        redshift : float
            The requested redshift to plot.
        imfscaling : float
            Scaling for IMF from Salpeter (Mstar[IMF] = Mstar[Salpeter] * imfscaling) (default: 1.0)
        gals : np.ndarray, optional
            The galaxies (already read in with correct Hubble corrections applied). If not supplied, the necessary
            galaxy properties will be read in.

        Returns
        -------
        fig : matplotlib.Figure
            The matplotlib figure
        ax : matplotlib.Axes
            The matplotlib axis
        """

        imfscaling = np.log10(imfscaling)
        snap, z = check_for_redshift(self.fname, redshift)

        logger.info(f"Plotting z={redshift:.2f} SMF")

        if gals is None:
            stellar = np.log10(read_gals(self.fname, snap, props=["StellarMass"])["StellarMass"]) + 10.0
        else:
            stellar = np.log10(gals["StellarMass"]) + 10.0

        stellar = stellar[np.isfinite(stellar)]
        smf = munge.mass_function(stellar, self.params["Volume"], 30)

        obs = number_density(feature="GSMF", z_target=z, h=self.params["Hubble_h"], quiet=True)

        fig, ax = plt.subplots(1, 1, tight_layout=True)
        alpha = 0.6
        props = cycler.cycler(marker=("o", "s", "H", "P", "*", "^", "v", "<", ">"))
        for ii, prop in zip(range(obs.n_target_observation), props):
            data = obs.target_observation["Data"][ii]
            data[:, 0] += imfscaling
            label = obs.target_observation.index[ii]
            datatype = obs.target_observation["DataType"][ii]
            data[:, 1:] = np.log10(data[:, 1:])
            if datatype == "data":
                ax.errorbar(
                    data[:, 0],
                    data[:, 1],
                    yerr=[data[:, 1] - data[:, 3], data[:, 2] - data[:, 1]],
                    label=label,
                    ls="",
                    mec="w",
                    alpha=alpha,
                    **prop,
                )
            elif datatype == "dataULimit":
                ax.errorbar(
                    data[:, 0],
                    data[:, 1],
                    yerr=-0.2 * data[:, 1],
                    uplims=True,
                    label=label,
                    mec="w",
                    alpha=alpha,
                    **prop,
                )
            else:
                ax.plot(data[:, 0], data[:, 1], label=label, lw=3, alpha=alpha)
                ax.fill_between(data[:, 0], data[:, 2], data[:, 3], alpha=0.4)

        ax.plot(smf[:, 0], np.log10(smf[:, 1]), ls="-", color="k", lw=4, label="Meraxes run")

        ax.legend(loc="lower left", fontsize="xx-small", ncol=2)
        ax.text(0.95, 0.95, f"z={z:.2f}", ha="right", va="top", transform=ax.transAxes)

        ax.set(
            xlim=(6, 13),
            ylim=(-8, 0.5),
            xlabel=r"$\log_{10}(M_*\ [{\rm M_{\odot}}])$",
            ylabel=r"$\log_{10}(\phi\ [{\rm Mpc^{-1}}])$",
        )

        if self.save:
            self.plot_dir.mkdir(exist_ok=True)
            sns.despine(ax=ax)
            fname = self.plot_dir / f"smf_z{redshift:.2f}.pdf"
            plt.savefig(fname)

        return fig, ax
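A usage sketch; the 0.61 Salpeter-to-Chabrier mass factor below is a commonly quoted literature value, not taken from this source, and `plotter` is again a hypothetical instance:

# Hypothetical call: shift observed Salpeter-IMF stellar masses to a
# Chabrier-like IMF (the ~0.61 factor is an assumption, not from this source).
fig, ax = plotter.plot_smf(redshift=5.0, imfscaling=0.61)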
Example No. 57
def solve_collocation_system(fun, t, y, h, Z0, scale, tol, LU_real, LU_complex,
                             solve_lu):
    """Solve the collocation system.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    h : float
        Step to try.
    Z0 : ndarray, shape (3, n)
        Initial guess for the solution. It determines new values of `y` at
        ``t + h * C`` as ``y + Z0``, where ``C`` are the Radau method constants.
    scale : ndarray, shape (n,)
        Problem tolerance scale, i.e. ``rtol * abs(y) + atol``.
    tol : float
        Tolerance to which the system should be solved. This value is
        compared with the error normalized by `scale`.
    LU_real, LU_complex
        LU decompositions of the system Jacobians.
    solve_lu : callable
        Callable which solves a linear system given a LU decomposition. The
        signature is ``solve_lu(LU, b)``.

    Returns
    -------
    converged : bool
        Whether iterations converged.
    n_iter : int
        Number of completed iterations.
    Z : ndarray, shape (3, n)
        Found solution.
    rate : float
        The rate of convergence.
    """
    n = y.shape[0]
    M_real = MU_REAL / h
    M_complex = MU_COMPLEX / h

    W = TI.dot(Z0)
    Z = Z0

    F = np.empty((3, n))
    ch = h * C

    dW_norm_old = None
    dW = np.empty_like(W)
    converged = False
    rate = None
    for k in range(NEWTON_MAXITER):
        for i in range(3):
            F[i] = fun(t + ch[i], y + Z[i])

        if not np.all(np.isfinite(F)):
            break

        f_real = F.T.dot(TI_REAL) - M_real * W[0]
        f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2])

        dW_real = solve_lu(LU_real, f_real)
        dW_complex = solve_lu(LU_complex, f_complex)

        dW[0] = dW_real
        dW[1] = dW_complex.real
        dW[2] = dW_complex.imag

        dW_norm = norm(dW / scale)
        if dW_norm_old is not None:
            rate = dW_norm / dW_norm_old

        if (rate is not None and (rate >= 1 or rate**(NEWTON_MAXITER - k) /
                                  (1 - rate) * dW_norm > tol)):
            break

        W += dW
        Z = T.dot(W)

        if (dW_norm == 0
                or rate is not None and rate / (1 - rate) * dW_norm < tol):
            converged = True
            break

        dW_norm_old = dW_norm

    return converged, k + 1, Z, rate
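The stopping tests assume the Newton increments contract geometrically with ratio `rate`, so the error remaining after the current step is bounded by the geometric-series tail rate / (1 - rate) * dW_norm; the give-up test extrapolates the same bound over the iterations still available. A numeric illustration:

# Geometric-series bounds behind the two tests above (illustrative numbers).
NEWTON_MAXITER, k = 6, 2
rate, dW_norm, tol = 0.5, 4e-9, 1e-8
print(rate / (1 - rate) * dW_norm < tol)                          # True: converged
print(rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)  # False: keep iterating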
Example No. 58
def lnprob(params, oe, data, err):
    lp = lnprior(params)
    if not np.isfinite(lp):
        return -np.inf
    return lp + lnlike(params, oe, data, err)
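A driver sketch for the log-probability above, assuming the standard emcee pattern (`lnprior` and `lnlike` are defined elsewhere in the original source; dimensionality and walker initialisation here are illustrative):

import numpy as np
import emcee

ndim, nwalkers = 3, 32                       # assumed dimensionality
p0 = 1e-4 * np.random.randn(nwalkers, ndim)  # illustrative starting ball
# `oe`, `data`, `err` as in the surrounding code.
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=(oe, data, err))
sampler.run_mcmc(p0, 1000)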
Example No. 59
    # NOTE: this fragment begins mid-function; the signature and initial
    # query construction are missing from the excerpt.
    queryData = urllib.request.urlopen(url + url_values)

    returnPage = queryData.read().decode('utf-8')
    dtypeseq = ['U40']
    dtypeseq.extend(['f8'] * 3)
    dataBlock = np.genfromtxt(returnPage.splitlines(), delimiter=',', skip_header=1, \
                        dtype=dtypeseq)
    gtName = dataBlock['f0']
    gtPer = dataBlock['f1']
    gtRa = dataBlock['f2']
    gtDec = dataBlock['f3']
    gtTIC = np.arange(len(gtName))
    gtTOI = np.arange(len(gtName))
    # Check for missing ephemeris values
    idxBd = np.where((np.logical_not(np.isfinite(gtPer))))[0]
    if len(idxBd) > 0:
        for curIdxBd in idxBd:
            print('Bad Period for Known Planet')
            print(gtName[curIdxBd])
            # Load known multi data planet table from NEXSCI
            whereString = 'mpl_name like \'{0}\''.format(gtName[curIdxBd])
            url = 'https://exoplanetarchive.ipac.caltech.edu/cgi-bin/nstedAPI/nph-nstedAPI?'
            data = {'table':'exomultpars', \
                    'select':'mpl_name,mpl_orbper', \
                    'format':'csv', \
                    'where':whereString}
            url_values = urllib.parse.urlencode(data)
            queryData = urllib.request.urlopen(url + url_values)
            returnPage = queryData.read().decode('utf-8')
Example No. 60
def check_array(array,
                force_2d=False,
                n_feats=None,
                ndim=None,
                min_samples=1,
                name='Input data',
                verbose=True):
    """
    Tool to perform basic data validation.
    Called by check_X and check_y.

    Ensures that the data:
    - is ndim-dimensional
    - contains float-compatible data types
    - has at least min_samples samples
    - has n_feats features
    - is finite

    Parameters
    ----------
    array : array-like
    force_2d : boolean, default: False
        whether to force a 2d array. Setting to True forces ndim = 2
    n_feats : int, default: None
        Number of features that the array should have.
        Not enforced if n_feats is None.
    ndim : int, default: None
        Number of dimensions expected in the array.
    min_samples : int, default: 1
    name : str, default: 'Input data'
        name to use when referring to the array
    verbose : bool, default: True
        whether to print warnings

    Returns
    -------
    array : validated array
    """
    # make array
    if force_2d:
        array = make_2d(array, verbose=verbose)
        ndim = 2
    else:
        array = np.array(array)

    # cast to float
    dtype = array.dtype
    if dtype.kind not in ['i', 'f']:
        try:
            array = array.astype('float')
        except ValueError as e:
            raise ValueError('{} must be type int or float, '
                             'but found type: {}\n'
                             'Try transforming data with a LabelEncoder first.'
                             .format(name, dtype.type)) from e

    # check finite
    if not (np.isfinite(array).all()):
        raise ValueError('{} must not contain Inf nor NaN'.format(name))

    # check ndim
    if ndim is not None:
        if array.ndim != ndim:
            raise ValueError('{} must have {} dimensions. '\
                             'found shape {}'.format(name, ndim, array.shape))

    # check n_feats
    if n_feats is not None:
        m = array.shape[1]
        if m != n_feats:
            raise ValueError('{} must have {} features, '\
                             'but found {}'.format(name, n_feats, m))

    # minimum samples
    n = array.shape[0]
    if n < min_samples:
        raise ValueError('{} should have at least {} samples, '\
                         'but found {}'.format(name, min_samples, n))

    return array
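A short usage sketch of the validator (assuming the module's `make_2d` helper is importable alongside it):

import numpy as np

X = np.random.rand(10, 3)
X = check_array(X, force_2d=True, n_feats=3, min_samples=5, name='X')
# A NaN or Inf anywhere in X would raise: "X must not contain Inf nor NaN"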