Example #1
def test_types_scalar_fix():
    a = Scalar(1.0)

    assert_(not a.isfixed)

    a.fix()
    assert_(a.isfixed)
Example #2
def test_variables_setattr():
    a = Variables(a0=Scalar(1.0))

    a["a1"] = Scalar(2.0)
    a["a1"].value += 1.0

    assert_equal(a.get("a0").value, 1.0)
    assert_equal(a.get("a1").value, 3.0)
Example #3
def test_variables_merge():
    a = Variables(a0=Scalar(1.0))
    b = Variables(b0=Scalar(1.0))
    c = merge_variables(dict(a=a, b=b))

    a.get("a0").value += 1.0

    assert_equal(a.get("a0").value, 2.0)
    assert_equal(a.get("a0").value, c.get("a.a0").value)
Example #4
def test_variables_set():
    a = Scalar(1.0)
    b = a
    a.value = 2.0
    assert_(a is b)
    assert_(a.raw is b.raw)

    v = Variables(dict(a=Scalar(1.0), b=Scalar(1.5)))
    v.set({"a": 0.5})
    assert_allclose(v.get("a"), 0.5)
Example #5
def test_types_scalar_comparison():
    a = Scalar(1.0)
    b = Scalar(2.0)

    assert_(a < b)
    assert_(a <= b)
    assert_(a != b)

    b.value = 1.0

    assert_(a == b)
Example #6
def test_types_scalar_listen():
    a = Scalar(1.0)

    class Listener(object):
        def __init__(self):
            self.value = None

        def __call__(self):
            self.value = 3.0

    l = Listener()
    a.listen(l)
    a.value = 3.0

    assert_(l.value == 3.0)
Example #7
class Foo1(Function):
    def __init__(self):
        self._a = Vector([0, 0])
        self._b = Vector([0, 0])
        self._c = Scalar(1)
        super(Foo1, self).__init__("Foo1", a=self._a, b=self._b, c=self._c)

    @property
    def a(self):
        return self._a.value

    @property
    def b(self):
        return self._b.value

    @property
    def c(self):
        return self._c.value

    def fix_c(self):
        self._c.fix()

    def unfix_c(self):
        self._c.unfix()

    @c.setter
    def c(self, v):
        self._c.value = v

    def value(self):
        a = self.a
        b = self.b
        c = self.c
        return (a @ b - 3 + a @ [1, 1] - b @ [1, 2] + 1 / c)**2

    def gradient(self):
        a = self.a
        b = self.b
        c = self.c
        v = a @ b - 3 + a @ [1, 1] - b @ [1, 2] + 1 / c
        da = 2 * v * array([b[0] + 1, b[1] + 1])
        db = 2 * v * array([a[0] - 1, a[1] - 2])
        dc = 2 * v * -1 / (c**2)
        return {"a": da, "b": db, "c": dc}

    def check_grad(self):
        return self._check_grad()
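
A brief usage sketch of the Foo1 function defined above. It uses only the methods shown in the snippet, assuming that _check_grad reports the discrepancy between the analytical and a numerical gradient:

f = Foo1()
print(f.value())       # objective at the starting point a = b = [0, 0], c = 1 (equals 4.0)
print(f.gradient())    # analytical gradient as a dict with keys "a", "b" and "c"
print(f.check_grad())  # should be close to zero if gradient() matches value()
f.fix_c()              # keep c constant during optimization
f.unfix_c()            # make c a free variable again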
Example #8
    def __init__(self, y, Q0, Q1, S0, covariates=None):
        super(FastLMM, self).__init__(logistic=Scalar(0.0))

        if not is_all_finite(y):
            raise ValueError("There are non-finite values in the phenotype.")

        self._flmmc = FastLMMCore(y, covariates, Q0, Q1, S0)
        self.set_nodata()
Example #9
def test_types_scalar_listen_indirect():
    a = Scalar(1.0)

    class Listener(object):
        def __init__(self):
            self.value = None

        def __call__(self):
            self.value = 3.0

    l = Listener()

    a.listen(l)

    value = a.value
    value.itemset(3.0)

    assert_(l.value == 3.0)
Example #10
    def __init__(self, y, lik, X, QS=None):
        y = ascontiguousarray(y, float)
        X = asarray(X, float)

        Function.__init__(
            self,
            "GLMM",
            beta=Vector(zeros(X.shape[1])),
            logscale=Scalar(0.0),
            logitdelta=Scalar(0.0),
        )

        if not isinstance(lik, (tuple, list)):
            lik = (lik, )

        self._lik = (lik[0].lower(), ) + tuple(
            ascontiguousarray(i) for i in lik[1:])
        self._y = check_outcome(y, self._lik)
        self._X = check_covariates(X)
        if QS is None:
            self._QS = economic_qs_zeros(self._y.shape[0])
        else:
            self._QS = check_economic_qs(QS)
            if self._y.shape[0] != self._QS[0][0].shape[0]:
                raise ValueError(
                    "Number of samples in outcome and covariance differ.")

        if self._y.shape[0] != self._X.shape[0]:
            raise ValueError(
                "Number of samples in outcome and covariates differ.")

        self._factr = 1e5
        self._pgtol = 1e-6
        self._verbose = False
        self.set_variable_bounds("logscale", (log(0.001), 6.0))

        self.set_variable_bounds("logitdelta", (-50, +15))

        if lik[0] == "probit":
            self.delta = 0.0
            self.fix("delta")
Example #11
    def __init__(self, X):
        """
        Constructor.

        Parameters
        ----------
        X : array_like
            Matrix X from K = s⋅XXᵀ.
        """
        self._logscale = Scalar(0.0)
        self._X = X
        Function.__init__(self, "LinearCov", logscale=self._logscale)
        self._logscale.bounds = (-20.0, +10)
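
A minimal sketch relating the log-scale variable to the covariance K = s⋅XXᵀ from the docstring. The direct access to _logscale is for illustration only and relies on the Scalar behavior shown in the other examples:

from numpy import exp
from numpy.random import RandomState

X = RandomState(0).randn(5, 2)
cov = LinearCov(X)            # class whose constructor is shown above
cov._logscale.value = 1.0     # set the underlying variable directly (illustration only)
s = exp(cov._logscale.value)  # scale recovered from the log-parameterization
K = s * (X @ X.T)             # covariance implied by K = s⋅XXᵀ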
Example #12
    def __init__(self, dim):
        """
        Constructor.

        Parameters
        ----------
        dim : int
            Matrix dimension, d.
        """
        self._dim = dim
        self._I = eye(dim)
        self._logscale = Scalar(0.0)
        Function.__init__(self, "EyeCov", logscale=self._logscale)
        self._logscale.bounds = (-20.0, +10)
Example #13
    def __init__(self, K0):
        """
        Constructor.

        Parameters
        ----------
        K0 : array_like
            A semi-definite positive matrix.
        """
        from numpy_sugar.linalg import check_symmetry

        self._logscale = Scalar(0.0)
        Function.__init__(self, "GivenCov", logscale=self._logscale)
        self._logscale.bounds = (-20.0, +10)
        if not check_symmetry(K0):
            raise ValueError(
                "The provided covariance-matrix is not symmetric.")
        self._K0 = K0
Example #14
def test_types_scalar_copy():
    a = Scalar(1.0)
    b = a.copy()

    assert_(a is not b)
    assert_(a == b)
Example #15
    def __init__(self, y, X, QS=None, restricted=False):
        """
        Constructor.

        Parameters
        ----------
        y : array_like
            Outcome.
        X : array_like
            Covariates as a two-dimensional array.
        QS : tuple
            Economic eigendecomposition in the form ``((Q0, ), S0)`` of a
            covariance matrix ``K``.
        restricted : bool
            ``True`` for restricted maximum likelihood optimization; ``False``
            otherwise. Defaults to ``False``.
        """
        from numpy_sugar import is_all_finite

        logistic = Scalar(0.0)
        logistic.listen(self._delta_update)
        logistic.bounds = (-numbers.logmax, +numbers.logmax)
        Function.__init__(self, "LMM", logistic=logistic)
        self._logistic = logistic

        y = asarray(y, float).ravel()
        if not is_all_finite(y):
            raise ValueError("There are non-finite values in the outcome.")

        if len(y) == 0:
            raise ValueError("The outcome array is empty.")

        X = atleast_2d(asarray(X, float).T).T
        if not is_all_finite(X):
            raise ValueError("There are non-finite values in the covariates matrix.")

        self._optimal = {"beta": False, "scale": False}
        if QS is None:
            QS = economic_qs_zeros(len(y))
            self._B = B(QS[0][0], QS[1], 0.0, 1.0)
            self.delta = 1.0
            logistic.fix()
        else:
            self._B = B(QS[0][0], QS[1], 0.5, 0.5)
            self.delta = 0.5

        if QS[0][0].shape[0] != len(y):
            msg = "Sample size differs between outcome and covariance decomposition."
            raise ValueError(msg)

        if y.shape[0] != X.shape[0]:
            msg = "Sample size differs between outcome and covariates."
            raise ValueError(msg)

        self._y = y
        self._Q0 = QS[0][0]
        self._S0 = QS[1]
        self._Xsvd = SVD(X)
        self._tbeta = zeros(self._Xsvd.rank)
        self._scale = 1.0
        self._fix = {"beta": False, "scale": False}
        self._restricted = restricted
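
A hedged usage sketch for the constructor above. The import paths and the fit()/lml() calls are assumptions about the surrounding glimix-core-style API and should be checked against the installed package:

from numpy import ones
from numpy.random import RandomState
from numpy_sugar.linalg import economic_qs
from glimix_core.lmm import LMM   # assumed import path

random = RandomState(0)
n = 50
y = random.randn(n)               # outcome
X = ones((n, 1))                  # intercept-only covariates
G = random.randn(n, 10)
QS = economic_qs(G @ G.T / 10)    # economic eigendecomposition of the covariance K
lmm = LMM(y, X, QS, restricted=False)
lmm.fit(verbose=False)            # assumed maximum-likelihood fitting method
print(lmm.lml())                  # assumed log marginal likelihood accessor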
Example #16
    def __init__(self):
        Function.__init__(self, offset=Scalar(1.0))
Example #17
    def __init__(self):
        self._c = Scalar(1)
        super(Foo4, self).__init__("Foo4", c=self._c)
Example #18
    def __init__(self):
        self._a = Vector([0, 0])
        self._b = Vector([0, 0])
        self._c = Scalar(1)
        super(Foo1, self).__init__("Foo1", a=self._a, b=self._b, c=self._c)
Example #19
    def __init__(self):
        self._c = Scalar(1)
        self._c.bounds = [1e-9, 1e9]
        super(Foo2, self).__init__("Foo2", c=self._c)
Example #20
    def copy(self):
        o = FastLMM.__new__(FastLMM)
        super(FastLMM, o).__init__(logistic=Scalar(self.get('logistic')))
        o._flmmc = self._flmmc.copy()
        o.set_nodata()
        return o
Example #21
def test_types_modify_scalar():
    a = Scalar(1.0)
    value = atleast_1d(a.value)
    value[0] = 2.0
    assert_(a.value == value[0])
Example #22
    def __init__(self):
        Function.__init__(self, logscale=Scalar(0.0))
Example #23
def test_variables_str():
    v = Variables(dict(a=Scalar(1.0), b=Scalar(1.5)))
    msg = "Variables(a=Scalar(1.0),\n"
    msg += " " * 10 + "b=Scalar(1.5))"
    assert_equal(v.__str__(), msg)