Example #1
    def mult_add_mv(self, m, v, out):
        if m.shape == v.shape:
            self.mult_add_tt(m, v, out=out)
        else:
            # broadcast v across m, then accumulate the product into out
            tmp = self.allocate(out.shape)
            cumisc.mult_matvec(m, v, out=tmp)
            self.add_tt(tmp, out, out=out)
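For reference, a minimal CPU-side sketch (hypothetical, with NumPy standing in for the `cumisc` GPU call) of what the broadcast path above computes: multiply each row of `m` by `v`, then accumulate the product into `out`.

    import numpy as np

    m = np.arange(6.0).reshape(3, 2)
    v = np.array([10.0, 100.0])   # length matches the columns of m
    out = np.ones_like(m)
    tmp = m * v                   # plays the role of cumisc.mult_matvec(m, v, out=tmp)
    out += tmp                    # plays the role of self.add_tt(tmp, out, out=out)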
Example #2
    def add_batch(self, X, T, wc=None):
        """Add a batch of training data to an iterative solution, weighted if needed.

        The batch is processed as a whole; the training data is split in the
        `ELM.add_data()` method.

        Args:
            X (matrix): input data matrix, size (N * `inputs`)
            T (matrix): output data matrix, size (N * `outputs`)
            wc (vector): weights for data samples, one weight per sample, size (N * 1)
        """
        devH = self._project(X, dev=True)
        T = np.array(T, order="C", dtype=self.precision)
        devT = gpuarray.to_gpu(T)
        if wc is not None:  # apply weights if given
            # sqrt(w): the weights enter H.T @ H quadratically
            w = np.array(wc**0.5, dtype=self.precision)[:, None]  # re-shape to column matrix
            devWC = gpuarray.to_gpu(w)
            misc.mult_matvec(devH, devWC, axis=0, out=devH)
            misc.mult_matvec(devT, devWC, axis=0, out=devT)

        if self.HH is None:  # initialize space for self.HH, self.HT
            self.HT = misc.zeros((self.L, self.outputs), dtype=self.precision)
            self.HH = linalg.eye(self.L, dtype=self.precision)
            self.HH *= self.norm

        # HT += H.T @ T
        linalg.add_dot(devH, devT, self.HT, transa='T')
        if self.precision is np.float64:
            # HH += H.T @ H
            linalg.add_dot(devH, devH, self.HH, transa='T')
        else:
            # single precision: symmetric rank-k update HH += H.T @ H via
            # cuBLAS SYRK (only one triangle of HH is touched)
            cublas.cublasSsyrk(self.handle, 'L', 'N', self.L, X.shape[0], 1,
                               devH.ptr, self.L, 1, self.HH.ptr, self.L)
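The GPU calls above maintain the usual incremental least-squares accumulators of an ELM solver: `HH` collects `H.T @ H` (seeded with `norm * I`) and `HT` collects `H.T @ T`, with per-sample weights applied as `sqrt(w)` on the rows because they appear quadratically in `H.T @ H`. A minimal CPU sketch of the same bookkeeping, with hypothetical names:

    import numpy as np

    def add_batch_cpu(H, T, HH, HT, wc=None):
        if wc is not None:
            w = np.sqrt(np.asarray(wc, dtype=H.dtype))[:, None]
            H = H * w   # sqrt(w) on the rows -> w inside H.T @ H
            T = T * w
        HH += H.T @ H
        HT += H.T @ T

    L, outputs, norm = 4, 2, 1e-6
    HH = norm * np.eye(L)                 # mirrors linalg.eye(L) scaled by self.norm
    HT = np.zeros((L, outputs))
    H = np.random.rand(10, L)
    T = np.random.rand(10, outputs)
    add_batch_cpu(H, T, HH, HT, wc=np.ones(10))
    B = np.linalg.solve(HH, HT)           # output weights the solver later extracts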
Example #3
    def impl_test_binaryop_matvec(self, dtype):
        x = np.random.normal(scale=5.0, size=(3, 5)).astype(dtype)
        a = np.random.normal(scale=5.0, size=(1, 5)).astype(dtype)
        b = np.random.normal(scale=5.0, size=(3, 1)).astype(dtype)

        # the following two exercise broadcasting of 1-D vectors; c matches the
        # columns of x, while d (length 3) does not and must raise ValueError
        c = np.random.normal(scale=5.0, size=(5,)).astype(dtype)
        d = np.random.normal(scale=5.0, size=(3,)).astype(dtype)
        x_gpu = gpuarray.to_gpu(x)
        a_gpu = gpuarray.to_gpu(a)
        b_gpu = gpuarray.to_gpu(b)
        c_gpu = gpuarray.to_gpu(c)
        d_gpu = gpuarray.to_gpu(d)
        out = gpuarray.empty(x.shape, dtype=dtype)
        # addition
        res = misc.add_matvec(x_gpu, a_gpu, out=out).get()
        assert np.allclose(res, x+a)
        assert np.allclose(misc.add_matvec(x_gpu, b_gpu).get(), x+b)
        assert np.allclose(misc.add_matvec(x_gpu, c_gpu).get(), x+c)
        assert_raises(ValueError, misc.add_matvec, x_gpu, d_gpu)
        # multiplication
        res = misc.mult_matvec(x_gpu, a_gpu, out=out).get()
        assert np.allclose(res, x*a)
        assert np.allclose(misc.mult_matvec(x_gpu, b_gpu).get(), x*b)
        assert np.allclose(misc.mult_matvec(x_gpu, c_gpu).get(), x*c)
        assert_raises(ValueError, misc.mult_matvec, x_gpu, d_gpu)
        # division
        res = misc.div_matvec(x_gpu, a_gpu, out=out).get()
        assert np.allclose(res, x/a)
        assert np.allclose(misc.div_matvec(x_gpu, b_gpu).get(), x/b)
        assert np.allclose(misc.div_matvec(x_gpu, c_gpu).get(), x/c)
        assert_raises(ValueError, misc.div_matvec, x_gpu, d_gpu)
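The accepted shapes are the ordinary NumPy broadcasting cases, and the rejected one fails in NumPy too: a bare length-3 vector aligns with the 5 columns of `x`, not its rows. A quick CPU check of the same rules:

    import numpy as np

    x = np.ones((3, 5))
    for v in (np.ones((1, 5)), np.ones((3, 1)), np.ones(5)):
        assert (x + v).shape == (3, 5)    # all three broadcast against x
    # np.ones(3) + x raises ValueError ((3,) vs the 5 columns), which is
    # exactly what the test expects from add_matvec and friends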
Example #4
    def mult_mv(self, m, v, out):
        if m.shape == v.shape:
            self.mult_tt(m, v, out=out)
        else:
            # v has fewer dimensions than m: broadcast it with mult_matvec
            cumisc.mult_matvec(m, v, out=out)
Example #5
    def _impl_test_binaryop_matvec(self, dtype):
        if issubclass(dtype, numbers.Integral):
            x = np.random.randint(1, 10, 15).reshape((3, 5)).astype(dtype)
            a = np.random.randint(1, 10, 5).reshape((1, 5)).astype(dtype)
            b = np.random.randint(1, 10, 3).reshape((3, 1)).astype(dtype)

            # the following two exercise broadcasting of 1-D vectors; c matches
            # the columns of x, while d does not and must raise ValueError
            c = np.random.randint(1, 10, 5).reshape((5,)).astype(dtype)
            d = np.random.randint(1, 10, 3).reshape((3,)).astype(dtype)
        else:
            x = np.random.normal(scale=5.0, size=(3, 5)).astype(dtype)
            a = np.random.normal(scale=5.0, size=(1, 5)).astype(dtype)
            b = np.random.normal(scale=5.0, size=(3, 1)).astype(dtype)

            # same 1-D broadcasting checks for the floating-point path
            c = np.random.normal(scale=5.0, size=(5,)).astype(dtype)
            d = np.random.normal(scale=5.0, size=(3,)).astype(dtype)
        x_gpu = gpuarray.to_gpu(x)
        a_gpu = gpuarray.to_gpu(a)
        b_gpu = gpuarray.to_gpu(b)
        c_gpu = gpuarray.to_gpu(c)
        d_gpu = gpuarray.to_gpu(d)
        out = gpuarray.empty(x.shape, dtype=dtype)

        # addition
        res = misc.add_matvec(x_gpu, a_gpu, out=out).get()
        assert_allclose(res,
                        x + a,
                        rtol=dtype_to_rtol[dtype],
                        atol=dtype_to_atol[dtype])
        assert_allclose(misc.add_matvec(x_gpu, b_gpu).get(),
                        x + b,
                        rtol=dtype_to_rtol[dtype],
                        atol=dtype_to_atol[dtype])
        assert_allclose(misc.add_matvec(x_gpu, c_gpu).get(),
                        x + c,
                        rtol=dtype_to_rtol[dtype],
                        atol=dtype_to_atol[dtype])
        assert_raises(ValueError, misc.add_matvec, x_gpu, d_gpu)

        # multiplication
        res = misc.mult_matvec(x_gpu, a_gpu, out=out).get()
        assert_allclose(res,
                        x * a,
                        rtol=dtype_to_rtol[dtype],
                        atol=dtype_to_atol[dtype])
        assert_allclose(misc.mult_matvec(x_gpu, b_gpu).get(),
                        x * b,
                        rtol=dtype_to_rtol[dtype],
                        atol=dtype_to_atol[dtype])
        assert_allclose(misc.mult_matvec(x_gpu, c_gpu).get(),
                        x * c,
                        rtol=dtype_to_rtol[dtype],
                        atol=dtype_to_atol[dtype])
        assert_raises(ValueError, misc.mult_matvec, x_gpu, d_gpu)

        # division
        res = misc.div_matvec(x_gpu, a_gpu, out=out).get()
        if issubclass(dtype, numbers.Integral):
            assert_allclose(res,
                            x // a,
                            rtol=dtype_to_rtol[dtype],
                            atol=dtype_to_atol[dtype])
            assert_allclose(misc.div_matvec(x_gpu, b_gpu).get(),
                            x // b,
                            rtol=dtype_to_rtol[dtype],
                            atol=dtype_to_atol[dtype])
            assert_allclose(misc.div_matvec(x_gpu, c_gpu).get(),
                            x // c,
                            rtol=dtype_to_rtol[dtype],
                            atol=dtype_to_atol[dtype])
        else:
            assert_allclose(res,
                            x / a,
                            rtol=dtype_to_rtol[dtype],
                            atol=dtype_to_atol[dtype])
            assert_allclose(misc.div_matvec(x_gpu, b_gpu).get(),
                            x / b,
                            rtol=dtype_to_rtol[dtype],
                            atol=dtype_to_atol[dtype])
            assert_allclose(misc.div_matvec(x_gpu, c_gpu).get(),
                            x / c,
                            rtol=dtype_to_rtol[dtype],
                            atol=dtype_to_atol[dtype])

        assert_raises(ValueError, misc.div_matvec, x_gpu, d_gpu)
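One behavioral detail worth isolating: for integral dtypes the test compares `div_matvec` against floor division (`//`), while floating dtypes use true division. A small CPU sketch of that expectation:

    import numpy as np

    x = np.array([[7, 8, 9]], dtype=np.int32)
    a = np.array([[2, 3, 4]], dtype=np.int32)
    assert np.array_equal(x // a, [[3, 2, 2]])            # integral: floor division
    xf, af = x.astype(np.float32), a.astype(np.float32)
    assert np.allclose(xf / af, [[3.5, 8 / 3, 2.25]])     # floating: true division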