Example no. 1
0
def test_randomly_replace_elements():
    """randomly_replace_elements must operate in place and set roughly a
    fraction ``p`` of the entries to ``val``, on both CPU and GPU arrays."""
    for val in (0.0, 0.5, 5):
        for p in (0.1, 0.2, 0.5, 0.75, 0.99):
            data = np.random.normal(size=(1024, 2048)).astype(np.float32)
            data_gpu = op.to_gpu(data)

            # CPU path: the returned array must be the input itself.
            replaced, mask = op.randomly_replace_elements(data, p, val)
            assert replaced is data
            msg = "val: %.1f p: %.1f" % (val, p)
            assert_almost_equal((data == val).mean(), p,
                                decimal=2, err_msg=msg)
            assert_almost_equal(mask.mean(), 1 - p,
                                decimal=2, err_msg="M " + msg)

            # GPU path: same contract on the device array.
            replaced_gpu, mask_gpu = op.randomly_replace_elements(
                data_gpu, p, val)
            assert replaced_gpu is data_gpu
            gpu_msg = msg + " (gpu)"
            assert_almost_equal(op.to_cpu(op.mean(data_gpu == val)), p,
                                decimal=2, err_msg=gpu_msg)
            assert_almost_equal(op.to_cpu(op.mean(mask_gpu)), 1 - p,
                                decimal=2, err_msg="M " + gpu_msg)

            Xrd, Md = op.randomly_replace_elements(Xd, p, val)
            assert (Xrd is Xd)
            assert_almost_equal(op.to_cpu(op.mean(Xd == val)),
                                p,
                                decimal=2,
                                err_msg="val: %.1f p: %.1f (gpu)" % (val, p))
            assert_almost_equal(op.to_cpu(op.mean(Md)),
                                1 - p,
                                decimal=2,
                                err_msg="M val: %.1f p: %.1f (gpu)" % (val, p))
Example no. 2
0
def test_csrmm_bug():
    """Regression test: the second sparse matrix-multiply call used to
    crash, so simply executing both calls is the assertion."""
    from scipy.sparse import csr_matrix

    dense_w = np.random.normal(size=(5, 3)).astype(np.float32, order="c")
    dense_x = np.random.laplace(size=(6, 3)).astype(np.float32)
    dense_x[dense_x < 0.1] = 0  # zero out most entries to get a sparse matrix
    sparse_x = csr_matrix(dense_x, dtype=np.float32)

    x_gpu = GPUCSRArray(sparse_x)
    w_gpu = op.to_gpu(dense_w)
    c_gpu = op.dot(x_gpu, w_gpu, False, True, out=None,
                   stream=op.streams[0])
    # The follow-up call on the same operands is the one that crashed.
    op.add_dot(c_gpu, x_gpu, w_gpu, True, False, alpha=-0.3, beta=1.0,
               stream=op.streams[0])
    op.mean(c_gpu, axis=0, stream=op.streams[1])
Example no. 3
0
    def bprop(self, delta, momentum=0.0):
        """Backpropagate ``delta`` through this layer.

        Accumulates the weight gradient into ``self.dW`` (scaled by the
        batch size) and the bias gradient into ``self.db``, optionally
        adds an L2 penalty term on the weights, and returns the error
        signal for the layer below.

        Parameters
        ----------
        delta : GPU array
            Error signal from the layer above; transformed in place by
            ``self.dfunc`` using the stored activations.
        momentum : float
            Passed as ``beta`` when accumulating into ``dW``/``db``,
            i.e. the factor applied to the previously stored gradients.

        Returns
        -------
        The delta to feed into the previous layer, or ``0.0`` when this
        is the input layer (there is nothing below to propagate to).
        """
        op.streams[2].synchronize()  # make sure layer above is done
        # Apply the derivative of the activation to `delta` in place.
        self.dfunc(delta, self.A, self.Z, stream=op.streams[0])
        op.streams[0].synchronize()
        # Weight gradient: blend delta^T . X (averaged over the batch)
        # into dW, keeping `momentum` times the old gradient.
        op.add_dot(delta,
                   self.X,
                   self.dW,
                   True,
                   False,
                   alpha=1.0 / delta.shape[0],
                   beta=momentum,
                   stream=op.streams[0])
        # Bias gradient: column means of delta, blended the same way.
        m = op.mean(delta, axis=0, stream=op.streams[1])
        op.add_vec(self.db, 1.0, m, beta=momentum, stream=op.streams[1])

        if self.l2_penalty > 0:
            # L2 regularization contribution to the weight gradient.
            op.add_vec(self.dW, self.l2_penalty, self.W, stream=op.streams[0])

        if not self.is_input_layer:
            if self.dropout > 0.0 and self.activation not in ("relu",
                                                              "sigmoid"):
                # NOTE(review): presumably relu/sigmoid fold the dropout
                # mask into dfunc, so only other activations need the
                # explicit mask here — confirm against dfunc definitions.
                return op.dot(delta, self.W) * self.M
            else:
                return op.dot(delta, self.W)
        else:
            return 0.0
Example no. 4
0
def test_randomly_replace_elements():
    """In-place replacement should hit about a fraction ``p`` of entries
    with ``val``, for both host (numpy) and device (GPU) arrays."""
    vals = (0.0, 0.5, 5)
    probs = (0.1, 0.2, 0.5, 0.75, 0.99)
    for val in vals:
        for p in probs:
            host = np.random.normal(size=(1024, 2048)).astype(np.float32)
            dev = op.to_gpu(host)

            out_host, mask_host = op.randomly_replace_elements(host, p, val)
            assert out_host is host  # must modify the array in place
            assert_almost_equal(
                (host == val).mean(), p, decimal=2,
                err_msg="val: %.1f p: %.1f" % (val, p))
            assert_almost_equal(
                mask_host.mean(), 1 - p, decimal=2,
                err_msg="M val: %.1f p: %.1f" % (val, p))

            out_dev, mask_dev = op.randomly_replace_elements(dev, p, val)
            assert out_dev is dev  # in place on the device as well
            assert_almost_equal(
                op.to_cpu(op.mean(dev == val)), p, decimal=2,
                err_msg="val: %.1f p: %.1f (gpu)" % (val, p))
            assert_almost_equal(
                op.to_cpu(op.mean(mask_dev)), 1 - p, decimal=2,
                err_msg="M val: %.1f p: %.1f (gpu)" % (val, p))
Example no. 5
0
def test_csrmm_bug():
    """Regression check for a crash on the second csrmm call; the test
    passes if both calls run without raising."""
    from scipy.sparse import csr_matrix

    weights = np.random.normal(size=(5, 3)).astype(np.float32, order="c")
    raw = np.random.laplace(size=(6, 3)).astype(np.float32)
    raw[raw < 0.1] = 0  # sparsify before converting to CSR
    sparse = csr_matrix(raw, dtype=np.float32)

    sparse_gpu = GPUCSRArray(sparse)
    weights_gpu = op.to_gpu(weights)
    result = op.dot(sparse_gpu, weights_gpu, False, True, out=None,
                    stream=op.streams[0])
    # Historically this second call was the one that crashed.
    op.add_dot(result,
               sparse_gpu,
               weights_gpu,
               True,
               False,
               alpha=-0.3,
               beta=1.0,
               stream=op.streams[0])
    op.mean(result, axis=0, stream=op.streams[1])
Example no. 6
0
    def _get_score(self, target, pred):
        """Score *pred* against *target* for the configured output type.

        Follows the sklearn convention that higher scores are better;
        for "linear" output the mean squared error is therefore negated.
        """
        out = self.output
        if out == "softmax":
            # Collapse one-hot encodings to class indices before comparing.
            if len(target.shape) != 1 and target.shape[1] != 1:
                target = op.to_cpu(op.argmax(target, 1))
            if len(pred.shape) != 1 and pred.shape[1] != 1:
                pred = op.to_cpu(op.argmax(pred, 1))
            return float(op.to_cpu(op.mean(pred == target)))
        if out == "sigmoid":
            # Note: this is meant for multitask learning, but for e.g.
            # using sigmoid+squarederror as multiclass problem, this will
            # give the wrong result!
            return op.to_cpu(op.mean(target == (pred > 0.5)))
        if out == "linear":
            return -op.to_cpu(op.mean((target - pred)**2))  # negate by convention
        raise NotImplementedError()
Example no. 7
0
    def _get_score(self, target, pred):
        """Return a scalar quality score for *pred* versus *target*.

        As in sklearn, larger return values indicate better predictions,
        so the squared error of the "linear" output is negated.
        """
        kind = self.output
        if kind == "softmax":
            # Reduce one-hot targets/predictions to label indices so
            # accuracy can be computed by direct comparison.
            if len(target.shape) != 1 and target.shape[1] != 1:
                target = op.to_cpu(op.argmax(target, 1))
            if len(pred.shape) != 1 and pred.shape[1] != 1:
                pred = op.to_cpu(op.argmax(pred, 1))
            accuracy = op.to_cpu(op.mean(pred == target))
            return float(accuracy)
        elif kind == "sigmoid":
            # Note: this is meant for multitask learning, but for e.g.
            # using sigmoid+squarederror as multiclass problem, this will
            # give the wrong result!
            return op.to_cpu(op.mean(target == (pred > 0.5)))
        elif kind == "linear":
            mse = op.to_cpu(op.mean((target - pred) ** 2))
            return -mse  # negate by convention
        else:
            raise NotImplementedError()
Example no. 8
0
    def bprop(self, delta, momentum=0.0):
        """Backpropagate the error signal through this layer.

        Accumulates gradients into ``self.dW`` and ``self.db`` (blending
        with the previous gradients via ``momentum``) and returns the
        delta for the layer below, or ``0.0`` for the input layer.
        """
        # Wait until the layer above has finished producing `delta`.
        op.streams[2].synchronize()
        # Apply the activation derivative to `delta` in place.
        self.dfunc(delta, self.A, self.Z, stream=op.streams[0])
        op.streams[0].synchronize()
        # Accumulate the batch-averaged weight gradient into dW,
        # keeping `momentum` times the previously stored gradient.
        op.add_dot(delta, self.X, self.dW, True, False,
                   alpha=1.0 / delta.shape[0], beta=momentum,
                   stream=op.streams[0])
        # Bias gradient: column means of delta, blended the same way.
        batch_mean = op.mean(delta, axis=0, stream=op.streams[1])
        op.add_vec(self.db, 1.0, batch_mean, beta=momentum,
                   stream=op.streams[1])

        if self.l2_penalty > 0:
            # L2 regularization contribution to the weight gradient.
            op.add_vec(self.dW, self.l2_penalty, self.W,
                       stream=op.streams[0])

        if self.is_input_layer:
            return 0.0
        return op.dot(delta, self.W, stream=op.streams[2])