Example 1
def test_specified_rng():
    from npdl.utils.random import get_rng
    from npdl.utils.random import set_rng
    from npdl.initializations import Normal
    from npdl.initializations import Uniform
    from npdl.initializations import GlorotNormal
    from npdl.initializations import GlorotUniform

    from numpy.random import RandomState
    from numpy import allclose

    shape = (10, 20)
    seed = 12345
    rng = get_rng()

    for test_cls in [Normal, Uniform, GlorotNormal, GlorotUniform]:
        set_rng(RandomState(seed))
        sample1 = test_cls().call(shape)
        set_rng(RandomState(seed))
        sample2 = test_cls().call(shape)
        # reset to original RNG for other tests
        set_rng(rng)
        assert allclose(sample1, sample2), \
            "random initialization was inconsistent " \
            "for {}".format(test_cls.__name__)
Example 2
    def call(self, size):
        # Orthogonal initialization: draw a standard-normal matrix, take an
        # orthonormal factor from its thin SVD, and scale it by the gain.
        flat_shape = (size[0], np.prod(size[1:]))
        a = get_rng().normal(loc=0., scale=1., size=flat_shape)
        u, _, v = np.linalg.svd(a, full_matrices=False)
        # pick whichever factor has the flattened weight shape
        q = u if u.shape == flat_shape else v
        q = q.reshape(size)
        q = self.gain * q
        return _cast_dtype(q)
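
This is the standard SVD-based orthogonal initializer: a Gaussian matrix is almost surely full rank, and the thin-SVD factors u and v have orthonormal rows or columns. A quick standalone check of that property, in plain NumPy and independent of npdl, might look like this:

import numpy as np

rng = np.random.RandomState(0)
a = rng.normal(size=(10, 20))
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == (10, 20) else v   # same selection rule as above
assert q.shape == (10, 20)
# the rows of q are orthonormal, so q @ q.T is (numerically) the identity
assert np.allclose(q @ q.T, np.eye(10))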
Example 3
    def forward(self, input, train=True, *args, **kwargs):
        if 0. < self.p < 1.:
            if train:
                # inverted dropout: keep each unit with probability 1 - p and
                # rescale by 1 / (1 - p) so the expected output matches the input
                self.last_mask = get_rng().binomial(1, 1 - self.p, input.shape) / (1 - self.p)
                return input * self.last_mask
            else:
                # the train-time rescaling already compensates for dropped
                # units, so inference passes the input through unchanged
                return input
        else:
            return input
Example 4
    def forward(self, input, train=True, *args, **kwargs):
        """Apply the forward pass transformation to the input data.

        Parameters
        ----------
        input : numpy.array
            input data
        train : bool
            ``True`` while training (dropout mask applied), ``False`` at
            inference

        Returns
        -------
        numpy.array
            output data
        """
        if 0. < self.p < 1.:
            if train:
                # inverted dropout: keep each unit with probability 1 - p and
                # rescale by 1 / (1 - p) so the expected output matches the input
                self.last_mask = get_rng().binomial(1, 1 - self.p, input.shape) / (1 - self.p)
                return input * self.last_mask
            else:
                # the train-time rescaling already compensates for dropped
                # units, so inference passes the input through unchanged
                return input
        else:
            return input
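
The train branch is inverted dropout: each unit is kept with probability 1 - p and the survivors are scaled up by 1 / (1 - p), so the expected output equals the input and inference needs no extra scaling. A small NumPy check of that expectation, independent of npdl:

import numpy as np

rng = np.random.RandomState(0)
p = 0.3                                  # drop probability
x = np.ones(100000)                      # constant input for a clean average
mask = rng.binomial(1, 1 - p, x.shape) / (1 - p)
out = x * mask
# the inverted-dropout mask has mean ~1, so the mean activation is preserved
assert abs(out.mean() - x.mean()) < 0.01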
Example 5
    def fit(self,
            X,
            Y,
            max_iter=100,
            batch_size=64,
            shuffle=True,
            validation_split=0.,
            validation_data=None,
            file=sys.stdout):

        # prepare data: cast float64 arrays to the library's working dtype
        train_X = X.astype(get_dtype()) if np.issubdtype(X.dtype, np.float64) else X
        train_Y = Y.astype(get_dtype()) if np.issubdtype(Y.dtype, np.float64) else Y

        if 1. > validation_split > 0.:
            split = int(train_Y.shape[0] * validation_split)
            valid_X, valid_Y = train_X[-split:], train_Y[-split:]
            train_X, train_Y = train_X[:-split], train_Y[:-split]
        elif validation_data is not None:
            valid_X, valid_Y = validation_data
        else:
            valid_X, valid_Y = None, None

        iter_idx = 0
        while iter_idx < max_iter:
            iter_idx += 1

            # shuffle X and Y in unison: reseed the global RNG with the same
            # seed before each shuffle so samples and labels stay aligned
            if shuffle:
                seed = get_rng().randint(111, 1111111)
                np.random.seed(seed)
                np.random.shuffle(train_X)
                np.random.seed(seed)
                np.random.shuffle(train_Y)

            # train
            train_losses, train_predicts, train_targets = [], [], []
            for b in range(train_Y.shape[0] // batch_size):
                batch_begin = b * batch_size
                batch_end = batch_begin + batch_size
                x_batch = train_X[batch_begin:batch_end]
                y_batch = train_Y[batch_begin:batch_end]

                # forward propagation
                y_pred = self.predict(x_batch)

                # backward propagation
                next_grad = self.loss.backward(y_pred, y_batch)
                for layer in self.layers[::-1]:
                    next_grad = layer.backward(next_grad)

                # collect parameters and gradients
                params = []
                grads = []
                for layer in self.layers:
                    params += layer.params
                    grads += layer.grads

                # update parameters
                self.optimizer.update(params, grads)

                # record the loss and predictions
                train_losses.append(self.loss.forward(y_pred, y_batch))
                train_predicts.extend(y_pred)
                train_targets.extend(y_batch)

            # output train status
            runout = "iter %d, train-[loss %.4f, acc %.4f]; " % (
                iter_idx, float(np.mean(train_losses)),
                float(self.accuracy(train_predicts, train_targets)))

            if valid_X is not None and valid_Y is not None:
                # valid
                valid_losses, valid_predicts, valid_targets = [], [], []
                for b in range(valid_X.shape[0] // batch_size):
                    batch_begin = b * batch_size
                    batch_end = batch_begin + batch_size
                    x_batch = valid_X[batch_begin:batch_end]
                    y_batch = valid_Y[batch_begin:batch_end]

                    # forward propagation
                    y_pred = self.predict(x_batch)

                    # record the loss and predictions
                    valid_losses.append(self.loss.forward(y_pred, y_batch))
                    valid_predicts.extend(y_pred)
                    valid_targets.extend(y_batch)

                # output valid status
                runout += "valid-[loss %.4f, acc %.4f]; " % (
                    float(np.mean(valid_losses)),
                    float(self.accuracy(valid_predicts, valid_targets)))

            print(runout, file=file)
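
The shuffle step above reseeds NumPy's global generator with the same seed before shuffling train_X and train_Y, so both arrays receive the same permutation. A more direct way to express the same idea (a standalone sketch, not npdl code) is to shuffle a single index array:

import numpy as np

rng = np.random.RandomState(12345)
X = np.arange(10).reshape(5, 2)
Y = np.arange(5)

# one permutation applied to both arrays keeps samples and labels aligned
perm = rng.permutation(len(Y))
X_shuf, Y_shuf = X[perm], Y[perm]
assert np.array_equal(X_shuf[:, 0] // 2, Y_shuf)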
Example 6
    def call(self, size):
        # uniform initialization: sample weights from [-scale, scale]
        return _cast_dtype(
            get_rng().uniform(-self.scale, self.scale, size=size))
Example 7
    def call(self, size):
        # Gaussian initialization with the configured mean and std
        return _cast_dtype(
            get_rng().normal(loc=self.mean, scale=self.std, size=size))
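
Both initializers draw directly from the shared RNG, so their output can be sanity-checked with ordinary NumPy statistics. A small illustration with made-up scale, mean and std values, mirroring the two calls above in plain NumPy:

import numpy as np

rng = np.random.RandomState(42)
scale, mean, std = 0.05, 0.0, 0.01
size = (200, 300)

uniform_w = rng.uniform(-scale, scale, size=size)
normal_w = rng.normal(loc=mean, scale=std, size=size)

# uniform samples stay inside [-scale, scale]
assert uniform_w.min() >= -scale and uniform_w.max() <= scale
# normal samples have roughly the requested mean and spread
assert abs(normal_w.mean() - mean) < 1e-3
assert abs(normal_w.std() - std) < 1e-3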