Example #1
    def train_epoch(self, network):
        self._setup(network)
        losses = []

        X_batch = batch_iterator(network.X, network.batch_size)
        y_batch = batch_iterator(network.y, network.batch_size)
        for X, y in tqdm(zip(X_batch, y_batch), 'Epoch progress'):
            loss = np.mean(network.update(X, y))
            self.update(network)
            losses.append(loss)
        epoch_loss = np.mean(losses)
        return epoch_loss
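All of these snippets lean on a `batch_iterator` helper that is not shown on this page. The sketch below is one plausible implementation, assuming it simply yields consecutive row slices of its input; the signature matches how the examples call it, but the body is an assumption, not the original source.

    import numpy as np

    def batch_iterator(X, batch_size=64):
        # Yield consecutive slices of X, each with at most `batch_size` rows;
        # the final slice may be smaller when len(X) % batch_size != 0.
        for start in range(0, X.shape[0], batch_size):
            yield X[start:start + batch_size]

    X = np.arange(10).reshape(5, 2)
    for batch in batch_iterator(X, batch_size=2):
        print(batch.shape)  # (2, 2), (2, 2), (1, 2)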
Example #2
    def _train(self):

        for i in range(self.max_epochs):
            error = 0
            for batch in batch_iterator(self.X, batch_size=self.batch_size):
                # Positive phase: exact inference of the hidden units
                positive_hidden = sigmoid(np.dot(batch, self.W) + self.bias_h)
                hidden_states = self._sample(positive_hidden)
                positive_associations = np.dot(batch.T, positive_hidden)

                # Negative phase: one block Gibbs step (CD-1)
                negative_visible = sigmoid(
                    np.dot(hidden_states, self.W.T) + self.bias_v)
                negative_hidden = sigmoid(
                    np.dot(negative_visible, self.W) + self.bias_h)
                negative_associations = np.dot(negative_visible.T,
                                               negative_hidden)

                # lr is already averaged over the batch
                lr = self.lr / float(batch.shape[0])
                self.W += lr * (positive_associations - negative_associations)
                # Bias gradients: data statistics minus model statistics
                self.bias_h += lr * (positive_hidden.sum(axis=0) -
                                     negative_hidden.sum(axis=0))
                self.bias_v += lr * (np.asarray(batch.sum(axis=0)).squeeze() -
                                     negative_visible.sum(axis=0))

                error += np.sum((batch - negative_visible)**2)

            self.errors.append(error)
            logging.info('Iteration %s, error %s', i, error)
        logging.debug('Weights: %s', self.W)
        logging.debug('Hidden bias: %s', self.bias_h)
        logging.debug('Visible bias: %s', self.bias_v)
Example #3
    def _train(self):
        """Use CD-1 training procedure, basically an exact inference for `positive_associations`,
        followed by a "non burn-in" block Gibbs Sampling for the `negative_associations`."""

        for i in range(self.max_epochs):
            error = 0
            for batch in batch_iterator(self.X, batch_size=self.batch_size):
                positive_hidden = sigmoid(np.dot(batch, self.W) + self.bias_h)
                hidden_states = self._sample(positive_hidden)  # sample hidden state h1
                positive_associations = np.dot(batch.T, positive_hidden)

                negative_visible = sigmoid(np.dot(hidden_states, self.W.T) + self.bias_v)
                negative_visible = self._sample(negative_visible)  # use the sampled hidden state h1 to sample v1
                negative_hidden = sigmoid(np.dot(negative_visible, self.W) + self.bias_h)
                negative_associations = np.dot(negative_visible.T, negative_hidden)

                # lr is already averaged over the batch
                lr = self.lr / float(batch.shape[0])
                self.W += lr * (positive_associations - negative_associations)
                # Bias gradients: data statistics minus model statistics
                self.bias_h += lr * (positive_hidden.sum(axis=0) - negative_hidden.sum(axis=0))
                self.bias_v += lr * (np.asarray(batch.sum(axis=0)).squeeze() - negative_visible.sum(axis=0))

                error += np.sum((batch - negative_visible) ** 2)

            self.errors.append(error)
            logging.info("Iteration %s, error %s" % (i, error))
        logging.debug("Weights: %s" % self.W)
        logging.debug("Hidden bias: %s" % self.bias_h)
        logging.debug("Visible bias: %s" % self.bias_v)
Example #4
    def _predict(self, X=None):
        if not self._initialized:
            self._setup_layers(X.shape)

        y = []
        X_batch = batch_iterator(X, self.batch_size)
        for Xb in X_batch:
            y.append(self.fprop(Xb))
        return np.concatenate(y)
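Batching the forward pass bounds peak memory on large inputs, and `np.concatenate` stitches the per-batch outputs back into one prediction array. Note that despite the `X=None` default, the method assumes a real array: it reads `X.shape` immediately, so passing nothing would raise an `AttributeError`.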
Example #5
    def train_epoch(self, network):
        losses = []

        # Create batch iterator
        X_batch = batch_iterator(network.X, network.batch_size)
        y_batch = batch_iterator(network.y, network.batch_size)

        batch = zip(X_batch, y_batch)
        if network.verbose:
            batch = tqdm(batch, total=int(np.ceil(network.n_samples / network.batch_size)))

        for X, y in batch:
            loss = np.mean(network.update(X, y))
            self.update(network)
            losses.append(loss)

        epoch_loss = np.mean(losses)
        return epoch_loss
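Unlike Example #1, the progress bar here is given an explicit length: `zip` over two generators exposes no `len`, so `tqdm` cannot infer one on its own, and passing `total=ceil(n_samples / batch_size)` is what makes the bar show a percentage rather than a bare iteration counter.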
Example #6
    def train_epoch(self, network):
        losses = []

        # Create batch iterator
        X_batch = batch_iterator(network.X, network.batch_size)
        y_batch = batch_iterator(network.y, network.batch_size)

        batch = zip(X_batch, y_batch)
        if network.verbose:
            batch = tqdm(batch)

        for X, y in batch:
            loss = np.mean(network.update(X, y))
            self.update(network)
            losses.append(loss)

        epoch_loss = np.mean(losses)
        return epoch_loss