예제 #1
0
    def get_layer(self, load, name):
        """
        Evaluate a named tensor over every batch yielded by a loader.

        :param load: the loader object to iterate over
        :param name: the name of the tensor to evaluate for each point
        :return: the concatenated activations; if the loader also yields
            labels, a tuple of (activations, labels)
        """
        target = tbn("{}:0".format(name))

        activations = []
        collected_labels = []
        for batch in load.iter_batches():
            feed = {
                tbn('x:0'): batch[0],
                tbn('y:0'): batch[0],
                tbn('is_training:0'): False,
            }
            # a 2-element batch carries per-point labels in its second slot
            if len(batch) == 2:
                feed[tbn('batches:0')] = batch[1]
                collected_labels.append(batch[1])

            [act] = self.sess.run([target], feed_dict=feed)
            activations.append(act)

        activations = np.concatenate(activations, axis=0)

        if not collected_labels:
            return activations
        return activations, np.concatenate(collected_labels, axis=0)
예제 #2
0
    def train(self, load, steps, batch_size=256):
        """
        Train SAUCIE.

        :param load: the loader object to yield batches from
        :param steps: the number of steps to train for
        :param batch_size: the number of points to train on in each step
        :raises ValueError: if lambda_b (batch correction) is enabled but
            the loader does not provide each point's batch as a label
        """
        start = self.iteration
        while (self.iteration - start) < steps:
            self.iteration += 1

            batch = load.next_batch(batch_size=batch_size)

            # if using batch-correction, must have labels — validate before
            # doing any work with this batch
            if self.lambda_b and len(batch) < 2:
                raise ValueError(
                    "If using lambda_b (batch correction), you must provide each point's batch as a label"
                )

            feed = {
                tbn('x:0'): batch[0],
                tbn('y:0'): batch[0],
                tbn('is_training:0'): True,
                tbn('learning_rate_tensor:0'): self.learning_rate
            }

            if len(batch) == 2:
                feed[tbn('batches:0')] = batch[1]

            self.sess.run([obn('train_op')], feed_dict=feed)
예제 #3
0
    def get_loss(self, load, batch_size=256):
        """
        Get the current losses averaged over the dataset.

        :param load: the loader object to iterate over
        :param batch_size: the number of points to evaluate per batch
        :return: a space-separated string of the mean of each loss in the
            'losses' collection, formatted to three decimal places
        """
        losses = None
        num_batches = 0

        for batch in load.iter_batches(batch_size=batch_size):
            num_batches += 1

            feed = {
                tbn('x:0'): batch[0],
                tbn('y:0'): batch[0],
                tbn('is_training:0'): False
            }
            if len(batch) == 2:
                feed[tbn('batches:0')] = batch[1]

            batch_losses = self.sess.run(tf.get_collection('losses'),
                                         feed_dict=feed)

            # accumulate element-wise sums across batches
            if losses is None:
                losses = batch_losses
            else:
                losses = [
                    loss + batch_loss
                    for loss, batch_loss in zip(losses, batch_losses)
                ]

        # guard against an empty loader — the original relied on the leaked
        # loop variable `i` and raised NameError when no batches were yielded
        if losses is None:
            return ''

        losses = [loss / float(num_batches) for loss in losses]
        lstring = ' '.join('{:.3f}'.format(loss) for loss in losses)

        return lstring
예제 #4
0
    def _build_losses(self):
        """
        Build all the loss ops for the network.

        Adds the reconstruction loss (MMD-based plus a batch-correction
        regularization when lambda_b is set), the optional clustering
        (lambda_c) and intracluster-distance (lambda_d) regularizations,
        and finally the total loss.
        """
        self.loss_recon = 0.

        if self.lambda_b:
            with tf.variable_scope('reconstruction_mmd'):
                self._build_reconstruction_loss_mmd(self.reconstructed, self.x)
            with tf.variable_scope('batchcorrection'):
                self._build_reg_b()

        else:
            with tf.variable_scope('reconstruction'):
                self._build_reconstruction_loss(self.reconstructed, self.x)

        act = None
        if self.lambda_c:
            with tf.variable_scope('clustering'):
                self.loss_c = 0

                # normalize the clustering-layer activations by their max
                act = tbn('layer_c:0')
                act = act / tf.reduce_max(act)
                self._build_reg_c(act)

        if self.lambda_d:
            with tf.variable_scope('intracluster_distances'):
                if act is None:
                    # lambda_d without lambda_c: the original code raised
                    # NameError here because `act` was never defined
                    act = tbn('layer_c:0')
                    act = act / tf.reduce_max(act)
                self._build_reg_d(act)

        self._build_total_loss()