Example #1
    def save_img_step(data):
        # Train on data as usual
        result = train_step_o(data)

        # Save input and output images
        data = data_adapter.expand_1d(data)
        x, y_true, w = data_adapter.unpack_x_y_sample_weight(data)

        y_pred = model(x, training=False)

        in_batch = x.numpy()
        out_batch = y_pred[0].numpy()
        truth_batch = y_true['output'].numpy()

        for i, (img, mask,
                truth) in enumerate(zip(in_batch, out_batch, truth_batch)):
            img_path = str(Path(img_dir, f"input_{i}.jpg"))
            mask_path = str(Path(img_dir, f"label_{i}.jpg"))
            truth_path = str(Path(img_dir, f"truth_{i}.jpg"))
            tf.keras.preprocessing.image.save_img(img_path,
                                                  (img * 255).astype(int))

            mask_img = labels.out_array_to_label_img(mask)
            tf.keras.preprocessing.image.save_img(mask_path, mask_img)

            truth_img = labels.out_array_to_label_img(truth)
            tf.keras.preprocessing.image.save_img(truth_path, truth_img)

        return result
Example #2
    def train_step(self, data):
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)

        with backprop.GradientTape() as tape:
            y_pred = self(x, training=True)

            mask = tf.not_equal(y, -1)
            y = tf.expand_dims(tf.boolean_mask(y, mask), 1)
            y_pred = tf.boolean_mask(y_pred, mask)

            sample_weight = tf.ones_like(y_pred)
            loss = self.compiled_loss(y,
                                      y_pred,
                                      sample_weight,
                                      regularization_losses=self.losses)

        _minimize(
            self.distribute_strategy,
            tape,
            self.optimizer,
            loss,
            self.trainable_variables,
        )

        self.compiled_metrics.update_state(y, y_pred, sample_weight)
        return {m.name: m.result() for m in self.metrics}
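In the step above, any target equal to -1 is dropped from the loss entirely. A minimal, self-contained illustration of the same masking (the values are made up):

    import tensorflow as tf

    y = tf.constant([2.0, -1.0, 5.0])
    y_pred = tf.constant([1.8, 0.3, 4.9])
    mask = tf.not_equal(y, -1)
    y = tf.expand_dims(tf.boolean_mask(y, mask), 1)   # [[2.0], [5.0]]
    y_pred = tf.boolean_mask(y_pred, mask)            # [1.8, 4.9]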
Example #3
    def predict_step(self, data):
        """Perform an inference and returns the boxes, scores and labels associated.
        Background is discarded the max and argmax operation are performed.
        It means that if background was predicted the second maximum score would
        be outputed.

        Example: background + 3 classes
        [0.54, 0.40, 0.03, 0.03] => score = 0.40, label = 0 (1 - 1)


        "To optimize for AP, we override the prediction of these slots
        with the second highest scoring class, using the corresponding confidence"
        Section 4 (Experiments) of "End-to-End Object Detection with Transformers"

        Returns:
            boxes: A Tensor of shape [batch_size, self.num_queries, (y1,x1,y2,x2)]
                containing the boxes with the coordinates between 0 and 1.
            scores: A Tensor of shape [batch_size, self.num_queries] containing
                the score of the boxes.
            classes: A Tensor of shape [batch_size, self.num_queries]
                containing the class of the boxes [0, num_classes).
        """
        data = data_adapter.expand_1d(data)
        x, _, _ = data_adapter.unpack_x_y_sample_weight(data)
        y_pred = self(x, training=False)
        boxes_without_padding, scores, labels = detr_postprocessing(
            y_pred[BoxField.BOXES],
            y_pred[BoxField.SCORES],
            x[DatasetField.IMAGES_INFO],
            tf.shape(x[DatasetField.IMAGES])[1:3],
        )
        return boxes_without_padding, scores, labels
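With this predict_step, Model.predict yields the post-processed detections described in the docstring directly. A hedged usage sketch (the model and dataset names are assumptions):

    boxes, scores, labels = model.predict(dataset)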
Example #4
    def train_step(self, data):
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)

        with backprop.GradientTape() as tape:
            x_mu, x_q, r, kl = self(x, training=True)

            # sigma follows a decay schedule driven by the optimizer's step counter.
            decayed_sigma = self.sigma(self.optimizer.iterations)
            output_dist = tfp.distributions.Normal(loc=x_mu,
                                                   scale=decayed_sigma)
            log_likelihood = output_dist.log_prob(x_q, name="ll")
            log_likelihood = tf.reduce_logsumexp(log_likelihood,
                                                 name="reduce_ll")

            output_dist_const_var = tfp.distributions.Normal(
                loc=x_mu, scale=self.const_sigma)
            log_likelihood_const_var = output_dist_const_var.log_prob(
                x_q, name="llc")
            log_likelihood_const_var = tf.reduce_logsumexp(
                log_likelihood_const_var, name="reduce_llc")

            # ELBO (evidence lower bound): log_likelihood - kl (== -loss)
            loss = tf.subtract(kl, log_likelihood, name="loss")

        self.optimizer.minimize(loss, self.trainable_variables, tape=tape)

        return {
            "loss": loss,
            "kl": kl,
            "ll": log_likelihood,
            "llc": log_likelihood_const_var,
            "sigma": decayed_sigma,
        }
Example #5
 def _process_input_data(x, y, sample_weight, model):
     # Mirror the batching/unpacking transformations Model.fit applies to user input.
     iterator = data_adapter.single_batch_iterator(
         model.distribute_strategy, x, y, sample_weight, class_weight=None)
     data = next(iterator)
     data = data_adapter.expand_1d(data)
     x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
     return x, y, sample_weight
Example #6
 def train_step(self, data):
     data = data_adapter.expand_1d(data)
     x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
     if x.shape[1] is not None:
         rho = self.call_train(x, y)
         self.qm.weights[0].assign_add(rho)
     return {}
Example #7
    def train_step(self, data):
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
        x, y, sample_weight = data_adapter.expand_1d((x, y, sample_weight))

        with backprop.GradientTape() as tape:
            y_pred = self(x, training=True)
            loss = self.compiled_loss(y,
                                      y_pred,
                                      sample_weight,
                                      regularization_losses=self.losses)
        self.compiled_metrics.update_state(y, y_pred, sample_weight)

        if isinstance(self.optimizer, (list, tuple)):
            linear_vars = self.linear_model.trainable_variables
            dnn_vars = self.dnn_model.trainable_variables
            linear_grads, dnn_grads = tape.gradient(loss,
                                                    (linear_vars, dnn_vars))

            linear_optimizer = self.optimizer[0]
            dnn_optimizer = self.optimizer[1]
            linear_optimizer.apply_gradients(zip(linear_grads, linear_vars))
            dnn_optimizer.apply_gradients(zip(dnn_grads, dnn_vars))
        else:
            trainable_variables = self.trainable_variables
            grads = tape.gradient(loss, trainable_variables)
            self.optimizer.apply_gradients(zip(grads, trainable_variables))

        return {m.name: m.result() for m in self.metrics}
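A minimal sketch of compiling a model that uses this train_step with one optimizer per branch, assuming it exposes linear_model and dnn_model attributes as in the snippet (the optimizer choices, loss and dataset are illustrative assumptions):

    # A list of two optimizers routes the first to the linear branch and the
    # second to the DNN branch, matching the train_step above.
    model.compile(
        optimizer=[tf.keras.optimizers.Ftrl(learning_rate=0.01),
                   tf.keras.optimizers.Adam(learning_rate=1e-3)],
        loss="mse",
        metrics=["mae"])
    model.fit(dataset, epochs=5)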
Example #8
    def test_step(self, data):
        """The logic for one evaluation step.
        This method can be overridden to support custom evaluation logic.
        This method is called by `Model.make_test_function`.
        This function should contain the mathematical logic for one step of
        evaluation.
        This typically includes the forward pass, loss calculation, and metrics
        updates.
        Configuration details for *how* this logic is run (e.g. `tf.function` and
        `tf.distribute.Strategy` settings), should be left to
        `Model.make_test_function`, which can also be overridden.
        Arguments:
          data: A nested structure of `Tensor`s.
        Returns:
          A `dict` containing values that will be passed to
          `tf.keras.callbacks.CallbackList.on_test_batch_end`. Typically, the
          values of the `Model`'s metrics are returned.
        """
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)

        # Updates stateful loss metrics.
        # Watch the inputs on a persistent tape and keep references to both
        # (self.assigned_inputs, self.tape_handler) for use outside this step.
        self.assigned_inputs = x
        with backprop.GradientTape(persistent=True) as tape:
            self.tape_handler = tape
            tape.watch(x)
            y_pred = self(x, training=False)
            loss = self.compiled_loss(y,
                                      y_pred,
                                      sample_weight,
                                      regularization_losses=self.losses)

        self.compiled_metrics.update_state(y, y_pred, sample_weight)
        return {m.name: m.result() for m in self.metrics}
Example #9
    def compute_gradients(self, data):
        """The logic for gradient computation of one training step.

        This method can be overridden to support custom training logic.
        This method is called by `Model.make_train_function`.

        This method should contain the mathematical logic for one step of training.
        This typically includes the forward pass, loss calculation, backpropagation,
        and metric updates.

        Configuration details for *how* this logic is run (e.g. `tf.function` and
        `tf.distribute.Strategy` settings), should be left to
        `Model.make_train_function`, which can also be overridden.

        Arguments:
          data: A nested structure of `Tensor`s.

        Returns:
          A tuple of (gradients, metrics), where `gradients` is a list of
          gradients corresponding to `Model.trainable_variables` and `metrics`
          is a dict of metric results keyed by metric name.

        """
        # These are the only transformations `Model.fit` applies to user-input
        # data when a `tf.data.Dataset` is provided. These utilities will be exposed
        # publicly.
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)

        with backprop.GradientTape() as tape:
            y_pred = self(x, training=True)
            loss = self.compiled_loss(
                y, y_pred, sample_weight, regularization_losses=self.losses)

        # For custom training steps, users can just write:
        #   trainable_variables = self.trainable_variables
        #   gradients = tape.gradient(loss, trainable_variables)
        #   self.optimizer.apply_gradients(zip(gradients, trainable_variables))
        # The _minimize call does a few extra steps unnecessary in most cases,
        # such as loss scaling and gradient clipping.
        with tape:
            if isinstance(self.optimizer, lso.LossScaleOptimizer):
                loss = self.optimizer.get_scaled_loss(loss)
            gradients = tape.gradient(loss, self.trainable_variables)

        def _zeros_grads():
            zero_gradients = []
            for gradient in gradients:
                if isinstance(gradient, ops.IndexedSlices):
                    zero_gradients.append(ops.IndexedSlices(
                        tf.zeros_like(gradient.values),
                        gradient.indices,
                        dense_shape=gradient.dense_shape))
                else:
                    zero_gradients.append(tf.zeros_like(gradient))
            return zero_gradients

        gradients = tf.cond(tf.math.reduce_all(tf.math.is_finite(loss)),
                            lambda: gradients, _zeros_grads)

        self.compiled_metrics.update_state(y, y_pred, sample_weight)
        return gradients, {m.name: m.result() for m in self.metrics}
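A hedged sketch of consuming compute_gradients in a custom loop (the model and dataset names are assumptions; gradients could also be accumulated across batches before being applied):

    for data in dataset:
        gradients, metrics = model.compute_gradients(data)
        model.optimizer.apply_gradients(
            zip(gradients, model.trainable_variables))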
Example #10
    def test_step(self, data):
        """The logic for one evaluation step.

        Standard prediction is performed with one sample. To
        accommodate variational inference, the log probability of the
        data is computed by averaging over samples from the model:
        p(heldout | train) = int_model p(heldout|model) p(model|train)
                          ~= 1/n * sum_{i=1}^n p(heldout | model_i)
        where model_i is a draw from the posterior p(model|train).

        Arguments:
            data: A nested structure of `Tensor`s.

        Returns:
            A `dict` containing values that will be passed to
            `tf.keras.callbacks.CallbackList.on_test_batch_end`.
            Typically, the values of the `Model`'s metrics are
            returned.

        """
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
        # NOTE The first dimension of the Tensor returned from calling the
        # model is assumed to be `sample_size`. If this is a singleton
        # dimension, taking the mean is equivalent to a squeeze
        # operation.
        y_pred = tf.reduce_mean(self(x, training=False), axis=0)
        self.compiled_loss(y,
                           y_pred,
                           sample_weight,
                           regularization_losses=self.losses)
        self.compiled_metrics.update_state(y, y_pred, sample_weight)
        return {m.name: m.result() for m in self.metrics}
Example #11
    def test_step(self, data):
        data = data_adapter.expand_1d(data)
        images_crops, y, sample_weight = data_adapter.unpack_x_y_sample_weight(
            data)

        total_number_crops = tf.shape(images_crops)[0]
        distinct_images = total_number_crops / 10  # there are 10 crops per image
        # Segments will look like this: [0, 0, ..., 1, 1, ...].
        segments = tf.repeat(tf.range(0, distinct_images, 1, dtype=tf.int32),
                             repeats=10)

        y_pred_crops = self.model(images_crops, training=False)
        # Segment to average the scores of the 10 crops belonging to each image; see the test-time preprocessing as well.
        y_pred = tf.math.segment_mean(y_pred_crops, segments)
        y = tf.math.segment_max(
            y, segments
        )  # Segment y with the same rule so it has the same final shape as y_pred

        # Updates stateful loss metrics.
        self.compiled_loss(y,
                           y_pred,
                           sample_weight,
                           regularization_losses=self.losses)
        self.compiled_metrics.update_state(y, y_pred, sample_weight)
        return {m.name: m.result() for m in self.metrics}
Example #12
    def custom_test_step(self, data):
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)

        y_pred = self.keras_model(x, training=False)
        # Updates stateful loss metrics.
        temploss = self._output_loss(y_pred[1], x['labels_err'])
        self.keras_model.compiled_loss._losses = temploss
        self.keras_model.compiled_loss._losses = nest.map_structure(
            self.keras_model.compiled_loss._get_loss_object,
            self.keras_model.compiled_loss._losses)
        self.keras_model.compiled_loss._losses = nest.flatten(
            self.keras_model.compiled_loss._losses)

        self.keras_model.compiled_loss(
            y,
            y_pred,
            sample_weight,
            regularization_losses=self.keras_model.losses)

        self.keras_model.compiled_metrics.update_state(y, y_pred,
                                                       sample_weight)
        # Collect metrics to return
        return_metrics = {}

        for metric in self.keras_model.metrics:
            result = metric.result()
            if isinstance(result, dict):
                return_metrics.update(result)
            else:
                return_metrics[metric.name] = result
        return return_metrics
Example #13
    def train_step(self, data):
        data = data_adapter.expand_1d(data)
        noise_img, clean_img, sample_weight = data_adapter.unpack_x_y_sample_weight(
            data)
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            generated_img = self._generator(noise_img, training=True)
            real_output = self._discriminator(clean_img, training=True)
            gen_output = self._discriminator(generated_img, training=True)

            disc_loss = self.disc_loss_fn(real_output, gen_output)
            gen_loss = self.gen_loss_fn(generated_img, clean_img, gen_output)

            gp_penalty = self.gradient_penalty(clean_img, generated_img)
            disc_loss += gp_penalty * self.gp_weight

        gen_gradient = gen_tape.gradient(gen_loss,
                                         self._generator.trainable_variables)
        disc_gradient = disc_tape.gradient(
            disc_loss, self._discriminator.trainable_variables)
        self.gen_optimizer.apply_gradients(
            zip(gen_gradient, self._generator.trainable_variables))
        self.disc_optimizer.apply_gradients(
            zip(disc_gradient, self._discriminator.trainable_variables))
        mae = tf.reduce_mean(
            tf.keras.metrics.mean_absolute_error(clean_img, generated_img))

        return {
            'generator_loss': gen_loss,
            'generator_mae': mae,
            'discriminator_loss': disc_loss
        }
Example #14
    def train_step(self, data):
        """override parent train_step, see description in parent
        Arguments:
          data: A nested structure of `Tensor`s.

        Returns:
          A `dict` containing values that will be passed to
          `tf.keras.callbacks.CallbackList.on_train_batch_end`. Typically, the
          values of the `Model`'s metrics are returned. Example:
          `{'loss': 0.2, 'accuracy': 0.7}`.

        """
        # These are the only transformations `Model.fit` applies to user-input
        # data when a `tf.data.Dataset` is provided. These utilities will be exposed
        # publicly.
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)

        with backprop.GradientTape() as tape:
            y_pred = self(x, training=True)
            loss = self.compiled_loss(y,
                                      y_pred,
                                      sample_weight,
                                      regularization_losses=self.losses)

        gradients = tape.gradient(loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(gradients,
                                           self.trainable_variables))

        self.backwards(list(zip(gradients, self.trainable_variables)))

        self.compiled_metrics.update_state(y, y_pred, sample_weight)
        return {m.name: m.result() for m in self.metrics}
Example #15
 def call_custom_callbacks(original_data):
     data = data_adapter.expand_1d(original_data)
     x, y_true, w = data_adapter.unpack_x_y_sample_weight(data)
     y_pred = keras_model(x, training=True)
     result = original_train_step(original_data)
     # custom stuff called during training
     show_pygame.show(x, y_true, y_pred)
     return result
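A hedged sketch of wiring this wrapper in as the model's training step (keras_model, original_train_step and show_pygame are closure names carried over from the snippet; run_eagerly is an assumption so the display code runs eagerly):

    original_train_step = keras_model.train_step
    keras_model.train_step = call_custom_callbacks
    keras_model.compile(optimizer="adam", loss="mse", run_eagerly=True)
    keras_model.fit(train_dataset, epochs=1)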
Example #16
 def train_step(self, data):
     data = data_adapter.expand_1d(data)
     x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
     rhos = self.call_train(x, y)
     if x.shape[1] is not None:
         for i in range(self.num_classes):
             self.qmd[i].weights[0].assign_add(rhos[i])
     return {}
Example #17
    def _test_step_unsupervised(self, data):
        x, sample_weight, _ = data_adapter.unpack_x_y_sample_weight(data)
        out = self(x, training=False)
        # Updates stateful loss metrics.
        dummy_target = tf.stop_gradient(out)
        self.compiled_loss(dummy_target, out, sample_weight, regularization_losses=self.losses)

        self.compiled_metrics.update_state(dummy_target, out, sample_weight)
        return {m.name: m.result() for m in self.metrics}
Example #18
    def predict_step(self, data):
        """override parent inference step, support return y label together
        """
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)

        y_pred = self(x, training=False)

        return y, y_pred, sample_weight
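With this override, Model.predict returns the labels and sample weights alongside the predictions. A hedged usage sketch (model and dataset names are assumptions):

    y_true, y_pred, sample_weight = model.predict(dataset)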
Example #19
    def test_step(self, data):
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)

        y_pred = self(x, training=False)
        self.compiled_loss(
            y, y_pred, sample_weight, regularization_losses=self.losses)

        self.update_metrics(y_pred, y, sample_weight)

        return self.get_metrics_dict()
Example #20
    def train_step(self, data):
        """
        Overwrite function for the Keras model indicating how a train step
        will operate.

        :param Tuple[np.ndarray, Tuple[np.ndarray, np.ndarray]] data: The input
            training data is expected to be provided in the form
            (input_features, (true_labels, true_concepts)).
        """
        # Massage the data
        data = data_adapter.expand_1d(data)
        input_features, (true_labels, true_concepts), sample_weight = \
            data_adapter.unpack_x_y_sample_weight(data)
        with tf.GradientTape() as tape:
            # Obtain a prediction of labels and concepts
            predicted_labels, predicted_concepts, extra_losses = self._call_fn(
                input_features
            )
            # Compute the actual losses
            task_loss, concept_loss, concept_accuracy = self._compute_losses(
                predicted_labels=predicted_labels,
                predicted_concepts=predicted_concepts,
                true_labels=true_labels,
                true_concepts=true_concepts,
            )
            # Accumulate both the concept and task-specific loss into a single
            # value
            total_loss = (
                task_loss +
                self.alpha * concept_loss
            )
            # And include any extra losses coming from this process
            for extra_loss in extra_losses:
                total_loss += extra_loss

        num_concepts = (
            len(predicted_concepts) if isinstance(predicted_concepts, list) else
            predicted_concepts.shape[-1]
        )
        grads = tape.gradient(total_loss, self.trainable_weights)
        self.optimizer.apply_gradients(zip(grads, self.trainable_weights))
        self.total_loss_tracker.update_state(total_loss, sample_weight)
        self.task_loss_tracker.update_state(task_loss, sample_weight)
        self.concept_loss_tracker.update_state(concept_loss, sample_weight)
        self.concept_accuracy_tracker.update_state(
            concept_accuracy,
            sample_weight,
        )
        for metric in self.extra_metrics:
            metric.update_state(true_labels, predicted_labels, sample_weight)
        return {
            metric.name: metric.result()
            for metric in self.metrics
        }
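A hedged sketch of the nested input format this train_step expects, (input_features, (true_labels, true_concepts)), as described in the docstring (the array names and batch size are assumptions):

    ds = tf.data.Dataset.from_tensor_slices(
        (input_features, (true_labels, true_concepts))).batch(32)
    model.fit(ds, epochs=10)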
Example #21
 def validate_fn(model, epoch=None):
     val_x, val_y, val_sample_weight = (
         data_adapter.unpack_x_y_sample_weight(validation_data))
     val_logs = model.evaluate(x=val_x,
                               y=val_y,
                               sample_weight=val_sample_weight,
                               batch_size=None,
                               steps=validation_steps,
                               callbacks=callbacks,
                               return_dict=True)
     return result_dict_to_val_metric_fn(val_logs)
Example #22
    def train_step(self, data):
        data = data_adapter.expand_1d(data)
        input_data = data_adapter.unpack_x_y_sample_weight(data)
        with tf.GradientTape() as tape:
            y_pred = self(data, training=True)
            loss_value = self.my_loss(input_data[0][0], y_pred,
                                      input_data[0][1], input_data[0][2])

        grads = tape.gradient(loss_value, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))

        return {"training_loss": loss_value}
Example #23
    def test_step(self, data):
        data = data_adapter.expand_1d(data)
        x, ground_truths, _ = data_adapter.unpack_x_y_sample_weight(data)

        # To compute the loss we need to get the results of each decoder layer
        # Setting training to True will provide it
        y_pred = self(x, training=True)
        input_shape = tf.cast(tf.shape(x[DatasetField.IMAGES])[1:3], self.compute_dtype)
        loss = self.compute_loss(ground_truths, y_pred, input_shape)
        loss += self.compiled_loss(None, y_pred, None, regularization_losses=self.losses)
        self.loss_metric.update_state(loss)
        return {m.name: m.result() for m in self.metrics}
Example #24
    def predict_step(self, data):
        data = data_adapter.expand_1d(data)
        x, _, _ = data_adapter.unpack_x_y_sample_weight(data)

        classification_pred, localization_pred, rois = self(x, training=False)

        # Remove the background classes
        classification_pred = classification_pred[:, :, 1:]
        return post_process_fast_rcnn_boxes(classification_pred,
                                            localization_pred, rois,
                                            x[DatasetField.IMAGES_INFO],
                                            self.num_classes)
Example #25
    def _fun_generator(self, x, iterator):
        """ Function optimized by scipy minimize.

            Returns function cost and gradients for all trainable variables.
        """
        model = self.model
        self._update_weights(x)
        losses = []

        dataset = iterator._dataset  # pylint:disable=protected-access
        assert dataset is not None
        iterator = iter(dataset)

        size = dataset.cardinality().numpy()
        if size > 0:
            n_steps = (size + dataset.batch_size - 1) // dataset.batch_size
        else:
            n_steps = None

        progbar = keras.utils.Progbar(n_steps, verbose=self.verbose)

        with tf.GradientTape() as tape:
            for step, data in enumerate(iterator):
                data = data_adapter.expand_1d(data)
                x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(
                    data)
                y_pred = self.func(x, training=True)
                loss = model.compiled_loss(y,
                                           y_pred,
                                           sample_weight,
                                           regularization_losses=model.losses)
                progbar.update(step, [('loss', loss.numpy())])
                losses.append(loss)
            xloss = tf.reduce_mean(tf.stack(losses))
            grads = tape.gradient(xloss, model.trainable_variables)

        cost = xloss.numpy()

        if all(isinstance(x, tf.Tensor) for x in grads):
            xgrads = np.concatenate([x.numpy().reshape(-1) for x in grads])
            return cost, xgrads

        if all(isinstance(x, tf.IndexedSlices) for x in grads):
            xgrad_list = []
            for var, grad in zip(model.trainable_variables, grads):
                value = tf.Variable(np.zeros(var.shape), dtype=var.dtype)
                value.assign_add(grad)
                xgrad_list.append(value.numpy())
            xgrads = np.concatenate([x.reshape(-1) for x in xgrad_list])
            return cost, xgrads

        raise NotImplementedError()
        return -1, np.array([])  # pylint:disable=unreachable
Example #26
    def simulate(self, docket, session_id=None, batch_size=None):
        """Stochastically simulate similarity judgments.

        Arguments:
            docket: A RankDocket object representing the
                to-be-judged trials. The order of the stimuli in the
                stimulus set is ignored for the simulations.
            session_id: An integer array indicating the session ID of a
                trial. It is assumed that all IDs are non-negative.
                Trials with different session IDs were obtained during
                different sessions.
            batch_size (optional): If None, `batch_size` is equal to
                the total number of trials.

        Returns:
            RankObservations object representing the judged trials. The
                order of the stimuli is now informative.

        """
        if batch_size is None:
            batch_size = docket.n_trial

        agent_id = self.agent_id * np.ones((docket.n_trial), dtype=np.int32)
        group_id = self.group_id * np.ones((docket.n_trial), dtype=np.int32)

        # Create TF dataset.
        group = np.stack((group_id, agent_id), axis=-1)
        ds_docket = docket.as_dataset(group, all_outcomes=True).batch(
            batch_size, drop_remainder=False)

        # Call model with TensorFlow formatted docket and
        # stochastically sample an outcome.
        stimulus_set = None
        for data in ds_docket:
            data = data_adapter.expand_1d(data)
            x, _, _ = data_adapter.unpack_x_y_sample_weight(data)

            batch_stimulus_set = _rank_sample(x['stimulus_set'],
                                              self.model(x, training=False))
            if stimulus_set is None:
                stimulus_set = [batch_stimulus_set]
            else:
                stimulus_set.append(batch_stimulus_set)
        stimulus_set = tf.concat(stimulus_set, axis=0).numpy() - 1

        obs = RankObservations(stimulus_set,
                               n_select=docket.n_select,
                               is_ranked=docket.is_ranked,
                               group_id=group_id,
                               agent_id=agent_id,
                               session_id=session_id)
        return obs
Example #27
    def train_step(self, data):
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)

        with backprop.GradientTape() as tape:
            y_pred = self(x, training=True)
            loss = self.compiled_loss(y, y_pred, sample_weight, regularization_losses=self.losses)
        _minimize(self.distribute_strategy, tape, self.optimizer, loss,
                  self.trainable_variables)

        self.update_metrics(y_pred, y, sample_weight, train=True)

        return self.get_metrics_dict()
Example #28
    def _train_step_unsupervised(self, data):
        x, sample_weight, _ = data_adapter.unpack_x_y_sample_weight(data)
        with tf.GradientTape() as tape:
            out = self(x, training=True)
            dummy_target = tf.stop_gradient(out)
            loss = self.compiled_loss(dummy_target, out, sample_weight, regularization_losses=self.losses)

        trainable_variables = self.trainable_variables
        gradients = tape.gradient(loss, trainable_variables)
        self.optimizer.apply_gradients(zip(gradients, trainable_variables))

        self.compiled_metrics.update_state(dummy_target, out, sample_weight)
        return {m.name: m.result() for m in self.metrics}
Example #29
    def train_step(self, data):
        data = data_adapter.expand_1d(data)
        x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)

        with backprop.GradientTape() as tape:
            y_pred = self(x, training=True)
            loss0 = tf.reduce_sum(self.losses)
            loss1, loss2, loss3 = self.loss(y, y_pred)
            total_loss = tf.reduce_sum([loss0, loss1, loss2, loss3])
        grads = tape.gradient(total_loss, self.trainable_variables)
        self.optimizer.apply_gradients(zip(grads, self.trainable_variables))
        self.compiled_metrics.update_state(y, y_pred)
        return {m.name: m.result() for m in self.metrics}
Example #30
 def _process_input_data(x, y, sample_weight):
     if isinstance(input_data, tf.data.Dataset):
         iterator = tf.compat.v1.data.make_one_shot_iterator(x)
     else:
         iterator = data_adapter.single_batch_iterator(
             model.distribute_strategy,
             x,
             y,
             sample_weight,
             class_weight=None)
     data = next(iterator)
     data = data_adapter.expand_1d(data)
     x, y, sample_weight = data_adapter.unpack_x_y_sample_weight(data)
     return x, y, sample_weight