    def update_model(self, states: str, actions: str, q_vals_target: str) -> None:
        """
        Takes in states, actions, and target q values. Updates the model:

            Runs the forward pass, computing Q(states, actions).
                Q(states, actions)[i][j] is an approximation of Q*(states[i], action_j).
            Computes the loss of Q(states, actions) with respect to q_vals_target.
            Updates the Q-network's weights according to the loss and optimizer.
            (See the numpy sketch after this example for the action-selection step.)

        :param states: Blob containing a numpy array with shape
            (batch_size, state_dim). The ith row is a representation of the
            ith transition's state.
        :param actions: Blob containing a numpy array with shape
            (batch_size, action_dim). The ith row is the one-hot
            representation of the ith transition's action.
        :param q_vals_target: Blob containing a numpy array with shape
            (batch_size, 1). The ith row is the label to train against for
            the data from the ith transition.
        """
        model = C2.model()
        q_vals_target = C2.StopGradient(q_vals_target)
        output_blob = C2.NextBlob("train_output")
        if self.conv_ml_trainer is not None:
            conv_output_blob = C2.NextBlob("conv_output")
            self.conv_ml_trainer.make_conv_pass_ops(model, states, conv_output_blob)
            states = conv_output_blob

        self.ml_trainer.make_forward_pass_ops(model, states, output_blob, False)
        q_val_select = C2.ReduceBackSum(C2.Mul(output_blob, actions))
        q_values = C2.ExpandDims(q_val_select, dims=[1])

        self.loss_blob = self.ml_trainer.generateLossOps(model, q_values, q_vals_target)
        model.AddGradientOperators([self.loss_blob])
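        # NanCheck adds a check op for each gradient blob, so training fails
        # fast if any gradient contains NaNs; the returned blob is not reused.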
        for param in model.params:
            if param in model.param_to_grad:
                param_grad = model.param_to_grad[param]
                param_grad = C2.NanCheck(param_grad)
        self.ml_trainer.addParameterUpdateOps(model)
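A minimal numpy sketch of the action-selection step above, with illustrative values: multiplying the network output by the one-hot action matrix and reduce-summing over the action axis keeps only Q(s_i, a_i) for the action actually taken.

import numpy as np

q_all = np.array([[1.0, 2.0, 3.0],             # Q(s_0, a) for every action
                  [4.0, 5.0, 6.0]])            # Q(s_1, a) for every action
actions_one_hot = np.array([[0.0, 1.0, 0.0],   # action 1 taken in s_0
                            [0.0, 0.0, 1.0]])  # action 2 taken in s_1
# Equivalent of C2.Mul + C2.ReduceBackSum + C2.ExpandDims(dims=[1])
q_taken = np.sum(q_all * actions_one_hot, axis=1, keepdims=True)
# q_taken == [[2.0], [6.0]], shape (batch_size, 1), matching q_vals_target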
Example #2
    def update_model(
        self,
        states: str,
        actions: str,
        q_vals_target: str,
    ) -> None:
        """
        Takes in states, actions, and target q values. Updates the model:

            Runs the forward pass, computing Q(states, actions).
                Q(states, actions)[i][j] is an approximation of Q*(states[i], action_j).
            Computes the loss of Q(states, actions) with respect to q_vals_target.
            Updates the Q-network's weights according to the loss and optimizer.
            (See the loss sketch after this example.)

        :param states: Blob containing a numpy array with shape
            (batch_size, state_dim). The ith row is a representation of the
            ith transition's state.
        :param actions: Blob containing a numpy array with shape
            (batch_size, action_dim). The ith row is the one-hot
            representation of the ith transition's action.
        :param q_vals_target: Blob containing a numpy array with shape
            (batch_size, 1). The ith row is the label to train against for
            the data from the ith transition.
        """
        model = C2.model()
        q_vals_target = C2.StopGradient(q_vals_target)
        output_blob = C2.NextBlob("train_output")
        MakeForwardPassOps(
            model,
            self.model_id,
            states,
            output_blob,
            self.weights,
            self.biases,
            self.activations,
            self.layers,
            self.dropout_ratio,
            False,
        )
        q_val_select = C2.ReduceBackSum(C2.Mul(output_blob, actions))
        q_values = C2.ExpandDims(q_val_select, dims=[1])

        self.loss_blob = GenerateLossOps(
            model,
            q_values,
            q_vals_target,
        )
        model.AddGradientOperators([self.loss_blob])
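        # NanCheck adds a check op for each gradient blob, so training fails
        # fast if any gradient contains NaNs; the returned blob is not reused.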
        for param in model.params:
            if param in model.param_to_grad:
                param_grad = model.param_to_grad[param]
                param_grad = C2.NanCheck(param_grad)
        AddParameterUpdateOps(
            model,
            optimizer_input=self.optimizer,
            base_learning_rate=self.learning_rate,
            gamma=self.gamma,
            policy=self.lr_policy,
        )
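GenerateLossOps is defined elsewhere in the library; a reasonable assumption, sketched below in numpy with made-up values, is a mean squared error between the selected Q-values and the stop-gradient targets.

import numpy as np

q_taken = np.array([[2.0], [6.0]])    # output of the forward/selection pass
q_target = np.array([[2.5], [5.0]])   # labels, e.g. r + gamma * max_a' Q(s', a')
# StopGradient means q_target is treated as a constant label: the loss only
# backpropagates through q_taken.
loss = np.mean((q_taken - q_target) ** 2)  # scalar training loss, here 0.625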
Example #3
    def normalize_sparse_matrix(
        self,
        lengths_blob: str,
        keys_blob: str,
        values_blob: str,
        normalization_parameters: Dict[int, NormalizationParameters],
        blobname_prefix: str,
        split_sparse_to_dense: bool,
        split_expensive_feature_groups: bool,
        normalize: bool = True,
        sorted_features_override: Optional[List[int]] = None,
    ) -> Tuple[str, List[str]]:
        if sorted_features_override:
            sorted_features = sorted_features_override
        else:
            sorted_features, _ = sort_features_by_normalization(
                normalization_parameters)
        int_features = [int(feature) for feature in sorted_features]

        preprocess_num_batches = 8 if split_sparse_to_dense else 1

        lengths_batch = []
        keys_batch = []
        values_batch = []
        for _ in range(preprocess_num_batches):
            lengths_batch.append(C2.NextBlob(blobname_prefix +
                                             "_length_batch"))
            keys_batch.append(C2.NextBlob(blobname_prefix + "_key_batch"))
            values_batch.append(C2.NextBlob(blobname_prefix + "_value_batch"))

        C2.net().Split([lengths_blob], lengths_batch, axis=0)
        total_lengths_batch = []
        for x in range(preprocess_num_batches):
            total_lengths_batch.append(
                C2.Reshape(C2.ReduceBackSum(lengths_batch[x],
                                            num_reduce_dims=1),
                           shape=[1])[0])
        total_lengths_batch_concat, _ = C2.Concat(*total_lengths_batch, axis=0)
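        # total_lengths_batch_concat holds, per lengths batch, how many
        # (key, value) pairs it owns; the Split calls below use it as the
        # split-size input so keys and values land in the matching batches.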
        C2.net().Split([keys_blob, total_lengths_batch_concat],
                       keys_batch,
                       axis=0)
        C2.net().Split([values_blob, total_lengths_batch_concat],
                       values_batch,
                       axis=0)

        dense_input_fragments = []
        parameters: List[str] = []

        MISSING_SCALAR = self._store_parameter(
            parameters, "MISSING_SCALAR",
            np.array([MISSING_VALUE], dtype=np.float32))
        C2.net().GivenTensorFill([], [MISSING_SCALAR],
                                 shape=[],
                                 values=[MISSING_VALUE])
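        # MISSING_SCALAR is a scalar blob holding the sentinel value that
        # SparseToDenseMask writes into columns for features absent from an
        # example.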

        for preprocess_batch in range(preprocess_num_batches):
            dense_input_fragment = C2.SparseToDenseMask(
                keys_batch[preprocess_batch],
                values_batch[preprocess_batch],
                MISSING_SCALAR,
                lengths_batch[preprocess_batch],
                mask=int_features,
            )[0]

            if normalize:
                normalized_fragment, p = self.normalize_dense_matrix(
                    dense_input_fragment,
                    sorted_features,
                    normalization_parameters,
                    blobname_prefix,
                    split_expensive_feature_groups,
                )
                dense_input_fragments.append(normalized_fragment)
                parameters.extend(p)
            else:
                dense_input_fragments.append(dense_input_fragment)

        dense_input = C2.NextBlob(blobname_prefix + "_dense_input")
        dense_input_dims = C2.NextBlob(blobname_prefix + "_dense_input_dims")
        C2.net().Concat(dense_input_fragments, [dense_input, dense_input_dims],
                        axis=0)

        return dense_input, parameters
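An illustrative numpy sketch of the SparseToDenseMask step above, with made-up feature ids, values, and sentinel: each example contributes lengths[i] (key, value) pairs, keys map to columns in the order given by the mask (the sorted feature ids), and absent features are filled with the missing-value sentinel.

import numpy as np

MISSING_VALUE = -1337.0             # assumed sentinel; the real constant is imported
mask = [11, 42, 97]                 # sorted feature ids -> column order
lengths = np.array([2, 1])          # example 0 has 2 features, example 1 has 1
keys = np.array([11, 97, 42])       # concatenated feature ids
values = np.array([0.5, 2.0, 7.0])  # concatenated feature values

dense = np.full((len(lengths), len(mask)), MISSING_VALUE, dtype=np.float32)
offset = 0
for row, n in enumerate(lengths):
    for key, val in zip(keys[offset:offset + n], values[offset:offset + n]):
        dense[row, mask.index(key)] = val
    offset += n
# dense == [[0.5, -1337.0, 2.0],
#           [-1337.0, 7.0, -1337.0]]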