def test_output_feature_utils():
    """Round-trip the set/get helpers for the feature tensor dictionary."""
    tensor_dict = {}
    for feature_name, tensor_name, value in [
        ("feature_1", "1", 1),
        ("feature_1", "10", 10),
        ("feature_2", "2", 2),
        ("feature_2", "20", 20),
    ]:
        output_feature_utils.set_output_feature_tensor(
            tensor_dict, feature_name, tensor_name, torch.Tensor([value]))

    # Keys are flattened as "{feature name}::{tensor name}", insertion-ordered.
    assert list(tensor_dict.keys()) == [
        "feature_1::1", "feature_1::10", "feature_2::2", "feature_2::20"
    ]
    assert output_feature_utils.get_output_feature_tensor(
        tensor_dict, "feature_1", "1") == torch.Tensor([1])

    # Per-feature view returns only that feature's tensor names.
    feature_1_tensors = output_feature_utils.get_single_output_feature_tensors(
        tensor_dict, "feature_1")
    assert list(feature_1_tensors.keys()) == ["1", "10"]
    feature_3_tensors = output_feature_utils.get_single_output_feature_tensors(
        tensor_dict, "feature_3")
    assert list(feature_3_tensors.keys()) == []

    # Unknown tensor name for a known feature raises.
    with pytest.raises(Exception):
        output_feature_utils.get_output_feature_tensor(tensor_dict, "feature_1", "2")
def decode(self, combiner_outputs, targets, mask):
    """Run each output feature's decoder and collect its tensors.

    The presence or absence of ``targets`` signals training vs. prediction.
    Returns a flat dict keyed as "{feature name}::{tensor name}".
    """
    output_logits = {}
    output_last_hidden = {}
    for feature_name, feature in self.output_features.items():
        # Targets exist only at training time; pass None during prediction.
        target = None if targets is None else targets[feature_name]
        decoder_outputs = feature(
            combiner_outputs, output_last_hidden, mask=mask, target=target)

        # Flatten this feature's decoder outputs into the shared dict.
        for tensor_name, tensor in decoder_outputs.items():
            output_feature_utils.set_output_feature_tensor(
                output_logits, feature_name, tensor_name, tensor)

        # Expose the hidden state so dependent output features can consume it.
        output_last_hidden[feature_name] = decoder_outputs["last_hidden"]
    return output_logits
def forward(
    self,
    inputs: Union[Dict[str, torch.Tensor],
                  Dict[str, np.ndarray],
                  Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]],
    mask=None,
) -> Dict[str, torch.Tensor]:
    """Forward pass of the model.

    Args:
        inputs: Either a dict of input name -> tensor/array, or a tuple
            ``(inputs, targets)`` where targets maps target names to
            tensors/arrays. Both dicts are normalized to tensors in place.
        mask: Optional mask forwarded to every decoder.

    Returns:
        A dict of "{feature name}::{tensor name}" -> output tensor.
    """
    targets = None
    if isinstance(inputs, tuple):
        inputs, targets = inputs
        # Normalize targets to torch tensors (mutates the caller's dict).
        for name, value in targets.items():
            targets[name] = value if isinstance(value, torch.Tensor) else torch.from_numpy(value)

    assert inputs.keys() == self.input_features.keys()

    # Normalize inputs to torch tensors (mutates the caller's dict).
    for name, value in inputs.items():
        inputs[name] = value if isinstance(value, torch.Tensor) else torch.from_numpy(value)

    # Encode every input feature, then combine.
    encoder_outputs = {
        name: self.input_features[name](value) for name, value in inputs.items()
    }
    combiner_outputs = self.combiner(encoder_outputs)

    output_logits = {}
    output_last_hidden = {}
    for feature_name, decoder in self.output_features.items():
        decoder_inputs = (combiner_outputs, copy.copy(output_last_hidden))
        # Presence of targets signals training; during prediction they
        # are omitted from the decoder inputs.
        if targets is not None:
            decoder_inputs = (decoder_inputs, targets[feature_name])
        decoder_outputs = decoder(decoder_inputs, mask=mask)

        # Flatten decoder outputs into the overall output dictionary.
        for tensor_name, tensor in decoder_outputs.items():
            output_feature_utils.set_output_feature_tensor(
                output_logits, feature_name, tensor_name, tensor)
    return output_logits
output_last_hidden = {} for output_feature_name, decoder in self.output_features.items(): # use presence or absence of targets # to signal training or prediction decoder_inputs = (combiner_outputs, copy.copy(output_last_hidden)) if targets is not None: # targets are only used during training, # during prediction they are omitted decoder_inputs = (decoder_inputs, targets[output_feature_name]) decoder_outputs = decoder(decoder_inputs, mask=mask) # Add decoder outputs to overall output dictionary. for decoder_output_name, tensor in decoder_outputs.items(): output_feature_utils.set_output_feature_tensor( output_logits, output_feature_name, decoder_output_name, tensor ) return output_logits def predictions(self, inputs, output_features=None): # check validity of output_features if output_features is None: of_list = self.output_features elif isinstance(output_features, str): if output_features == "all": of_list = set(self.output_features.keys()) elif output_features in self.output_features: of_list = [output_features] else: raise ValueError( "'output_features' {} is not a valid for this model. "
def forward(
    self,
    inputs: Union[
        Dict[str, torch.Tensor],
        Dict[str, np.ndarray],
        Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]
    ],
    mask=None,
) -> Dict[str, torch.Tensor]:
    """Forward pass of the model.

    Args:
        inputs: Inputs to the model. Can be a dictionary of input names to
            input tensors or a tuple of (inputs, targets) where inputs is
            a dictionary of input names to input tensors and targets is a
            dictionary of target names to target tensors. Both dicts are
            normalized to torch tensors in place.
        mask: A mask for the inputs, forwarded to every output feature.

    Returns:
        A dictionary of output {feature name}::{tensor_name} -> output tensor.
    """
    if isinstance(inputs, tuple):
        inputs, targets = inputs
        # Convert targets to tensors (mutates the caller's dict).
        for target_feature_name, target_value in targets.items():
            if not isinstance(target_value, torch.Tensor):
                targets[target_feature_name] = torch.from_numpy(target_value)
            else:
                targets[target_feature_name] = target_value
    else:
        targets = None

    # Bug fix: `list(...) == dict_keys` never compares equal (list.__eq__
    # with a keys view is always False), so the original assertion could
    # never pass. Compare as sets to actually validate the feature names.
    assert set(inputs.keys()) == set(self.input_features.keys())

    # Convert inputs to tensors (mutates the caller's dict).
    for input_feature_name, input_values in inputs.items():
        if not isinstance(input_values, torch.Tensor):
            inputs[input_feature_name] = torch.from_numpy(input_values)
        else:
            inputs[input_feature_name] = input_values

    # Encode each input feature independently.
    encoder_outputs = {}
    for input_feature_name, input_values in inputs.items():
        encoder = self.input_features[input_feature_name]
        encoder_output = encoder(input_values)
        encoder_outputs[input_feature_name] = encoder_output

    combiner_outputs = self.combiner(encoder_outputs)

    # Invoke output features.
    output_logits = {}
    output_last_hidden = {}
    for output_feature_name, output_feature in self.output_features.items():
        # Use the presence or absence of targets to signal training or prediction.
        target = targets[output_feature_name] if targets is not None else None
        decoder_outputs = output_feature(
            combiner_outputs, output_last_hidden, mask=mask, target=target)

        # Add decoder outputs to overall output dictionary.
        for decoder_output_name, tensor in decoder_outputs.items():
            output_feature_utils.set_output_feature_tensor(
                output_logits, output_feature_name, decoder_output_name, tensor
            )

        # Save the hidden state of the output feature (for feature dependencies).
        output_last_hidden[output_feature_name] = decoder_outputs["last_hidden"]
    return output_logits
def forward(
    self,
    inputs: Union[Dict[str, torch.Tensor],
                  Dict[str, np.ndarray],
                  Tuple[Dict[str, torch.Tensor], Dict[str, torch.Tensor]]],
    mask=None,
) -> Dict[str, torch.Tensor]:
    """Inference forward pass through the compiled model.

    Args:
        inputs: A dict of input name -> tensor/array, or a tuple of
            (inputs, targets); targets are converted but otherwise unused
            here.
        mask: Unused in this path; kept for interface parity.

    Returns:
        A dict with a single "{feature name}::LOGITS" entry for the
        model's (single) output feature.

    Raises:
        ValueError: If the model has not been trained/compiled yet.
    """
    if self.compiled_model is None:
        raise ValueError("Model has not been trained yet.")

    if isinstance(inputs, tuple):
        inputs, targets = inputs
        # Convert targets to tensors (not consumed below).
        for target_feature_name, target_value in targets.items():
            if not isinstance(target_value, torch.Tensor):
                targets[target_feature_name] = torch.from_numpy(target_value)
            else:
                targets[target_feature_name] = target_value
    else:
        targets = None

    # Bug fix: `list(...) == dict_keys` never compares equal, so the
    # original assertion could never pass. Compare as sets instead.
    assert set(inputs.keys()) == set(self.input_features.keys())

    # Convert inputs to tensors; already-tensor inputs are reshaped to
    # column vectors for the concatenation below.
    for input_feature_name, input_values in inputs.items():
        if not isinstance(input_values, torch.Tensor):
            inputs[input_feature_name] = torch.from_numpy(input_values)
        else:
            inputs[input_feature_name] = input_values.view(-1, 1)

    # TODO(travis): include encoder and decoder steps during inference
    # encoder_outputs = {}
    # for input_feature_name, input_values in inputs.items():
    #     encoder = self.input_features[input_feature_name]
    #     encoder_output = encoder(input_values)
    #     encoder_outputs[input_feature_name] = encoder_output

    # concatenate inputs into a single feature matrix
    inputs = torch.cat(list(inputs.values()), dim=1)

    # Invoke the single output feature.
    output_logits = {}
    # Bug fix: keys views are not subscriptable (`keys()[0]` raises
    # TypeError); take the first feature name via an iterator instead.
    output_feature_name = next(iter(self.output_features.keys()))
    output_feature = self.output_features[output_feature_name]

    preds = self.compiled_model(inputs)
    if output_feature.type() == NUMBER:
        # regression: drop a trailing singleton dimension if present
        if len(preds.shape) == 2:
            preds = preds.squeeze(1)
        logits = preds
    else:
        # classification: compiled model returns (labels, probabilities)
        _, probs = preds
        # keep positive class only for binary feature
        probs = probs[:, 1] if output_feature.type() == BINARY else probs
        logits = torch.logit(probs)
    output_feature_utils.set_output_feature_tensor(
        output_logits, output_feature_name, LOGITS, logits)
    return output_logits