Example #1
from typing import Dict, List, Text, Tuple, Union

import torch
from torch import nn


def prune_fc_layer_with_craig(layer: nn.Linear,
                              prune_percent_per_layer: float,
                              similarity_metric: Union[Text, Dict] = "",
                              prune_type: Text = "craig",
                              **kwargs) -> Tuple[List[int], List[float]]:
    """Prunes a fully connected layer's output nodes using a CRAIG subset.

    Selects a subset of output nodes (and per-node weights) via
    get_layer_craig_subset, then keeps only the corresponding rows of the
    layer's weight and bias, rescaled by the subset weights. Returns the
    kept node indices and their weights.
    """
    # Get CRAIG subset.
    subset_nodes: List
    subset_weights: List
    subset_nodes, subset_weights = get_layer_craig_subset(
        layer=layer,
        original_num_nodes=layer.out_features,
        prune_percent_per_layer=prune_percent_per_layer,
        similarity_metric=similarity_metric,
        prune_type=prune_type,
        **kwargs)

    # Keep only the selected nodes (rows of weight/bias) and rescale them.
    num_nodes: int = len(subset_nodes)

    # Prune the current layer: scale the retained weights (and bias, if
    # present) by the corresponding subset weights.
    subset_weights_tensor = torch.tensor(subset_weights,
                                         dtype=layer.weight.dtype,
                                         device=layer.weight.device)
    layer.weight = nn.Parameter(layer.weight[subset_nodes] *
                                subset_weights_tensor.reshape((num_nodes, 1)))
    if layer.bias is not None:
        layer.bias = nn.Parameter(layer.bias[subset_nodes] *
                                  subset_weights_tensor)
    layer.out_features = num_nodes

    return subset_nodes, subset_weights
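
Below is a minimal usage sketch of the pruning step above on a toy layer. It is an illustration only: the node indices and per-node weights are hypothetical stand-ins for what get_layer_craig_subset would return, not output of the actual CRAIG selection.

import torch
from torch import nn

layer = nn.Linear(in_features=8, out_features=4)
subset_nodes = [0, 2]        # hypothetical node indices kept by CRAIG
subset_weights = [1.5, 0.5]  # hypothetical per-node scaling weights

w = torch.tensor(subset_weights, dtype=layer.weight.dtype)
layer.weight = nn.Parameter(layer.weight[subset_nodes] * w.reshape((-1, 1)))
layer.bias = nn.Parameter(layer.bias[subset_nodes] * w)
layer.out_features = len(subset_nodes)

print(layer(torch.randn(3, 8)).shape)  # torch.Size([3, 2])
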
    def _create_mean_predictor(self):
        """Creates a new predictor using the mean parameters across the predictor heads."""
        weights = []
        biases = []
        # Collect weights/biases from each predictor head and create tensors
        for i in range(self.n_participants):
            weights.append(self.predictor_heads[i].weight)
            biases.append(self.predictor_heads[i].bias)
        weights = torch.stack(weights)
        biases = torch.stack(biases)
        # Create new linear predictor and set weights/biases to means
        predictor_heads_mean = Linear(self.hidden_dim, self.output_dim)
        predictor_heads_mean.weight = Parameter(weights.mean(0))
        predictor_heads_mean.bias = Parameter(biases.mean(0))
        return predictor_heads_mean
    def _create_sampled_predictor(self):
        """Creates a new predictor using parameters sampled from the prior for random effects."""
        # Sample parameters and extract weight and bias parameters from flattened list
        sampled_params = MultivariateNormal(self.mean, self.cov).sample()
        flattened_mlp_params = sampled_params[
            : ((self.hidden_dim + 1) * self.output_dim)
        ]
        mlp_params = flattened_mlp_params.reshape(
            (self.output_dim, self.hidden_dim + 1)
        )
        weight, bias = mlp_params[:, :-1], mlp_params[:, -1]
        # Create new linear predictor and set weights/biases to sampled values
        predictor_head_sampled = Linear(self.hidden_dim, self.output_dim)
        predictor_head_sampled.weight = Parameter(weight)
        predictor_head_sampled.bias = Parameter(bias)
        return predictor_head_sampled
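
The reshaping in _create_sampled_predictor can be exercised in isolation. The sketch below uses arbitrary hidden_dim/output_dim values and a standard-normal prior (zero mean, identity covariance) as placeholders for the model's actual self.mean and self.cov; it only illustrates how the flat sample maps onto a Linear head's weight and bias.

import torch
from torch.distributions import MultivariateNormal
from torch.nn import Linear, Parameter

hidden_dim, output_dim = 5, 3
n_params = (hidden_dim + 1) * output_dim        # 18 flattened parameters

mean = torch.zeros(n_params)                    # placeholder prior mean
cov = torch.eye(n_params)                       # placeholder prior covariance

sampled = MultivariateNormal(mean, cov).sample()            # shape (18,)
mlp_params = sampled.reshape((output_dim, hidden_dim + 1))  # one row per output node
weight, bias = mlp_params[:, :-1], mlp_params[:, -1]        # (3, 5) and (3,)

head = Linear(hidden_dim, output_dim)
head.weight = Parameter(weight)
head.bias = Parameter(bias)
print(head(torch.randn(2, hidden_dim)).shape)               # torch.Size([2, 3])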