def _sparsify(self, sparsity_type, epsilon=0.01): sparse_vector = self._params[str(sparsity_type) + "_sparsity_vector"] sparse_vector = sparse_vector - sparse_vector.mean() sparse_vector = sparse_vector * (sparse_vector.var() + epsilon) sparse_vector = sparse_vector * torch.exp( self._params[str(sparsity_type) + "_sparsity_coef"]) return torch.sigmoid(sparse_vector)
def forward(self, x):
    """Run a batch through the convolutional stack and the dense head.

    Three ReLU conv layers, a max-pool, a fourth ReLU conv layer, a flatten
    to 2304 features, then two ReLU fully connected layers, finishing with a
    sigmoid.

    NOTE(review): the final sigmoid is applied on top of a ReLU, which
    confines every output to [0.5, 1) — confirm the double activation is
    intended.

    :param x: input image batch (shape must flatten to 2304 features after
        conv4 — TODO confirm expected input size from the layer definitions).
    :return: sigmoid-activated output tensor.
    """
    out = F.relu(self.conv1(x))
    out = F.relu(self.conv2(out))
    out = F.relu(self.conv3(out))
    out = self.mpool(out)
    out = F.relu(self.conv4(out))
    # Flatten the feature maps for the fully connected head.
    out = out.view(-1, 2304)
    out = F.relu(self.linear1(out))
    out = F.relu(self.linear2(out))
    return torch.sigmoid(out)
def _forward(self, constants):
    """Dispatch to the forward pass matching the configured solver mode.

    When elastic feature sparsity is on, blends the plain and the
    feature-sparse inversion outputs with the learned mixing weight
    ``mu = sigmoid(feature_elastic_coef)``.  Otherwise falls through to the
    eigen-decomposition path or the plain inversion path, the latter
    honoring the ``feature_sparsity`` flag.

    :param constants: precomputed problem constants forwarded to the solver.
    :return: the predicted output tensor from the selected path.
    """
    if self.elastic_feature_sparsity:
        mix = torch.sigmoid(self._params["feature_elastic_coef"])
        dense_pred = self._inversion_forward(constants, feature_sparsity=False)
        sparse_pred = self._inversion_forward(constants, feature_sparsity=True)
        return dense_pred * mix + (1 - mix) * sparse_pred
    if self.eigen_decomposition:
        return self._eigen_decomposition_forward(constants)
    return self._inversion_forward(constants, feature_sparsity=self.feature_sparsity)
def _inversion_coef(self, constants):
    """Solve for the regression coefficients via matrix inversion and store
    them on ``self.coef_`` (converted to an array; nothing is returned).

    The ridge penalty is ``exp(lambda) * I`` sized to the Gram matrix.  The
    plain and/or feature-sparse solvers are selected by the sparsity flags;
    in the elastic case their results are blended with the learned weight
    ``mu = sigmoid(feature_elastic_coef)``.

    :param constants: dict of precomputed problem constants.
        NOTE(review): unpacking ``constants.values()`` relies on the dict's
        insertion order being X, y, permuted_y, XTX, XTy, XTmy — verify
        against the caller that builds it.
    """
    X, y, permuted_y, XTX, XTy, XTmy = constants.values()
    # Identity sized to the Gram matrix, moved to GPU when configured.
    eye = torch.diag(torch.ones(XTX.shape[0])).float()
    if self.GPU:
        eye = eye.cuda()
    penalty = torch.exp(self._params["lambda"]) * eye
    if self.elastic_feature_sparsity:
        mu = torch.sigmoid(self._params["feature_elastic_coef"])
        dense_coef = self._inversion_coef_without_sparsity(penalty, XTX, XTy)
        sparse_coef = self._inversion_coef_with_sparsity(penalty, XTX, XTy)
        coef = dense_coef * mu + sparse_coef * (1 - mu)
    elif self.feature_sparsity:
        coef = self._inversion_coef_with_sparsity(penalty, XTX, XTy)
    else:
        coef = self._inversion_coef_without_sparsity(penalty, XTX, XTy)
    self.coef_ = self._tensor_to_array(coef)