def __init__(self, size, t_min=0.0, t_max=1.0, method='uniform', noise_std=None):
    r"""Initializer method

    .. note::
        An instance method `get_examples` is dynamically created to generate 1-D training points.
        It will be called by the functions `solve` and `solve_system`.
    """
    super(Generator1D, self).__init__()
    self.size = size
    self.t_min, self.t_max = t_min, t_max
    self.method = method
    if noise_std:
        self.noise_std = noise_std
    else:
        self.noise_std = ((t_max - t_min) / size) / 4.0
    if method == 'uniform':
        self.examples = torch.zeros(self.size, requires_grad=True)
        self.getter = lambda: self.examples + torch.rand(self.size) * (self.t_max - self.t_min) + self.t_min
    elif method == 'equally-spaced':
        self.examples = torch.linspace(self.t_min, self.t_max, self.size, requires_grad=True)
        self.getter = lambda: self.examples
    elif method == 'equally-spaced-noisy':
        self.examples = torch.linspace(self.t_min, self.t_max, self.size, requires_grad=True)
        self.getter = lambda: torch.normal(mean=self.examples, std=self.noise_std)
    elif method == 'log-spaced':
        start, end = _compute_log_negative(t_min, t_max, self.__class__)
        self.examples = torch.logspace(start, end, self.size, requires_grad=True)
        self.getter = lambda: self.examples
    elif method == 'log-spaced-noisy':
        start, end = _compute_log_negative(t_min, t_max, self.__class__)
        self.examples = torch.logspace(start, end, self.size, requires_grad=True)
        self.getter = lambda: torch.normal(mean=self.examples, std=self.noise_std)
    elif method in ['chebyshev', 'chebyshev1']:
        self.examples = _chebyshev_first(t_min, t_max, size)
        self.getter = lambda: self.examples
    elif method == 'chebyshev2':
        self.examples = _chebyshev_second(t_min, t_max, size)
        self.getter = lambda: self.examples
    else:
        raise ValueError(f'Unknown method: {method}')
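# Standalone sketch of two of the sampling strategies above, with
# illustrative values and independent of the Generator1D class itself.
# The log-spaced variant assumes points should span [t_min, t_max],
# which is presumably what the _compute_log_negative helper arranges.
import math
import torch

t_min, t_max, size = 0.1, 10.0, 8

# 'uniform': fresh random points in [t_min, t_max] on every call
uniform_points = torch.rand(size) * (t_max - t_min) + t_min

# 'log-spaced': fixed points, logarithmically spaced over [t_min, t_max]
log_points = torch.logspace(math.log10(t_min), math.log10(t_max), size)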
def get_partition(args):
    """Create a non-decreasing sequence of values between zero and one.

    See https://en.wikipedia.org/wiki/Partition_of_an_interval.

    Args:
        args.K: length of the sequence minus one
        args.schedule: 'linear' or 'log'
        args.log_beta_min: log (base ten) of beta_min. Only used if
            args.schedule is 'log'. Default -10 (i.e. beta_min = 1e-10).
        args.device: torch.device object (CPU by default)

    Returns:
        tensor of shape [K + 1]
    """
    if args.K == 1:
        partition = tensor((0., 1.), args)
    else:
        if args.schedule == 'linear':
            partition = torch.linspace(0, 1, steps=args.K + 1, device=args.device)
        elif args.schedule == 'log':
            partition = torch.zeros(args.K + 1, device=args.device, dtype=torch.float)
            partition[1:] = torch.logspace(args.log_beta_min, 0, steps=args.K,
                                           device=args.device, dtype=torch.float)
        else:  # default is the log schedule
            partition = torch.zeros(args.K + 1, device=args.device, dtype=torch.float)
            partition[1:] = torch.logspace(args.log_beta_min, 0, steps=args.K,
                                           device=args.device, dtype=torch.float)
    return partition
def __init__(self, size, t_min=0.0, t_max=1.0, method='uniform', noise_std=None):
    r"""Initializer method

    .. note::
        An instance method `get_examples` is dynamically created to generate 1-D training points.
        It will be called by the functions `solve` and `solve_system`.
    """
    super(Generator1D, self).__init__()
    self.size = size
    self.t_min, self.t_max = t_min, t_max
    if method == 'uniform':
        self.examples = torch.zeros(self.size, requires_grad=True)
        self.getter = lambda: self.examples + torch.rand(self.size) * (self.t_max - self.t_min) + self.t_min
    elif method == 'equally-spaced':
        self.examples = torch.linspace(self.t_min, self.t_max, self.size, requires_grad=True)
        self.getter = lambda: self.examples
    elif method == 'equally-spaced-noisy':
        self.examples = torch.linspace(self.t_min, self.t_max, self.size, requires_grad=True)
        if noise_std:
            self.noise_std = noise_std
        else:
            self.noise_std = ((t_max - t_min) / size) / 4.0
        self.getter = lambda: torch.normal(mean=self.examples, std=self.noise_std)
    elif method == 'log-spaced':
        # Note: t_min and t_max are used as base-10 exponents here, so the
        # points span [10**t_min, 10**t_max] rather than [t_min, t_max].
        self.examples = torch.logspace(self.t_min, self.t_max, self.size, requires_grad=True)
        self.getter = lambda: self.examples
    elif method == 'log-spaced-noisy':
        self.examples = torch.logspace(self.t_min, self.t_max, self.size, requires_grad=True)
        if noise_std:
            self.noise_std = noise_std
        else:
            self.noise_std = ((t_max - t_min) / size) / 4.0
        self.getter = lambda: torch.normal(mean=self.examples, std=self.noise_std)
    else:
        raise ValueError(f'Unknown method: {method}')
def test_create_bool_tensors(self, device):
    expected = torch.tensor([0], dtype=torch.int64, device=device)
    self.assertEqual(torch.arange(False, True, device=device), expected)
    self.assertEqual(torch.arange(True, device=device), expected)
    expected = torch.tensor([0, 0.5], dtype=torch.get_default_dtype(), device=device)
    self.assertEqual(torch.arange(False, True, 0.5, device=device), expected)
    expected = torch.ones(0, dtype=torch.int64, device=device)
    self.assertEqual(torch.arange(False, False, device=device), expected)

    self.assertEqual(torch.linspace(False, True, device=device),
                     torch.linspace(0, 1, device=device))
    self.assertEqual(torch.logspace(False, True, device=device),
                     torch.logspace(0, 1, device=device))

    # This seems like odd behavior, but ints also create float tensors;
    # numpy doesn't have this function.
    self.assertEqual(torch.scalar_tensor(False, device=device),
                     torch.tensor(0., device=device))
def __init__(self, model, train_loader, val_loader, trainval_loader, test_loader,
             metric, device, num_classes, feature_dim=2048, wd_range=None):
    self.model = model
    self.train_loader = train_loader
    self.val_loader = val_loader
    self.trainval_loader = trainval_loader
    self.test_loader = test_loader
    self.metric = metric
    self.device = device
    self.num_classes = num_classes
    self.feature_dim = feature_dim
    self.best_params = {}
    if wd_range is None:
        self.wd_range = torch.logspace(-6, 5, 45)
    else:
        self.wd_range = wd_range
    self.classifier = LogisticRegression(self.feature_dim, self.num_classes,
                                         self.metric).to(self.device)
def get_partition(num_partitions, partition_type, log_beta_min=-10, device=None):
    """Create a non-decreasing sequence of values between zero and one.

    See https://en.wikipedia.org/wiki/Partition_of_an_interval.

    Args:
        num_partitions: length of the sequence minus one
        partition_type: 'linear' or 'log'
        log_beta_min: log (base ten) of beta_min. Only used if partition_type
            is 'log'. Default -10 (i.e. beta_min = 1e-10).
        device: torch.device object (CPU by default)

    Returns:
        tensor of shape [num_partitions + 1]
    """
    if device is None:
        device = torch.device('cpu')
    if num_partitions == 1:
        partition = torch.tensor([0, 1], dtype=torch.float, device=device)
    else:
        if partition_type == 'linear':
            partition = torch.linspace(0, 1, steps=num_partitions + 1, device=device)
        elif partition_type == 'log':
            partition = torch.zeros(num_partitions + 1, device=device, dtype=torch.float)
            partition[1:] = torch.logspace(log_beta_min, 0, steps=num_partitions,
                                           device=device, dtype=torch.float)
        else:
            raise ValueError(f"Unknown partition_type: {partition_type}")
    return partition
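# Usage sketch for get_partition as defined above, with small illustrative
# values showing what the two schedules produce.
import torch

print(get_partition(4, 'linear'))
# tensor([0.0000, 0.2500, 0.5000, 0.7500, 1.0000])

print(get_partition(4, 'log', log_beta_min=-6))
# tensor([0.0000e+00, 1.0000e-06, 1.0000e-04, 1.0000e-02, 1.0000e+00])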
def plot_delta_measure(self, start, end, steps=50):
    """
    Plot the delta measure and C norm as a function of gamma.
    :param start: base-10 exponent of the smallest gamma value
    :param end: base-10 exponent of the largest gamma value
    :param steps: number of gamma values to evaluate
    :return:
    """
    # Gamma values
    gamma_values = torch.logspace(start=start, end=end, steps=steps)

    # Log10 of gamma values
    gamma_log_values = torch.log10(gamma_values)

    # Delta measures
    C_norms = torch.zeros(steps)
    delta_scores = torch.zeros(steps)

    # For each gamma, compute the delta score and C norm
    for i, gamma in enumerate(gamma_values):
        delta_scores[i], C_norms[i] = self.delta_measure(float(gamma), epsilon=0.1)
    # end for

    # Plot
    plt.plot(gamma_log_values.numpy(), delta_scores.numpy())
    plt.plot(gamma_log_values.numpy(), C_norms.numpy())
    plt.show()
def fourier_encode(x, max_freq, num_bands=4, base=2):
    """
    Positional encodings for input x: enrich the input features with Fourier
    feature encodings. This is a way to tag each input unit with a position
    and construct topographic maps.
    :param x: input tensor
    :param max_freq: maximum frequency of the encoding
    :param num_bands: number of frequency bands
    :param base: base of the logarithmic frequency spacing
    :return: each input unit of x tagged with a position encoding
    """
    x = x.unsqueeze(-1)
    device, dtype, orig_x = x.device, x.dtype, x

    scales = torch.logspace(1., log(max_freq / 2) / log(base), num_bands,
                            base=base, device=device, dtype=dtype)
    scales = scales[(*((None,) * (len(x.shape) - 1)), Ellipsis)]

    x = x * scales * pi
    x = torch.cat([x.sin(), x.cos()], dim=-1)
    x = torch.cat((x, orig_x), dim=-1)
    return x
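# Shape check for fourier_encode as defined above (assumes the function and
# its `log`/`pi` imports are in scope). Each scalar position expands to
# 2 * num_bands (sin + cos) plus 1 (the original value) features.
import torch
from math import log, pi

pos = torch.linspace(-1, 1, 16)            # 16 scalar positions
enc = fourier_encode(pos, max_freq=10, num_bands=4)
print(enc.shape)                           # torch.Size([16, 9])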
def get_power_spectrum(target, bins):
    '''Get the binned power spectrum of a given image.

    Note: this uses the legacy torch.fft(...) function API (PyTorch < 1.8)
    and assumes a square image on a CUDA device.
    '''
    M, N = target.shape
    # Legacy complex FFT: stack a zero imaginary channel, then take a 2-D FFT
    modulus = torch.fft(
        torch.cat((target.reshape(M, N, 1),
                   torch.zeros((M, N, 1)).type(torch.cuda.FloatTensor)), 2), 2)
    modulus = (modulus[:, :, 0]**2 + modulus[:, :, 1]**2)**0.5
    # Manual fftshift: move the zero-frequency component to the centre
    modulus = torch.cat(
        (torch.cat((modulus[M // 2:, M // 2:], modulus[M // 2:, :M // 2]), 0),
         torch.cat((modulus[:M // 2, M // 2:], modulus[:M // 2, :M // 2]), 0)), 1)
    X = np.arange(0, M)
    Y = np.arange(0, N)
    Xgrid, Ygrid = np.meshgrid(X, Y)
    R = ((Xgrid - M / 2)**2 + (Ygrid - N / 2)**2)**0.5
    R = torch.from_numpy(R).type(torch.cuda.FloatTensor)
    # Logarithmically spaced radial bin edges from 1 to M/2, with a leading 0
    R_range = torch.logspace(0.0, np.log10(M / 2), bins).type(torch.cuda.FloatTensor)
    R_range = torch.cat((torch.tensor([0]).type(torch.cuda.FloatTensor), R_range))
    power_spectrum = torch.zeros(len(R_range) - 1).type(torch.cuda.FloatTensor)
    for i in range(len(R_range) - 1):
        select = (R >= R_range[i]) * (R < R_range[i + 1])
        power_spectrum[i] = modulus[select].mean()
    return power_spectrum, R_range
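# For reference, a sketch of the same radial binning on the torch.fft module
# API (PyTorch >= 1.10 for meshgrid's indexing kwarg). This is an assumed
# rewrite for illustration, not part of the original code; it runs on CPU.
import numpy as np
import torch

def get_power_spectrum_modern(target: torch.Tensor, bins: int):
    M, N = target.shape
    # Complex FFT magnitude, with the zero frequency shifted to the centre
    modulus = torch.fft.fftshift(torch.fft.fft2(target).abs())
    # Radial distance of each pixel from the centre
    ygrid, xgrid = torch.meshgrid(torch.arange(M), torch.arange(N), indexing='ij')
    R = (((xgrid - M / 2)**2 + (ygrid - N / 2)**2)**0.5).to(target.dtype)
    # Logarithmically spaced radial bin edges, with a leading 0
    R_range = torch.cat((torch.zeros(1),
                         torch.logspace(0.0, float(np.log10(M / 2)), bins)))
    power_spectrum = torch.stack([
        modulus[(R >= R_range[i]) & (R < R_range[i + 1])].mean()
        for i in range(len(R_range) - 1)
    ])
    return power_spectrum, R_range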
def compute_ls_grid(As, y, sns_vec, m, ks, n_ls, l_eps):
    """Compute l values for each given k and return a dictionary mapping k to
    a list (in decreasing order) of lambda values.

    Arguments have the same meaning as in gel_paths2. sns_vec is a vector of
    sns_j values as opposed to the matrix computed in gel_paths2.
    """
    ls_grid = {}

    # The bound is given by max{||A_j.T@(y - b_0)|| / (m*sqrt{n_j}*k)} where
    # b_0 = 1.T@y/m. So most things can be precomputed.
    l_max_b_0 = y.mean()
    l_max_unscaled = max((A_j.t() @ (y - l_max_b_0)).norm(p=2) / (m * sns_j)
                         for A_j, sns_j in zip(As, sns_vec))

    for k in ks:
        l_max = l_max_unscaled / k
        if n_ls == 1:
            ls_grid[k] = [l_max]
        else:
            l_min = l_max * l_eps
            ls = torch.logspace(math.log10(l_min), math.log10(l_max), steps=n_ls)
            ls = sorted(ls, reverse=True)
            ls_grid[k] = ls

    return ls_grid
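# The logspace-then-sort pattern above is the standard way to build a
# decreasing regularization path from l_max down to l_max * l_eps.
# A standalone sketch with illustrative values:
import math
import torch

l_max, l_eps, n_ls = 10.0, 1e-3, 5
ls = torch.logspace(math.log10(l_max * l_eps), math.log10(l_max), steps=n_ls)
ls = sorted(ls, reverse=True)
print([float(l) for l in ls])
# [10.0, 1.778..., 0.316..., 0.056..., 0.01]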
def __init__(
    self,
    ts: torch.Tensor = torch.logspace(-1, 1, 256),
    k: int = 5,
    m: int = 10,
    niters: int = 100,
    rademacher: bool = False,
    normalized_laplacian: bool = True,
    normalize: str = 'empty',
    msid_mode: str = "max",
) -> None:
    r"""
    Args:
        ts: Temperature values.
        k: Number of neighbours for graph construction.
        m: Lanczos steps in SLQ.
        niters: Number of starting random vectors for SLQ.
        rademacher: True to use the Rademacher distribution, False to use the
            standard normal distribution for random vectors in Hutchinson.
        normalized_laplacian: if True, use the normalized Laplacian.
        normalize: 'empty' for average heat kernel (corresponds to the empty
            graph normalization of NetLSD), 'complete' for the complete graph,
            'er' for Erdos-Renyi normalization, 'none' for no normalization.
        msid_mode: 'l2' to compute the l2 norm of the distance between `msid1`
            and `msid2`; 'max' to find the maximum absolute difference between
            two descriptors over temperature.
    """
    super(MSID, self).__init__()
    self.ts = ts.numpy()  # MSID works only with NumPy tensors
    self.k = k
    self.m = m
    self.niters = niters
    self.rademacher = rademacher
    self.msid_mode = msid_mode
    self.normalized_laplacian = normalized_laplacian
    self.normalize = normalize
def get_position_code(axis, max_resolution, num_bands, frequency_base, pi, dtype, device="cpu"):
    # Adapted from https://github.com/lucidrains/perceiver-pytorch/blob/main/perceiver_pytorch/perceiver_pytorch.py#L31
    normalized_grid = [
        torch.linspace(start=-1, end=1, steps=ax, device=device, dtype=dtype)
        for ax in axis
    ]
    coordinate = torch.stack(torch.meshgrid(*normalized_grid), dim=-1)
    # Broadcast to num_bands when multiplying with freq below
    coordinate = coordinate.unsqueeze(dim=-1)
    freq = torch.logspace(start=1,
                          end=(log(max_resolution) / log(frequency_base)),
                          steps=num_bands,
                          base=frequency_base,
                          dtype=dtype,
                          device=device)
    # Expand dimensions to (1, ..., 1, num_bands)
    freq = freq[[None for _ in range(len(coordinate.shape) - 1)] + [...]]
    freq = freq * pi * coordinate
    fourier_features = torch.cat([freq.sin(), freq.cos()], dim=-1)
    return torch.cat([coordinate, fourier_features], dim=-1)
def initialize(self, agent, n_itr, batch_spec, mid_batch_reset, examples):
    if agent.recurrent:
        raise TypeError("For recurrent agents use r2d1 algo.")
    self.agent = agent
    if (self.eps_final_min is not None and
            self.eps_final_min != self.eps_final):  # vector-valued epsilon
        self.eps_init = self.eps_init * torch.ones(batch_spec.B)
        self.eps_final = torch.logspace(
            torch.log10(torch.tensor(self.eps_final_min)),
            torch.log10(torch.tensor(self.eps_final)),
            batch_spec.B)
    agent.set_sample_epsilon_greedy(self.eps_init)
    agent.give_eval_epsilon_greedy(self.eps_eval)
    self.n_itr = n_itr
    self.optimizer = self.OptimCls(agent.parameters(),
                                   lr=self.learning_rate, **self.optim_kwargs)
    if self.initial_optim_state_dict is not None:
        self.optimizer.load_state_dict(self.initial_optim_state_dict)
    sample_bs = batch_spec.size
    train_bs = self.batch_size
    self.updates_per_optimize = round(self.training_ratio * sample_bs / train_bs)
    logger.log(f"From sampler batch size {sample_bs}, training "
               f"batch size {train_bs}, and training ratio "
               f"{self.training_ratio}, computed {self.updates_per_optimize} "
               f"updates per iteration.")
    self.target_update_itr = round(self.target_update_steps / sample_bs)
    self.eps_itr = max(1, self.eps_steps // sample_bs)
    self.min_itr_learn = self.min_steps_learn // sample_bs
    if self.prioritized_replay:
        self.pri_beta_itr = max(1, self.pri_beta_steps // sample_bs)
    self.initialize_replay_buffer(batch_spec, examples, mid_batch_reset)
def __forward(self, episode):
    # Training part of the episode
    self.feature_extractor.train()
    x_train, y_train = episode['Dtrain']
    m = len(x_train)
    phis = self.feature_extractor(x_train)

    if self.do_cv:
        l2s = torch.logspace(-4, 1, 10).to(self.device) if not self.fixe_hps else self.l2s
        if self.kernel == 'linear':
            kernels_params = dict()
        elif self.kernel == 'rbf':
            kernels_params = dict(gamma=torch.logspace(-4, 1, 10).to(self.device))
        elif self.kernel == 'sm':
            raise NotImplementedError
        else:
            raise NotImplementedError
        learner = KrrLearnerCV(l2s, self.kernel, dual=False, **kernels_params)
    else:
        # l2 = torch.clamp(self.l2, min=1e-3)
        l2 = torch.FloatTensor([self.l2]).to(self.device)
        kp = {k: torch.clamp(self.kernel_params[k], min=1e-6)
              for k in self.kernel_params}
        learner = KrrLearner(l2, self.kernel, dual=False, **kp)
    learner.fit(phis, y_train)

    # Testing part of the episode
    self.feature_extractor.eval()
    x_test, _ = episode['Dtest']
    n = len(x_test)
    bsize = 10
    res = torch.cat([learner(self.feature_extractor(x_test[i:i + bsize]))
                     for i in range(0, n, bsize)])

    self.phis_norms.append(torch.norm(phis, dim=1))
    self.l2_ = learner.l2
    self.kernel_params_ = learner.kernel_params
    return res
def __init__(self, dim, max_freq=10):
    super().__init__()
    self.dim = dim
    scales = torch.logspace(0., log(max_freq / 2) / log(2), self.dim // 4, base=2)
    self.register_buffer('scales', scales)
def _sample_sigmas(self, v):
    if self.dist == "linear":
        sigmas = torch.linspace(self.sigma_begin, self.sigma_end, len(v))
    elif self.dist == "geometrical":
        sigmas = torch.logspace(np.log10(self.sigma_begin),
                                np.log10(self.sigma_end), len(v))
    else:
        raise NotImplementedError
    return sigmas.to(v.device)
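# Standalone comparison of the two schedules above with illustrative
# endpoints: the geometric schedule keeps a constant ratio between
# consecutive sigmas, while the linear one keeps a constant difference.
import numpy as np
import torch

sigma_begin, sigma_end, n = 1.0, 0.01, 5
linear = torch.linspace(sigma_begin, sigma_end, n)
geometric = torch.logspace(np.log10(sigma_begin), np.log10(sigma_end), n)
print(linear)     # tensor([1.0000, 0.7525, 0.5050, 0.2575, 0.0100])
print(geometric)  # tensor([1.0000, 0.3162, 0.1000, 0.0316, 0.0100])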
def test_log_beta_stirling(tol):
    x = torch.logspace(-5, 5, 200)
    y = x.unsqueeze(-1)

    expected = log_beta(x, y)
    actual = log_beta(x, y, tol=tol)

    assert (actual <= expected).all()
    assert (expected < actual + tol).all()
def create_tensor(self):
    # Create a tensor with torch.tensor
    if self.m1 == 1:
        arr = np.ones((3, 3))
        arr_to_tensor = torch.tensor(arr, device="cuda")  # place the tensor on the GPU
        print("arr:{},\ntensor:{}".format(arr, arr_to_tensor.data))
    # Create a tensor with torch.from_numpy; this is a shallow copy, so
    # modifying the tensor also modifies the numpy array
    if self.m1 == 2:
        arr = np.array([[1, 2, 3], [4, 5, 6]])
        arr_to_tensor = torch.from_numpy(arr)
        arr_to_tensor[0, 0] = -1
        print("arr:{}\ntensor:{}".format(arr, arr_to_tensor.data))
    # Create an all-zeros tensor with torch.zeros
    if self.m1 == 3:
        out_t = torch.tensor([])
        tensor = torch.zeros((3, 3), out=out_t)  # the out argument is also a shallow copy pointing to the same memory
        print("tensor:{}\nout_t:{}\n".format(tensor, out_t))
        print("the tensor's address:{}\nthe out_t's address:{}".format(
            id(tensor), id(out_t)))
    # Create a tensor filled with a given value with torch.full
    if self.m1 == 4:
        tensor = torch.full((3, 3), 1)
        print(tensor)
    # Create an arithmetic sequence with torch.arange
    if self.m1 == 5:
        tensor = torch.arange(2, 10, 2)  # the third argument is the step size
        print(tensor)
    # Create an evenly spaced sequence with torch.linspace
    if self.m1 == 6:
        tensor = torch.linspace(2, 10, 6)  # the third argument is the number of points
        print(tensor)
    # Create a logarithmically spaced sequence with torch.logspace
    if self.m1 == 7:
        tensor = torch.logspace(1, 100, steps=100)  # steps must be given explicitly in newer PyTorch
        print(tensor)
    # Create an identity matrix
    if self.m1 == 8:
        tensor = torch.eye(3)
        print(tensor)
    # Create a normally distributed tensor with torch.normal; note the four
    # combinations of mean and std each being a tensor or a scalar
    if self.m1 == 9:
        mean = torch.arange(1, 5, dtype=torch.float)
        std = 1.0
        t_normal = torch.normal(mean, std)
        print(t_normal)
    # Generate a random permutation of 0 to n-1
    if self.m1 == 10:
        print(torch.randperm(10))
def get_precision_recall(args, score, label, num_samples, beta=1.0,
                         sampling='log', predicted_score=None):
    '''
    :param args:
    :param score: anomaly scores
    :param label: anomaly labels
    :param num_samples: the number of threshold samples
    :param beta: weight of precision in the F-score
    :param sampling: 'log' or 'linear' threshold sampling
    :return:
    '''
    if predicted_score is not None:
        score = score - torch.FloatTensor(predicted_score).squeeze().to(args.device)

    maximum = score.max()
    score = score.to(args.device)
    if sampling == 'log':
        # Sample thresholds logarithmically: the sampled thresholds are
        # logarithmically spaced between 10^start and 10^end.
        th = torch.logspace(0, torch.log10(torch.tensor(maximum)), num_samples).to(args.device)
    else:
        # Sample thresholds equally spaced between start and end.
        th = torch.linspace(0, maximum, num_samples).to(args.device)

    precision = []
    recall = []

    for i in range(len(th)):
        anomaly = (torch.sum((score > th[i]).float(), 1) > 0).float()
        idx = anomaly * 2 + label
        tn = (idx == 0.0).sum().item()  # true negatives
        fn = (idx == 1.0).sum().item()  # false negatives
        fp = (idx == 2.0).sum().item()  # false positives
        tp = (idx == 3.0).sum().item()  # true positives

        p = tp / (tp + fp + 1e-7)
        r = tp / (tp + fn + 1e-7)

        if p != 0 and r != 0:
            precision.append(p)
            recall.append(r)

    precision = torch.FloatTensor(precision)
    recall = torch.FloatTensor(recall)

    f1 = (1 + beta**2) * (precision * recall).div(beta**2 * precision + recall + 1e-7)

    return precision, recall, f1
def generate_grid(selection, model, initial_conditions, t_0, t_final, size, perc=1):
    perc_size = int(size * perc)
    grid = torch.linspace(t_0, t_final, size).reshape(-1, 1)
    if not selection:
        return grid
    # TODO MAKE JACOBIAN AND LOSS GENERALIZABLE TO DIFFERENT LOSSES
    elif selection == 'jacobian':
        jacobians = compute_jacobian(grid, model, initial_conditions, norm='l2')
        indices = torch.argsort(torch.Tensor(jacobians), descending=True)
        return grid[indices][:perc_size]
    elif selection == 'loss':
        losses = compute_losses(grid, model, initial_conditions)
        indices = torch.argsort(torch.Tensor(losses), descending=True)
        return grid[indices][:perc_size]
    elif selection == 'exponential':
        start = 0
        end = np.log(t_final)
        grid = torch.logspace(start=start, end=end, steps=perc_size, base=np.exp(1))
        grid = grid.unsqueeze(dim=1)
        return grid
    elif selection == 'inv_exponential':
        start = np.log(t_final)
        end = 0
        grid = torch.logspace(start=start, end=end, steps=perc_size, base=np.exp(1))
        grid = grid.unsqueeze(dim=1)
        return grid
    else:
        raise ValueError("Selection must be one of "
                         "['jacobian', 'loss', 'exponential', 'inv_exponential']")
def get_samples(self, steps=None):
    if steps is None:
        steps = self.steps
    if self.sampling == "linear":
        candidate_set = torch.linspace(self.lower, self.upper, steps)
    elif self.sampling == "log_uniform":
        candidate_set = torch.logspace(self.lower, self.upper, steps)
    else:
        raise ValueError("Sampling can only handle linear or log_uniform")
    return candidate_set
def test2():
    import pyro
    import pylab as plt

    alpha = torch.tensor(0.0, requires_grad=True)
    log_prob = lambda x: x**alpha
    grid = torch.logspace(-1, 1, 100)
    x = pyro.sample("x", InverseTransformSampling(log_prob, grid).expand_by((10000,)))
    x.sum().backward()
    print(alpha.grad)
    plt.hist(x.detach().numpy())
    plt.show()
def logspace(*args, **kwargs):
    """
    Creates a 1D :class:`Tensor` with logarithmically spaced values (see PyTorch's `logspace`).

    :param args: positional arguments forwarded to `torch.logspace`
    :param kwargs: keyword arguments forwarded to `torch.logspace`

    :return: a 1D :class:`Tensor`
    """
    return tn.Tensor([torch.logspace(*args, **kwargs)[None, :, None]])
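# Usage sketch for the wrapper above, assuming the tntorch-style `tn`
# namespace it is written against. The single core has shape (1, steps, 1),
# i.e. a rank-1 tensor-train holding `steps` values.
import torch

t = logspace(0, 2, steps=5)  # values 1, ~3.16, 10, ~31.6, 100
print(t.torch())             # assumes tntorch's .torch() decompression method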
def __init__(self, lower, upper, sampling="linear", steps=1000):
    if upper <= lower:
        raise RuntimeError("the lower bound {} has to be less than the "
                           "upper bound {}".format(lower, upper))
    super(Real, self).__init__()
    if sampling == "linear":
        self.candidate_set = torch.linspace(lower, upper, steps)
    elif sampling == "log_uniform":
        self.candidate_set = torch.logspace(lower, upper, steps)
    else:
        raise ValueError("Sampling can only handle linear or log_uniform")
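# Note that in the log_uniform branches above (here and in get_samples),
# lower and upper are interpreted as base-10 exponents, not as the bounds
# themselves. A quick illustration:
import torch

print(torch.linspace(-3, 0, 4))   # tensor([-3., -2., -1.,  0.])
print(torch.logspace(-3, 0, 4))   # tensor([0.0010, 0.0100, 0.1000, 1.0000])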
def fourier_encode(x, max_freq, num_bands=4, base=2):
    x = x.unsqueeze(-1)
    device, dtype, orig_x = x.device, x.dtype, x

    scales = torch.logspace(1., log(max_freq / 2) / log(base), num_bands,
                            base=base, device=device, dtype=dtype)
    scales = scales[(*((None,) * (len(x.shape) - 1)), Ellipsis)]

    x = x * scales * pi
    x = torch.cat([x.sin(), x.cos()], dim=-1)
    x = torch.cat((x, orig_x), dim=-1)
    return x
def __init__(self,
             in_features: int,
             out_features: int,
             scale: Union[float, List[float]] = 1.0,
             learnable: bool = False,
             init: str = 'log',
             compute_sin: bool = True,
             compute_cos: bool = True):
    super().__init__()
    assert compute_sin or compute_cos
    # scale may be a scalar or a per-feature list; every entry must be positive
    if isinstance(scale, (int, float)):
        assert scale > 0
    else:
        assert all(s > 0 for s in scale)
    if compute_sin and compute_cos:
        assert out_features % 2 == 0

    self.in_features = in_features
    self.out_features = out_features
    self.compute_sin = compute_sin
    self.compute_cos = compute_cos
    self.init = init

    out_size = (out_features // 2) if (self.compute_sin and self.compute_cos) else out_features
    if init == 'standard':
        basis = scale * torch.randn(out_size, in_features)
    elif init == 'uniform':
        basis = torch.FloatTensor(out_size, in_features).uniform_(-scale, scale)
    elif init == 'log':
        if isinstance(scale, (int, float)):
            scale = [scale for k in range(in_features)]
        else:
            assert len(scale) == in_features
        basis = []
        for i in range(in_features):
            basis.append(
                torch.logspace(start=-3, end=scale[i], base=2., steps=out_size - 2) * np.pi)
            basis[-1] = torch.cat((basis[-1], torch.tensor([0.0, 0.1])))
        basis = torch.stack(basis, dim=1)
    else:
        raise NotImplementedError(f'Unknown init: {init}')
    basis = basis.t()  # (C_in, C_out)

    if learnable:
        self.register_parameter("basis", nn.Parameter(basis))
    else:
        self.register_buffer("basis", basis)
def test_relaxed_beta_binomial():
    total_count = torch.arange(1, 17)
    concentration1 = torch.logspace(-1, 2, 8).unsqueeze(-1)
    concentration0 = concentration1.unsqueeze(-1)
    d1 = beta_binomial_dist(concentration1, concentration0, total_count)
    assert isinstance(d1, dist.ExtendedBetaBinomial)
    with set_relaxed_distributions():
        d2 = beta_binomial_dist(concentration1, concentration0, total_count)
    assert isinstance(d2, dist.Normal)
    assert_close(d2.mean, d1.mean)
    assert_close(d2.variance, d1.variance.clamp(min=_RELAX_MIN_VARIANCE))
def make_vec_eps(self, global_B, env_ranks):
    if (self.eps_final_min is not None and
            self.eps_final_min != self._eps_final_scalar):  # vector epsilon
        if self.alternating:  # In FF case, sampler sets agent.alternating.
            assert global_B % 2 == 0
            global_B = global_B // 2  # Env pairs will share epsilon.
            env_ranks = list(set([i // 2 for i in env_ranks]))
        self.eps_init = self._eps_init_scalar * torch.ones(len(env_ranks))
        global_eps_final = torch.logspace(
            torch.log10(torch.tensor(self.eps_final_min)),
            torch.log10(torch.tensor(self._eps_final_scalar)),
            global_B)
        self.eps_final = global_eps_final[env_ranks]
    self.eps_sample = self.eps_init
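# Both this method and the initialize method above use the same trick:
# final exploration epsilons spread log-uniformly across parallel
# environments. A standalone sketch with illustrative endpoints:
import math
import torch

eps_final_min, eps_final, B = 1e-4, 0.1, 4
eps_vec = torch.logspace(math.log10(eps_final_min), math.log10(eps_final), B)
print(eps_vec)  # tensor([1.0000e-04, 1.0000e-03, 1.0000e-02, 1.0000e-01])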
def __init__(self, shape, nbins):
    """Calculate binned power spectrum of 2-dim images.

    - Returns power integrated/summed over logarithmically spaced k-bins.
    - We adopt the convention that pixel size = unit length.
    - Note that sum(power) = var(img).
    """
    self.shape = shape
    self.nbins = nbins
    self.K = self._get_kgrid_2d(shape[0], shape[1])
    self.kedges = torch.logspace(
        torch.log10(min(self.K[1, 0], self.K[0, 1])),
        torch.log10(self.K.max()) + 0.001,
        nbins + 1)
    self.kmeans = (self.kedges[1:] * self.kedges[:-1])**0.5
def tensor_creation_ops(self):
    i = torch.tensor([[0, 1, 1], [2, 0, 2]])
    v = torch.tensor([3, 4, 5], dtype=torch.float32)
    real = torch.tensor([1, 2], dtype=torch.float32)
    imag = torch.tensor([3, 4], dtype=torch.float32)
    inp = torch.tensor([-1.5, 0.0, 2.0])
    values = torch.tensor([0.5])
    quantized = torch.quantize_per_channel(
        torch.tensor([[-1.0, 0.0], [1.0, 2.0]]),
        torch.tensor([0.1, 0.01]),
        torch.tensor([10, 0]),
        0,
        torch.quint8,
    )
    return (
        torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]]),
        # torch.sparse_coo_tensor(i, v, [2, 3]),  # does not work on iOS
        torch.as_tensor([1, 2, 3]),
        torch.as_strided(torch.randn(3, 3), (2, 2), (1, 2)),
        torch.zeros(2, 3),
        torch.zeros((2, 3)),
        torch.zeros([2, 3], out=i),
        torch.zeros(5),
        torch.zeros_like(torch.empty(2, 3)),
        torch.ones(2, 3),
        torch.ones((2, 3)),
        torch.ones([2, 3]),
        torch.ones(5),
        torch.ones_like(torch.empty(2, 3)),
        torch.arange(5),
        torch.arange(1, 4),
        torch.arange(1, 2.5, 0.5),
        torch.range(1, 4),
        torch.range(1, 4, 0.5),
        torch.linspace(3.0, 3.0, steps=1),
        torch.logspace(start=2, end=2, steps=1, base=2.0),
        torch.eye(3),
        torch.empty(2, 3),
        torch.empty_like(torch.empty(2, 3), dtype=torch.int64),
        torch.empty_strided((2, 3), (1, 2)),
        torch.full((2, 3), 3.141592),
        torch.full_like(torch.full((2, 3), 3.141592), 2.71828),
        torch.quantize_per_tensor(
            torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8
        ),
        torch.dequantize(quantized),
        torch.complex(real, imag),
        torch.polar(real, imag),
        torch.heaviside(inp, values),
    )
def get_precision_recall(args, score, label, num_samples, beta=1.0,
                         sampling='log', predicted_score=None):
    '''
    :param args:
    :param score: anomaly scores
    :param label: anomaly labels
    :param num_samples: the number of threshold samples
    :param beta: weight of precision in the F-score
    :param sampling: 'log' or 'linear' threshold sampling
    :return:
    '''
    if predicted_score is not None:
        score = score - torch.FloatTensor(predicted_score).squeeze().to(args.device)

    maximum = score.max()
    if sampling == 'log':
        # Sample thresholds logarithmically: the sampled thresholds are
        # logarithmically spaced between 10^start and 10^end.
        th = torch.logspace(0, torch.log10(torch.tensor(maximum)), num_samples).to(args.device)
    else:
        # Sample thresholds equally spaced between start and end.
        th = torch.linspace(0, maximum, num_samples).to(args.device)

    precision = []
    recall = []

    for i in range(len(th)):
        anomaly = (score > th[i]).float()
        idx = anomaly * 2 + label
        tn = (idx == 0.0).sum().item()  # true negatives
        fn = (idx == 1.0).sum().item()  # false negatives
        fp = (idx == 2.0).sum().item()  # false positives
        tp = (idx == 3.0).sum().item()  # true positives

        p = tp / (tp + fp + 1e-7)
        r = tp / (tp + fn + 1e-7)

        if p != 0 and r != 0:
            precision.append(p)
            recall.append(r)

    precision = torch.FloatTensor(precision)
    recall = torch.FloatTensor(recall)

    f1 = (1 + beta ** 2) * (precision * recall).div(beta ** 2 * precision + recall + 1e-7)

    return precision, recall, f1
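# The final line in both get_precision_recall versions implements the
# F-beta score, F_beta = (1 + beta^2) * p * r / (beta^2 * p + r).
# A quick numeric check on assumed toy values:
import torch

beta = 1.0
precision = torch.FloatTensor([0.8, 0.5])
recall = torch.FloatTensor([0.6, 0.5])
f1 = (1 + beta ** 2) * (precision * recall).div(beta ** 2 * precision + recall + 1e-7)
print(f1)  # ~tensor([0.6857, 0.5000]); harmonic mean of p and r when beta == 1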