Example #1
    def idwt(self, data):
        size = data.shape[-1]
        idx = int(np.log2(size))
        key = '{}_{}'.format(size, idx)
        if key not in self.iM_dict:
            if key in self.M_dict:
                # Invert the cached forward-transform matrices and apply
                # them in reverse order for synthesis
                self.iM_dict[key] = [t.inverse(item) for item in self.M_dict[key]]
                self.iM_dict[key].reverse()
            else:
                # Build each level's Haar synthesis matrix from scratch
                self.iM_dict[key] = []
                loop = idx
                while loop != 0:
                    w = t.eye(size, dtype=t.float, device=self.DEVICE)
                    w[:2 * loop, :2 * loop] = 0
                    for i in range(loop):
                        w[2 * i, i] = 0.5
                        w[2 * i + 1, i] = 0.5
                        w[2 * i, i + loop] = 0.5
                        w[2 * i + 1, i + loop] = -0.5
                    self.iM_dict[key].append(t.inverse(w))
                    loop -= 1
                self.iM_dict[key].reverse()
        # Apply the cached inverse-transform matrices level by level
        for w in self.iM_dict[key]:
            data = t.matmul(data, w)
        return data
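A quick way to sanity-check the matrices built above: a single-level matrix w is invertible, and multiplying by t.inverse(w) undoes the corresponding forward step. A minimal sketch, assuming torch is imported as t as in the snippet (the length-8 signal and single level are illustrative):

import torch as t

size, loop = 8, 4  # one level over a length-8 signal
w = t.eye(size)
w[:2 * loop, :2 * loop] = 0
for i in range(loop):
    w[2 * i, i] = 0.5
    w[2 * i + 1, i] = 0.5
    w[2 * i, i + loop] = 0.5
    w[2 * i + 1, i + loop] = -0.5

x = t.randn(1, size)
y = t.matmul(x, w)                 # forward (analysis) step
x_rec = t.matmul(y, t.inverse(w))  # inverse step, as in idwt
assert t.allclose(x_rec, x, atol=1e-5)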
Example #2
    def ir_logistic(self, X, w0, y_inner):
        # mm, t, and inv are assumed aliases for torch.mm,
        # torch.transpose, and torch.inverse
        # Iteration 0: start IRLS from the bias-only predictor eta = w0
        eta = w0  # + zeros
        mu = torch.sigmoid(eta)
        s = mu * (1 - mu)
        z = eta + (y_inner - mu) / s
        S = torch.diag(s)
        # Woodbury (push-through) form with regularization: inverts an
        # n x n matrix instead of d x d
        w_ = mm(t(X, 0, 1), inv(mm(X, t(X, 0, 1)) + self.lambda_(inv(S))))
        z_ = t(z.unsqueeze(0), 0, 1)
        w = mm(w_, z_)
        # Iterations 1...N
        for i in range(self.iterations - 1):
            eta = w0 + mm(X, w).squeeze(1)
            mu = torch.sigmoid(eta)
            s = mu * (1 - mu)
            z = eta + (y_inner - mu) / s
            S = torch.diag(s)
            z_ = t(z.unsqueeze(0), 0, 1)
            if not self.linsys:
                w_ = mm(t(X, 0, 1),
                        inv(mm(X, t(X, 0, 1)) + self.lambda_(inv(S))))
                w = mm(w_, z_)
            else:
                # Solve A w_ = z_ instead of forming the inverse
                # (torch.gesv was removed; torch.linalg.solve replaces it)
                A = mm(X, t(X, 0, 1)) + self.lambda_(inv(S))
                w_ = torch.linalg.solve(A, z_)
                w = mm(t(X, 0, 1), w_)

        return w
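Both branches compute the same IRLS update. Assuming self.lambda_ scales its argument by the regularization constant, the closed form (X^T S X + lam I)^-1 X^T S z equals the push-through form X^T (X X^T + lam S^-1)^-1 z used above, which needs only an n x n solve; that is the point of the Woodbury comment. A minimal numeric check with hypothetical sizes:

import torch

torch.manual_seed(0)
torch.set_default_dtype(torch.float64)  # double precision for a tight check
n, d, lam = 5, 20, 0.1
X = torch.randn(n, d)
s = torch.rand(n) + 0.1            # positive IRLS weights mu * (1 - mu)
S = torch.diag(s)
z = torch.randn(n)

# Standard IRLS step: (X^T S X + lam I_d)^-1 X^T S z, a d x d solve
w_std = torch.linalg.solve(X.T @ S @ X + lam * torch.eye(d), X.T @ S @ z)

# Push-through form: X^T (X X^T + lam S^-1)^-1 z, an n x n solve
w_wb = X.T @ torch.linalg.solve(X @ X.T + lam * torch.inverse(S), z)

assert torch.allclose(w_std, w_wb)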
Example #3
    def _generate(self):
        """
        Generate dataset
        :return:
        """
        # Sizes
        total_size = self.sample_len

        # List of samples
        samples = list()

        # XYZ
        xyz = self.xyz

        # Washout
        for t in range(self.washout):
            # Derivatives of the X, Y, Z state
            x_dot, y_dot, z_dot = self._rossler(xyz[0], xyz[1], xyz[2])

            # Apply changes
            xyz[0] += self.dt * x_dot
            xyz[1] += self.dt * y_dot
            xyz[2] += self.dt * z_dot
        # end for

        # For each sample
        for i in range(self.n_samples):
            # Tensor
            sample = torch.zeros(total_size, 3)

            # Time steps
            for t in range(1, self.sample_len):
                # Derivatives of the X, Y, Z state
                x_dot, y_dot, z_dot = self._rossler(xyz[0], xyz[1], xyz[2])

                # Apply changes
                xyz[0] += self.dt * x_dot
                xyz[1] += self.dt * y_dot
                xyz[2] += self.dt * z_dot

                # Set
                sample[t, 0] = xyz[0]
                sample[t, 1] = xyz[1]
                sample[t, 2] = xyz[2]
            # end for

            # Normalize
            if self.normalize:
                # torch.max/min along a dim return (values, indices) pairs;
                # min-max scale each dimension to [0, 1]
                maxval, _ = torch.max(sample, dim=0)
                minval, _ = torch.min(sample, dim=0)
                sample = (sample - minval) / (maxval - minval)
            # end if

            # Append
            samples.append(sample)
        # end for

        return samples
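self._rossler is not shown. The standard Rossler system is x' = -(y + z), y' = x + a*y, z' = b + z*(x - c); a sketch of what the helper presumably computes, with the common chaotic-regime parameters a = 0.2, b = 0.2, c = 5.7 as assumptions:

def _rossler(x, y, z, a=0.2, b=0.2, c=5.7):
    # Rossler system derivatives at the usual chaotic parameter values
    x_dot = -(y + z)
    y_dot = x + a * y
    z_dot = b + z * (x - c)
    return x_dot, y_dot, z_dot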
Example #4
    def _get_new_L(self, X, E1_arr, E2_arr):
        # Assumes aliases mul = torch.matmul and inv = torch.inverse
        arr = []
        for i in range(X.shape[1]):
            x = X[:, i]
            x = x.reshape(x.shape[0], -1)
            arr.append(mul(x, E1_arr[i].T))
        t1 = torch.stack(arr).sum(dim=0)

        t2 = inv(torch.stack(E2_arr).sum(dim=0))
        return mul(t1, t2)
Example #5
    def rr_standard(self, x, n_way, n_shot, I, yrr_binary, linsys):
        # Assumes aliases mm = torch.mm and inv = torch.inverse;
        # t(x, 0, 1) and t_(x) both transpose dims 0 and 1
        x /= np.sqrt(n_way * n_shot * self.n_augment)

        if not linsys:
            # Closed form: w = (x^T x + lambda I)^-1 x^T y
            w = mm(mm(inv(mm(t(x, 0, 1), x) + self.lambda_rr(I)), t(x, 0, 1)),
                   yrr_binary)
        else:
            # Solve the linear system instead of forming the inverse
            # (torch.gesv was removed; torch.linalg.solve replaces it)
            A = mm(t_(x), x) + self.lambda_rr(I)
            v = mm(t_(x), yrr_binary)
            w = torch.linalg.solve(A, v)

        return w
Example #6
    def rr_woodbury(self, x, n_way, n_shot, I, yrr_binary, linsys):
        x /= np.sqrt(n_way * n_shot * self.n_augment)

        if not linsys:
            # Woodbury form: w = x^T (x x^T + lambda I)^-1 y
            w = mm(mm(t(x, 0, 1), inv(mm(x, t(x, 0, 1)) + self.lambda_rr(I))),
                   yrr_binary)
        else:
            # torch.solve was removed; torch.linalg.solve takes (A, B)
            A = mm(x, t_(x)) + self.lambda_rr(I)
            v = yrr_binary
            w_ = torch.linalg.solve(A, v)
            w = mm(t_(x), w_)

        return w
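rr_standard and rr_woodbury are algebraically equivalent: (x^T x + lam I_d)^-1 x^T = x^T (x x^T + lam I_n)^-1, so the Woodbury variant solves an n x n system instead of a d x d one and wins whenever the feature dimension exceeds the number of support examples. A minimal check with plain tensors:

import torch

torch.manual_seed(0)
torch.set_default_dtype(torch.float64)  # double precision for a tight check
n, d, lam = 10, 50, 0.5
x = torch.randn(n, d)
y = torch.randn(n, 3)

# rr_standard form: solve the d x d system (x^T x + lam I_d) w = x^T y
w_std = torch.linalg.solve(x.T @ x + lam * torch.eye(d), x.T @ y)

# rr_woodbury form: solve the n x n system (x x^T + lam I_n) u = y, w = x^T u
w_wb = x.T @ torch.linalg.solve(x @ x.T + lam * torch.eye(n), y)

assert torch.allclose(w_std, w_wb)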
Example #7
def update_metric(generate_momentum, V, sample_var, metric):
    if metric == "diag_e":
        # Diagonal metric: use the reciprocal sample variances
        var = 1 / sample_var
        generate_momentum = generate_momentum_wrap(metric, var_vec=var)
        T = T_fun_wrap(metric, var=var, returns_float=False)
        H_fun = H_fun_wrap(V, T)
    elif metric == "dense_e":
        # Dense metric: use the inverse sample covariance
        var = torch.inverse(sample_var)
        generate_momentum = generate_momentum_wrap(metric, Cov=var)
        T = T_fun_wrap(metric, Cov=var, returns_float=False)
        H_fun = H_fun_wrap(V, T)
    else:
        raise ValueError("unknown metric: {}".format(metric))
    return (generate_momentum, H_fun)
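This presumably adapts an HMC mass matrix from warmup draws: diag_e uses the reciprocal per-dimension sample variances, dense_e the inverse sample covariance. A hypothetical call, where draws (warmup samples) and the potential function V are assumptions; the first argument is rebound inside both branches, so its initial value is unused:

# Hypothetical usage: adapt the metric from warmup draws of shape (n, dim)
sample_var = draws.var(dim=0)  # per-dimension variance estimate
generate_momentum, H_fun = update_metric(None, V, sample_var, "diag_e")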
Example #8
    def __getitem__(self, idx):
        """
        Get item
        :param idx:
        :return:
        """
        # Total size
        total_size = self.sample_len
        oldval = 1.2
        samples = list()

        # History
        history = 1.2 * torch.ones(
            self.history_len) + 0.2 * (torch.rand(self.history_len) - 0.5)

        # For each sample
        for n in range(self.n_samples):
            # Preallocate a tensor for the time series
            sample = torch.zeros(self.sample_len, 2)

            # For each time step
            step = 0
            for t in range(total_size):
                for _ in range(self.delta_t * self.subsample_rate):
                    step = step + 1
                    tauval = history[step % self.history_len]
                    newval = oldval + (0.2 * tauval / (1.0 + tauval**10) -
                                       0.1 * oldval) / self.delta_t
                    history[step % self.history_len] = oldval
                    oldval = newval
                # end for
                sample[t, 0] = newval
                sample[t, 1] = tauval
            # end for

            # Normalize
            if self.normalize:
                # torch.max/min along a dim return (values, indices) pairs;
                # min-max scale each dimension to [0, 1]
                maxval, _ = torch.max(sample, dim=0)
                minval, _ = torch.min(sample, dim=0)
                sample = (sample - minval) / (maxval - minval)
            # end if

            # Append
            samples.append(sample)
        # end for

        return samples
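The inner loop is an Euler-style step of the Mackey-Glass delay equation dx/dt = 0.2 * x(t - tau) / (1 + x(t - tau)^10) - 0.1 * x(t), with history acting as a circular buffer for the delayed value; note that the snippet divides the derivative by self.delta_t, treating it as an inverse step size. A standalone sketch of one step, with hypothetical names:

def mackey_glass_step(oldval, tauval, delta_t, beta=0.2, gamma=0.1):
    # One Euler-style update; delta_t acts as an inverse step size here,
    # mirroring the division in the snippet above
    return oldval + (beta * tauval / (1.0 + tauval ** 10) - gamma * oldval) / delta_t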
Example #9
    def _generate(self):
        """
        Generate dataset
        :return:
        """
        # Sizes
        total_size = self.sample_len

        # First position
        xy = self.xy

        # Samples
        samples = list()

        # Washout
        for t in range(self.washout):
            xy = self._henon(xy[0], xy[1])
        # end for

        # For each sample
        for n in range(self.n_samples):
            # Tensor
            sample = torch.zeros(total_size, 2)

            # Timesteps
            for t in range(total_size):
                xy = self._henon(xy[0], xy[1])
                sample[t] = xy
            # end for

            # Normalize
            if self.normalize:
                # torch.max/min along a dim return (values, indices) pairs;
                # min-max scale each dimension to [0, 1]
                maxval, _ = torch.max(sample, dim=0)
                minval, _ = torch.min(sample, dim=0)
                sample = (sample - minval) / (maxval - minval)
            # end if

            # Add
            samples.append(sample)
        # end for

        # Shuffle (assumes `from random import shuffle` at module level)
        shuffle(samples)

        return samples
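self._henon is not shown. The Henon map is x' = 1 - a*x^2 + y, y' = b*x, classically with a = 1.4, b = 0.3; a sketch of what the helper presumably computes, returning a length-2 tensor so that sample[t] = xy works:

import torch

def _henon(x, y, a=1.4, b=0.3):
    # One iterate of the Henon map at the classic chaotic parameters
    return torch.tensor([1.0 - a * x ** 2 + y, b * x])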
Example #10
    def _get_new_L(self, X, E1, E2):
        '''
        Returns the updated prediction for L(n, d).
        Assumes aliases mul = torch.matmul and inv = torch.inverse.
        '''
        L_pred = mul(mul(X, E1.T), inv(E2))
        return L_pred
Example #11
    def _get_beta(self, L, S):
        '''
        Returns B(d, n) from L(n, d) and S(n, n).
        '''
        B = mul(L.T, inv(S + mul(L, L.T)))
        return B
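For the shapes in the docstring to be consistent, mul must be a matrix product and inv a matrix inverse: B = L^T (S + L L^T)^-1 is then (d, n). A shape check under that assumption:

import torch

n, d = 6, 3
L = torch.randn(n, d)
S = torch.eye(n)
B = L.T @ torch.inverse(S + L @ L.T)  # matches mul(L.T, inv(S + mul(L, L.T)))
assert B.shape == (d, n)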
Example #12
    def _get_beta(self, L, S):
        B = mul(L.T, inv(S + mul(L, L.T)))
        return B
Example #13
    def sample(self, n, x=None):
        if x is None:
            # torch.autograd.Variable is deprecated; tensors work directly
            x = torch.randn(n, 2).cuda()
        # Invert the affine layer y = x W^T + b:  x = (y - b) (W^-1)^T
        return torch.matmul(x - self.fc.bias, torch.inverse(self.fc.weight).t())
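Since nn.Linear computes y = x W^T + b, the line above inverts it as x = (y - b) (W^-1)^T, which only works when the layer is square and its weight is invertible. A round-trip check on a hypothetical 2x2 layer:

import torch

torch.manual_seed(0)
fc = torch.nn.Linear(2, 2)  # square layer; random weight invertible a.s.
y = fc(torch.randn(4, 2))
# Invert y = x @ W.T + b  =>  x = (y - b) @ inverse(W).T
x = torch.matmul(y - fc.bias, torch.inverse(fc.weight).t())
assert torch.allclose(fc(x), y, atol=1e-4)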