Example #1
import torch
from sklearn.cluster import KMeans


def spcl(dfx, Nc, Nfactor):
    # Spectral clustering of the affinity matrix `dfx` into `Nc` clusters,
    # using the first `Nfactor` eigenvectors of the normalized Laplacian.
    N = dfx.size(0)
    S = ((dfx + dfx.t()) / 2).float()   # symmetrize the affinities
    degs = S.sum(1)
    Ds = torch.eye(N)
    Ds[Ds == 1] = degs                  # degree matrix
    Ls = Ds - S                         # unnormalized Laplacian
    D = torch.eye(N)
    degs[degs == 0] = 0.000001          # guard against isolated nodes
    D[D == 1] = 1 / (degs**.5)
    Ls = D.mm(Ls).mm(D)                 # symmetric normalization D^-1/2 L D^-1/2
    # Eigenvectors come back in ascending eigenvalue order; keep the first Nfactor.
    # (torch.symeig is deprecated in recent PyTorch; torch.linalg.eigh is the replacement.)
    _, v = Ls.symeig(eigenvectors=True)
    F = v[:, :Nfactor]
    #print(F[:10, :])
    return KMeans(n_clusters=Nc, random_state=0).fit(
        F.detach().numpy()).labels_, F.detach().numpy()
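
A minimal usage sketch, assuming a dense affinity matrix built with an RBF kernel; the data and kernel width below are illustrative and not part of the original project:

# Illustrative call: RBF affinities on random 2-D points (assumed inputs).
X = torch.randn(100, 2)
A = torch.exp(-torch.cdist(X, X) ** 2 / 2.0)   # RBF kernel, sigma = 1
labels, F = spcl(A, Nc=3, Nfactor=3)           # 3 clusters from a 3-D embedding
print(labels[:10], F.shape)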
Example #2
import torch
import matplotlib.pyplot as plt


def plotnormvsN(meas, A, lw=100, rb=1500, points=150):
    # Sweep the power applied to the propagation matrix A over [lw, rb] and
    # plot the norm returned by the project-specific solver `l1solv`.
    N1 = torch.linspace(lw, rb, points)
    F = torch.zeros_like(N1)
    # Example data generation, kept from the original for reference:
    # meas, measn, u, A, Fw = gensingledata(n=500, alpha=20, m=10, Ncount=1000, sigma=1)
    # plt.plot(u)
    # plt.plot(meas)
    # plt.show()
    # print(torch.norm(meas, 1) / torch.norm(u, 1))
    for i in range(points):
        F[i] = l1solv(torch.matrix_power(A, int(N1[i])), meas, 2)
    plt.plot(N1.detach().numpy(), F.detach().numpy())
    plt.xlabel('Ncount')
    plt.ylabel('Defined norm')
    # plt.title('network norm')
    # plt.savefig('figures/network_norm_N10000.svg', format='svg', dpi=1200)
    return F, N1
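
A sketch of how the function might be invoked, assuming the project's `l1solv` solver is importable; the matrix and measurement below are placeholders:

# Placeholder inputs; l1solv must come from the same project for this to run.
A = 0.99 * torch.eye(10)        # stand-in propagation matrix
meas = torch.randn(10)          # stand-in measurement vector
F, N1 = plotnormvsN(meas, A, lw=100, rb=1500, points=50)
plt.show()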
Example #3
    def constraint(self):
        # Build the dictionary consumed by the outer optimizer: signed data-fit
        # residuals, their Jacobians w.r.t. every trainable parameter, and an
        # orthogonality-style penalty on the weight matrices as the objective.
        i, o, do, cl = self.B
        clsign = (2 * cl - 1)  # map binary class labels {0, 1} to signs {-1, +1}
        # ipdb.set_trace()
        # def fun(*args):
        #     # self.model.load_state_dict(w)
        #     for p, pm in zip(self.model.parameters(), args):
        #         p.data = pm
        #     return self.model._net(i)
        # J = jacobian(fun, tuple(self.model.parameters()))
        self.zero_grad()

        # One Jacobian block per trainable parameter: rows index samples,
        # columns index the flattened parameter entries.
        Jdata = {
            k: np.zeros((i.shape[0], p.flatten().shape[0]))
            for k, p in self.named_parameters() if p.requires_grad
        }

        ndim = i.shape[1]
        Jderiv = [{k: np.zeros_like(j)
                   for k, j in Jdata.items()} for d in range(ndim)]

        o_ = torch.zeros(o.shape)
        do_ = torch.zeros(do.shape)
        deriv = torch.zeros(do.shape)
        for ind in range(i.shape[0]):

            ici = i[ind:ind + 1]
            ici.requires_grad = True

            oci = self.model(ici)
            o_[ind] = oci

            # Direct prediction
            oci.backward()
            # oci.backward(create_graph=True)
            for k, p in self.named_parameters():
                if p.requires_grad:
                    Jdata[k][ind] = (clsign[ind] *
                                     p.grad).detach().flatten().numpy()
                    p.grad *= 0  # clear the gradient before the next sample

            # Derivative prediction
            # for d in range(ndim):
            #     ici = i[ind:ind+1]
            #     ici.requires_grad = True
            #     self.model.zero_grad()

            #     oci = self.model(ici)
            #     doci = grad(oci, [ici], create_graph=True)[0].squeeze()

            #     # do_[ind] = ici.grad.detach()
            #     do_[ind] = doci.detach()
            #     # doci = ici.grad.squeeze()
            #     # self.model.zero_grad()
            #     # ipdb.set_trace()
            #     drv = (do[ind, d] ** 2 - doci[d] **  2)
            #     deriv[ind, d] = drv
            #     drv.backward()
            #     # drv.backward(retain_graph=True)
            #     for k, p in self.named_parameters():
            #         if p.requires_grad:
            #             if p.grad is not None:
            #                 Jderiv[d][k][ind] = p.grad.flatten().numpy()
            #                 p.grad *= 0

        funcs = {
            'data': ((o_ - o) * clsign).detach().squeeze().numpy(),
            'obj': 0.,
            'grads': {
                'data': Jdata,
                'obj': {k: Jdata[k][0] * 0
                        for k in Jdata}
            }
        }

        # for d in range(ndim):
        #     funcs['deriv_'+str(d)] = deriv[:, d].detach().squeeze().numpy()
        #     funcs['grads']['deriv_'+str(d)] = Jderiv[d]
        funcs['classification_loss'] = np.maximum(funcs['data'], 0).sum()

        F = 0
        self.model.zero_grad()
        for k, p in self.named_parameters():
            if p.requires_grad and 'weight' in k:
                # ipdb.set_trace()
                # funcs['grads'][k] = {k:grad(f, [p])[0].detach().flatten().numpy()}
                # Penalty that vanishes when p @ p.T @ p == p, i.e. when the
                # weight matrix is (semi-)orthogonal.
                F = F + (p @ p.T @ p - p).square().sum()
        F.backward()
        funcs['obj'] = F.detach().numpy()
        funcs['grads']['obj'] = {
            k: p.grad.detach().flatten().numpy()
            for k, p in self.named_parameters()
            if p.requires_grad and 'weight' in k
        }
        return funcs
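
A minimal sketch of inspecting the returned dictionary; `problem` stands in for an instance of the surrounding class, and the outer optimizer that actually consumes these values is not part of this snippet:

# Illustrative only: inspect the constraint output of an instance `problem`.
funcs = problem.constraint()
print('objective:', funcs['obj'])
print('hinge-style classification loss:', funcs['classification_loss'])
for name, J in funcs['grads']['data'].items():
    print(name, 'Jacobian block of shape', J.shape)  # (n_samples, n_params)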
Example #4
import time
from collections import Counter

import numpy as np
import torch
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cluster import SpectralClustering
from sklearn.metrics import normalized_mutual_info_score
# dmClustering, shapeNet and group_label_acc are project-specific helpers.


def testBlobs(tosave=False):
    # Compare the proposed clustering model against scikit-learn's spectral
    # clustering on a three-blob toy dataset and plot the results side by side.
    inputs, target = datasets.make_blobs(n_samples=600,
                                         cluster_std=[2.0, 1.5, 1.0])
    nsamples, nfeat = inputs.shape
    inputsTensor = torch.from_numpy(inputs).unsqueeze(1).float()
    ut = np.unique(target)
    print(nsamples, 'samples,', nfeat, 'dimensions, targets:',
          Counter(target))
    Nclusters = len(ut)
    Nsparse = 10
    Nfactor = 0
    batchsize = 25
    model = dmClustering(shapeNet(dim=nfeat),
                         Nfactor=Nfactor,
                         Nclusters=Nclusters)
    F, labels = model.unSupervisedLearner(inputsTensor,
                                          Nepochs=6,
                                          Ninner=1,
                                          sparsity=Nsparse,
                                          bsize=batchsize,
                                          lamda=1)

    print('clusters:', ''.join(str(l) for l in labels))
    print(Counter(labels))
    nmi = normalized_mutual_info_score(target, labels)
    print('Normalized mutual information(NMI): {:.2f}%'.format(nmi * 100))
    acc = group_label_acc(target, labels)
    print('Group accuracy: {:.2f}%'.format(acc * 100))

    # Baseline: scikit-learn spectral clustering, plus an embedding of the
    # points into the first two eigenvectors of the unnormalized Laplacian
    # built from its affinity matrix.
    model = SpectralClustering(n_clusters=2).fit(inputs)
    labels_spcl = model.labels_
    ss = torch.from_numpy(model.affinity_matrix_).float()
    N = ss.size(0)
    degs = ss.sum(1)
    Ds = torch.eye(N)
    Ds[Ds == 1] = degs
    Ls = Ds - ss
    # (torch.symeig is deprecated in recent PyTorch; torch.linalg.eigh is the replacement.)
    _, v = Ls.symeig(eigenvectors=True)
    f_spcl = v[:, :2].numpy()
    fig, ax = plt.subplots(ncols=5, figsize=(20, 3))
    c = ['b', 'r', 'm']
    for i, l in enumerate(np.unique(target)):
        id = (l == target)
        ax[0].scatter(inputs[id, 0], inputs[id, 1], marker='.', color=c[i])
    ax[0].set_title('Three Blobs')
    for i, l in enumerate(np.unique(labels_spcl)):
        id = (l == labels_spcl)
        ax[1].scatter(inputs[id, 0], inputs[id, 1], marker='.', color=c[i])
    ax[1].set_title('Spectral Clustering')
    for i, l in enumerate(np.unique(labels)):
        id = (l == labels)
        ax[2].scatter(f_spcl[id, 0], f_spcl[id, 1], marker='.', color=c[i])
    ax[2].set_title('Learnt Subspace')
    for i, l in enumerate(np.unique(labels)):
        id = (l == labels)
        ax[3].scatter(inputs[id, 0], inputs[id, 1], marker='.', color=c[i])
    ax[3].set_title('Proposed Clustering')
    for i, l in enumerate(np.unique(labels)):
        id = (l == labels)
        ax[4].scatter(F.detach().numpy()[id, 0],
                      F.detach().numpy()[id, 1],
                      marker='.',
                      color=c[i])
    ax[4].set_title('Learnt Subspace')
    plt.tight_layout()
    if tosave:
        fig.savefig('ClusteringCompare-ThreeBlobs.png')
        return 1
    else:
        plt.show(block=False)
        time.sleep(5)
        return 1
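
`group_label_acc` is a project-specific helper that is not shown here; clustering accuracy is commonly computed by matching predicted clusters to ground-truth classes with the Hungarian algorithm. A sketch under that assumption (the real helper may differ):

# Hypothetical stand-in for group_label_acc: best one-to-one matching between
# cluster ids and class ids, scored as overall accuracy (assumption only).
import numpy as np
from scipy.optimize import linear_sum_assignment

def group_label_acc(target, labels):
    target = np.asarray(target)
    labels = np.asarray(labels)
    k = int(max(target.max(), labels.max())) + 1
    counts = np.zeros((k, k), dtype=np.int64)
    for t, l in zip(target, labels):
        counts[l, t] += 1                      # rows: predicted, cols: true
    row, col = linear_sum_assignment(-counts)  # maximize matched counts
    return counts[row, col].sum() / target.size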
Example #5
import math

import numpy as np
import torch
import matplotlib.pyplot as plt

# `filters`, `get_f_approx`, `get_fourier_matrix`, `get_freq_objf` and the
# constants D, T and S are defined elsewhere in the surrounding project.


def __main__():
    torch.set_default_dtype(torch.float64)

    # We don't want the time-domain filter to have energy above an angular
    # frequency of pi, which corresponds to a frequency of 0.5.  The sampling
    # rate in the time domain (the f_t values) is D, so the relative
    # frequency of the cutoff is 0.5 / D.  (This would be the
    # arg to filter_utils.filters.high_pass_filter).  This will make an
    # inconveniently wide filter, though.  We are already penalizing these high
    # energies explicitly in the fourier space, up to T * pi, so we only really
    # need to penalize in the time domain for frequencies above this; that means
    # we can boost up the relative cutoff frequency by a factor of T, giving
    # us a cutoff frequency of 0.5 T / D.
    (f, filter_width) = filters.high_pass_filter(0.5 * T / D, num_zeros=10)
    filt = torch.tensor(f)  # convert from Numpy into Torch format.

    # f_approx is a hand-tuned function very close to the 'f' we want.  The
    # optimization gets stuck in nasty local minima, and we know where we are
    # going, so we use this as a constraint.
    f_approx = get_f_approx()
    f = f_approx.clone().detach().requires_grad_(True)

    M = get_fourier_matrix()
    print("M = {}".format(M))

    lrate = 0.00005  # Learning rate
    momentum = 0.995
    momentum_grad = torch.zeros(f.shape, dtype=torch.float64)

    for iter in range(2500):

        F = torch.mv(M, f)
        O = get_freq_objf(F, iter, (iter % 100 == 0))

        max_error = 0.02  # f should stay at least this close to f_approx.
        # Actually it's within 0.01, we're giving it a little
        # more freedom than that.  This is to get it in the
        # region of a solution that we know is good, and avoid
        # bad local minima.
        f_penalty1 = torch.max(torch.tensor([0.0]),
                               torch.abs(f - f_approx) - max_error).sum() * 5.0

        f_extended = torch.cat((torch.flip(f[1:], dims=[0]), f))
        f_extended_highpassed = torch.nn.functional.conv1d(
            f_extended.unsqueeze(0).unsqueeze(0),
            filt.unsqueeze(0).unsqueeze(0),
            padding=filter_width)
        f_extended_highpassed = f_extended_highpassed.squeeze(0).squeeze(0)
        f_penalty2 = torch.sqrt((f_extended_highpassed**2).sum() + 1.0e-20)

        highpassed_integral = (
            1.0 / D) * f_penalty2  # multiply by the distance between samples

        if (iter % 100 == 0):
            print(
                "f_penalty = {}+{}; integral of abs(highpassed-signal) = {} ".
                format(f_penalty1, f_penalty2, highpassed_integral))

        if (iter > 2000 and f_penalty1 != 0):
            raise RuntimeError(
                "Expected, at convergence, not to be encountering 1st penalty."
            )

        O = O + f_penalty1 + f_penalty2

        O.backward()
        with torch.no_grad():
            # Hand-rolled gradient descent with heavy-ball momentum on f.
            momentum_grad = (momentum * momentum_grad) + f.grad
            f -= momentum_grad * lrate
            f.grad.data.zero_()

    plt.plot((1.0 / D) * np.arange(S * D), f.detach())
    plt.plot((math.pi / D) * np.arange(D * T), F.detach())

    plt.ylabel('f_k, F_k')
    plt.grid()
    plt.show()
    print("F at pi/2 is: ", F[D // 2].item())
    print("F = ", repr(F.detach()))
    torch.set_printoptions(profile='full', precision=20)
    print("f = ", repr(f.detach()))