Example no. 1
def main():
    import os
    if not os.path.exists('data'):
        os.chdir('..')
    import evaluation
    import optimize
    import svm
    import data
    import numpy as np
    # `ag` (the project's autograd wrapper) and `levenshtein_distance_v2` are
    # assumed to be imported at module level alongside this function.

    for dataset in [0, 1, 2]:
        print('DATASET={}'.format(dataset))
        X = data.load(k=dataset)
        spec_k = data.precomputed_kernels(None, 'cum_spectrum_31')[0][dataset]

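        # Differentiable kernel: exponentiated, mean-normalised Levenshtein
        # distances (10 edit weights + 1 scale), mixed with the precomputed
        # cum_spectrum_31 kernel; all 12 parameters are kept in log-space.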
        def levenshtein_kernel_diff(params, I):
            factors = ag.exp(params)
            dists = levenshtein_distance_v2(X[I],
                                            X[I],
                                            weights=factors[:10],
                                            tqdm=False)
            scale = factors[10]
            return ag.exp(
                -dists / (dists.mean() + 1e-3) *
                scale) + factors[11] * spec_k[I][:, I].astype(np.float32)

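        # Jointly optimise the 12 kernel log-parameters θ and λ (presumably the
        # ridge regulariser) on random subsets of n points, scored by 2-fold
        # cross-validation.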
        n = 512
        num_folds = 2
        θ = ag.zeros(12)
        λ = ag.zeros(1)

        θ, λ, stats = optimize.optimize(
            kernel=levenshtein_kernel_diff,
            clf=optimize.KernelRidge,
            Y=data.train_Ys[dataset],
            indices=lambda: np.random.permutation(len(X))[:n],
            folds=lambda p: data.k_folds_indices(p, num_folds),
            θ=θ,
            λ=λ,
            β=1e2,
            iters=50,
            verbose=False,
        )
        print(θ, λ)

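        # Rebuild the full kernel with the learned parameters and evaluate an
        # SVM (C=10) on it with 20-fold cross-validation, three times.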
        K = levenshtein_kernel_diff(θ, np.arange(len(X))).data
        for _ in range(3):
            print(
                evaluation.evaluate(svm.SVC(C=10),
                                    K,
                                    data.train_Ys[dataset],
                                    folds=20))
Example no. 2
    def fit(self, k, y):
        n = k.shape[0]
        y = y.astype(np.float32) * 2 - 1  # map {0, 1} labels to {-1, +1}
        k, y = ag.tensors(k, y)

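        # Solve the bias-free SVM dual as a QP; assuming ag.qp follows the
        # cvxopt convention min_a 0.5*a'Pa + q'a s.t. Ga <= h, the constraints
        # below read 0 <= y_i*a_i <= C (hinge) and y_i*a_i >= 0 with the
        # regularised matrix K + I/(2C) (squared hinge).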
        if self.loss_ == 'hinge':
            self.alpha, _ = ag.qp(k,
                                  -y,
                                  ag.concatenate(
                                      (ag.diagflat(y), -ag.diagflat(y))),
                                  ag.concatenate(
                                      (self.C * ag.ones(n), ag.zeros(n))),
                                  options=dict(show_progress=False))
        elif self.loss_ == 'squared_hinge':
            self.alpha, _ = ag.qp(k + ag.eye(n) / (2 * self.C),
                                  -y,
                                  -ag.diagflat(y),
                                  ag.zeros(n),
                                  options=dict(show_progress=False))
        else:
            raise ValueError('unsupported loss: {}'.format(self.loss_))

        return self
Example no. 3
    def run():
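        # Sum the precomputed k-spectrum kernels for k = 1..13 into a single
        # base kernel matrix.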
        spectrum_kernels = []
        for k in range(1, 14):
            spectrum_kernels.append(
                precomputed_kernels(k_spectrum, 'spectrum_{}'.format(k), k=k))
        spectrum_K = np.sum([kernels[0][0] for kernels in spectrum_kernels],
                            axis=0).astype(float)
        del spectrum_kernels

        # print(ag.test.summary(spectrum_K))
        fake_K = ag.ones((len(spectrum_K), len(spectrum_K)))  # constant all-ones matrix used as a second base kernel

        K = ag.stack((spectrum_K, fake_K)).astype(np.float32)

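        # Differentiable kernel: combine the stacked base kernels with weights
        # exp(θ), restricted to the rows/columns indexed by I.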
        def spectrum_sum(θ, I):
            return ag.tensordot(K[np.index_exp[:] + np.ix_(I, I)],
                                ag.exp(θ),
                                axes=([0], [0]))

        n = 200
        num_folds = 2
        θ = ag.tensor([-10, 0])
        λ = ag.zeros(1)

        θ, λ, stats = optimize(
            kernel=spectrum_sum,
            clf=SVM,
            Y=train_Ys[0],
            indices=lambda: np.arange(n),
            folds=lambda p: k_folds_indices(p, num_folds),
            θ=θ,
            λ=λ,
            β=1e2,
            iters=100,
        )
        print(θ, λ)
Example no. 4
            self.delete_params(id(x))

            self.reset_minibatch_grad()
            return rescaled_params

        return x


if __name__ == "__main__":
    import sys
    sys.path.append('..')
    sys.path.append('../autograd')
    from autograd import Variable, zeros, Matrix
    from activation_functions import sigmoid

    W = zeros(1, 2)
    W.init_normal()
    b = zeros(1, 1)
    b.init_normal()

    x = zeros(2, 1)
    x.init_normal()  # so x is nonzero
    optimizer = Adam(0.001, 0.9, 0.999)

    target = zeros(1, 1)
    target[0][0] = 0.5
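    # Training loop: the loss |sigmoid(W * x + b) - 0.5| is printed every 100
    # steps; the Adam update of W and b is not shown in this excerpt.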
    for step in range(5000):
        f = sigmoid(W * x + b)
        output = (f - target).abs()
        if step % 100 == 0:
            print(output)
Example no. 5
if __name__ == "__main__":
    import sys
    sys.path.append('..')
    from autograd import Variable, Matrix, zeros
    from activation_functions import sigmoid, relu

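    # Two-layer ReLU network (2 -> 500 -> 1) with randomly initialised parameters.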
    W0 = zeros(500, 2)
    W0.init_normal()

    b0 = zeros(500, 1)
    b0.init_normal()

    W1 = zeros(1, 500)
    W1.init_normal()

    b1 = zeros(1, 1)
    b1.init_normal()

    x = Matrix([[Variable(1)], [Variable(2)]])

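    # Forward pass on a fixed input, then query the gradient of the output with
    # respect to each parameter matrix.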
    hidden = relu(W0 * x + b0)
    output = relu(W1 * hidden + b1)
    print(output)
    print(output.get_grad(W0))
    print(output.get_grad(b0))
    print(output.get_grad(W1))
    print(output.get_grad(b1))