Example #1
def mmd(x0, y0, h0=None):

    assert h0 is not None, 'illegal inputs'
    x = sharedX(np.copy(x0))
    h = sharedX(h0)

    if len(y0) > 5000:  # cap the reference sample at 5000 points to bound the kernel matrices
        y = sharedX(np.copy(y0[:5000]))
    else:
        y = sharedX(np.copy(y0))

    kxx = sqr_dist(x, x)
    kxy = sqr_dist(x, y)
    kyy = sqr_dist(y, y)

    kxx = T.exp(-kxx / h)
    kxy = T.exp(-kxy / h)
    kyy = T.exp(-kyy / h)

    m = x.shape[0].astype(theano.config.floatX)
    n = y.shape[0].astype(theano.config.floatX)

    sumkxx = T.sum(kxx)
    sumkxy = T.sum(kxy)
    sumkyy = T.sum(kyy)

    mmd = T.sqrt(sumkxx / (m * m) + sumkyy / (n * n) - 2. * sumkxy / (m * n))

    f = theano.function(inputs=[], outputs=[mmd])
    return f()
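The snippet above builds a Theano graph for the RBF-kernel MMD (V-statistic) between `x0` and a subsample of `y0`; `sharedX` and `sqr_dist` come from the surrounding codebase. A minimal NumPy cross-check of the same formula (standalone sketch, not the project's API):

import numpy as np

def mmd_numpy(x, y, h):
    # squared pairwise distances, as in sqr_dist
    def sqdist(a, b):
        return (np.sum(a**2, 1)[:, None] + np.sum(b**2, 1)[None, :]
                - 2.0 * a @ b.T)
    kxx = np.exp(-sqdist(x, x) / h)
    kxy = np.exp(-sqdist(x, y) / h)
    kyy = np.exp(-sqdist(y, y) / h)
    m, n = float(len(x)), float(len(y))
    return np.sqrt(kxx.sum() / m**2 + kyy.sum() / n**2
                   - 2.0 * kxy.sum() / (m * n))

x0 = np.random.randn(200, 2)
y0 = np.random.randn(300, 2) + 1.0
print(mmd_numpy(x0, y0, h=1.0))  # larger when the two samples differ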
Example #2
 def __call__(self, shape, name=None):
     flat_shape = (shape[0], numpy.prod(shape[1:]))
     a = rng.np_rng.normal(0.0, 1.0, flat_shape)
     u, _, v = numpy.linalg.svd(a, full_matrices=False)
     q = u if u.shape == flat_shape else v
     q = q.reshape(shape)
     return theano_utils.sharedX(self.scale * q[:shape[0], :shape[1]], name=name)
Example #3
 def __call__(self, shape, name=None):
     flat_shape = (shape[0], np.prod(shape[1:]))
     a = np_rng.normal(0.0, 1.0, flat_shape)
     u, _, v = np.linalg.svd(a, full_matrices=False)
     q = u if u.shape == flat_shape else v  # pick the one with the correct shape
     q = q.reshape(shape)
     return sharedX(self.scale * q[:shape[0], :shape[1]], name=name)
Example #4
 def __call__(self, shape, name=None):
     flat_shape = (shape[0], np.prod(shape[1:]))
     a = np_rng.normal(0.0, 1.0, flat_shape)
     u, _, v = np.linalg.svd(a, full_matrices=False)
     q = u if u.shape == flat_shape else v  # pick the one with the correct shape
     q = q.reshape(shape)
     return sharedX(self.scale * q[: shape[0], : shape[1]], name=name)
Example #5
def orthogonal(shape, scale=1.1):
    """ benanne lasagne ortho init (faster than qr approach)"""
    flat_shape = (shape[0], np.prod(shape[1:]))
    a = np.random.normal(0.0, 1.0, flat_shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    q = u if u.shape == flat_shape else v # pick the one with the correct shape
    q = q.reshape(shape)
    return sharedX(scale * q[:shape[0], :shape[1]])
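Examples #2 through #5 are all the same orthogonal ("benanne lasagne") initializer: draw a Gaussian matrix, take its thin SVD, and keep whichever of `u`/`v` matches the flattened shape; that factor has orthonormal rows. A quick NumPy sanity check of this property (standalone sketch):

import numpy as np

flat_shape = (64, 256)
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)  # u: (64, 64), v: (64, 256)
q = u if u.shape == flat_shape else v            # here q = v
# rows of q are orthonormal, so q @ q.T is the identity
print(np.allclose(q @ q.T, np.eye(flat_shape[0])))  # True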
Example #6
 def __call__(self, shape):
     if len(shape) == 2:
         scale = np.sqrt(2.0 / shape[0])
     elif len(shape) == 4:
         scale = np.sqrt(2.0 / np.prod(shape[1:]))
     else:
         raise NotImplementedError
     return sharedX(np_rng.normal(size=shape, scale=scale))
Example #7
 def __call__(self, shape, name=None):
     if len(shape) == 2:
         scale = np.sqrt(2./shape[0])
     elif len(shape) == 4:
         scale = np.sqrt(2./np.prod(shape[1:]))
     else:
         raise NotImplementedError
     return sharedX(np_rng.normal(size=shape, scale=scale), name=name)
Example #8
 def __call__(self, shape):
     if len(shape) == 2:
         scale = numpy.sqrt(2.0 / shape[0])
     elif len(shape) == 4:
         scale = numpy.sqrt(2.0 / numpy.prod(shape[1:]))
     else:
         raise NotImplementedError
     return theano_utils.sharedX(rng.np_rng.normal(size=shape, scale=scale))
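Examples #6 through #8 implement He initialization: draws from N(0, sqrt(2 / fan_in)), with fan_in taken as `shape[0]` for dense weights and `prod(shape[1:])` for 4-D conv filters. The same rule in plain NumPy (standalone sketch):

import numpy as np

def he_normal(shape):
    if len(shape) == 2:                   # dense: fan_in = input dim
        fan_in = shape[0]
    elif len(shape) == 4:                 # conv: fan_in = in_ch * kh * kw
        fan_in = np.prod(shape[1:])
    else:
        raise NotImplementedError
    return np.random.normal(size=shape, scale=np.sqrt(2.0 / fan_in))

print(he_normal((128, 64)).std())  # approximately sqrt(2/128) = 0.125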
Example #9
def orthogonal(shape, scale=1.1):
    """ benanne lasagne ortho init (faster than qr approach)"""
    flat_shape = (shape[0], np.prod(shape[1:]))
    a = np.random.normal(0.0, 1.0, flat_shape)
    u, _, v = np.linalg.svd(a, full_matrices=False)
    q = u if u.shape == flat_shape else v  # pick the one with the correct shape
    q = q.reshape(shape)
    return sharedX(scale * q[:shape[0], :shape[1]])
Example #10
    def get_hog(self, x_o):
        use_bin = self.use_bin
        NO = self.NO
        BS = self.BS
        nc = self.nc
        x = (x_o + sharedX(1)) / (sharedX(2))  # rescale inputs from [-1, 1] to [0, 1]
        Gx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) / 4.0
        Gy = Gx.T
        f1_w = []
        for i in range(NO):
            t = np.pi / NO * i
            g = np.cos(t) * Gx + np.sin(t) * Gy
            gg = np.tile(g[np.newaxis, np.newaxis, :, :], [1, 1, 1, 1])
            f1_w.append(gg)
        f1_w = np.concatenate(f1_w, axis=0)
        G = np.concatenate([
            Gx[np.newaxis, np.newaxis, :, :], Gy[np.newaxis, np.newaxis, :, :]
        ],
                           axis=0)
        G_f = sharedX(floatX(G))

        a = np.cos(np.pi / NO)
        l1 = sharedX(floatX(1 / (1 - a)))
        l2 = sharedX(floatX(a / (1 - a)))
        eps = sharedX(1e-3)
        if nc == 3:
            x_gray = T.mean(x, axis=1).dimshuffle(0, 'x', 1, 2)
        else:
            x_gray = x
        f1 = sharedX(floatX(f1_w))
        h0 = T.abs_(dnn_conv(x_gray, f1, subsample=(1, 1), border_mode=(1, 1)))
        g = dnn_conv(x_gray, G_f, subsample=(1, 1), border_mode=(1, 1))

        if use_bin:
            gx = g[:, [0], :, :]
            gy = g[:, [1], :, :]
            gg = T.sqrt(gx * gx + gy * gy + eps)
            hk = T.maximum(0, l1 * h0 - l2 * gg)

            bf_w = np.zeros((NO, NO, 2 * BS, 2 * BS))
            b = 1 - np.abs(
                (np.arange(1, 2 * BS + 1) - (2 * BS + 1.0) / 2.0) / BS)
            b = b[np.newaxis, :]
            bb = b.T.dot(b)
            for n in range(NO):
                bf_w[n, n] = bb

            bf = sharedX(floatX(bf_w))
            h_f = dnn_conv(hk,
                           bf,
                           subsample=(BS, BS),
                           border_mode=(BS // 2, BS // 2))  # integer division for valid padding
            return h_f
        else:
            return g
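`get_hog` builds its oriented-gradient filter bank by rotating a Sobel-like derivative pair `(Gx, Gy)` through `NO` angles. The construction in isolation (NumPy only, standalone sketch):

import numpy as np

NO = 4                                                     # number of orientations
Gx = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]) / 4.0  # horizontal derivative
Gy = Gx.T                                                  # vertical derivative
bank = []
for i in range(NO):
    t = np.pi / NO * i
    bank.append(np.cos(t) * Gx + np.sin(t) * Gy)  # derivative along angle t
bank = np.stack(bank)
print(bank.shape)  # (4, 3, 3): one 3x3 filter per orientation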
Example #11
 def __call__(self, shape, name=None):
     if shape[0] != shape[1]:
         w = np.zeros(shape)
         o_idxs = np.arange(shape[0])
         i_idxs = np.random.permutation(np.tile(np.arange(shape[1]), shape[0] // shape[1] + 1))[:shape[0]]  # integer repeat count for np.tile
         w[o_idxs, i_idxs] = self.scale
     else:
         w = np.identity(shape[0]) * self.scale
     return sharedX(w, name=name)
Example #12
def init_model(dim, Q, cond_num):
    def search_optm_cond(V, cond_num_star):
        min_c = 0
        max_c = 100

        if cond_num_star == 1:
            return 0, 1

        while True:
            curr_c = (min_c + max_c) / 2.
            M = np.eye(dim) + curr_c * V
            eigvals = np.linalg.eigvals(M)
            assert np.all(eigvals > 0), 'illegal input'
            val = np.max(eigvals) / np.min(eigvals)
            if val - cond_num_star > 0.3:
                max_c = curr_c
            elif val - cond_num_star < -0.3:
                min_c = curr_c
            else:
                return curr_c, val

    t0 = np.eye(dim)

    mu0 = sharedX(np_rng.uniform(-3, 3, size=(dim, )))
    V = np.dot(
        np.dot(Q.T, np.diag(1. + np.random.uniform(0, 1, size=(dim, )))), Q)
    coeff, cond_approx = search_optm_cond(V, cond_num)
    M = np.eye(dim) + coeff * V

    A0 = sharedX(np.linalg.inv(M))
    model_params = {'mu': mu0, 'A': A0}

    ### score function
    score_q = score_gaussian
    log_prob = logp_gaussian

    ## ground truth
    gt = np.random.multivariate_normal(mu0.get_value(),
                                       M, (10000, ),
                                       check_valid='raise')

    return model_params, score_q, log_prob, gt
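`search_optm_cond` bisects on the coefficient `c` until the condition number of `I + c*V` lands within 0.3 of the target; since `V` is positive definite, the condition number grows monotonically in `c`, so bisection applies. A standalone sketch of the same search, with a diagonal `V` whose spectrum makes the target reachable (hypothetical inputs):

import numpy as np

def search_cond(V, dim, target, tol=0.3):
    lo, hi = 0.0, 100.0
    while True:
        c = (lo + hi) / 2.0
        eig = np.linalg.eigvalsh(np.eye(dim) + c * V)
        val = eig.max() / eig.min()
        if val > target + tol:
            hi = c          # too ill-conditioned: shrink c
        elif val < target - tol:
            lo = c          # too well-conditioned: grow c
        else:
            return c, val

V = np.diag([1.0, 1.5, 2.0, 3.0])        # known spectrum for the demo
c, cond = search_cond(V, 4, target=2.0)
print(c, cond)                           # cond(I + c*V) is about 2 near c = 1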
Example #13
 def def_comp_mask(self):
     BS = self.BS
     
     t = time()
     m = T.tensor4()
     bf_w = np.ones((1, 1, 2 * BS, 2 * BS))
     bf = sharedX(floatX(bf_w))
     m_b = dnn_conv(m, bf, subsample=(BS, BS), border_mode=(BS // 2, BS // 2))
     _comp_mask = theano.function(inputs=[m], outputs=m_b)
     
     return _comp_mask
Example #14
 def def_comp_mask(self):
     BS = self.BS
     print('COMPILING')
     t = time()
     m = T.tensor4()
     bf_w = np.ones((1, 1, 2 * BS, 2 * BS))
     bf = sharedX(floatX(bf_w))
     m_b = dnn_conv(m, bf, subsample=(BS, BS), border_mode=(BS // 2, BS // 2))
     _comp_mask = theano.function(inputs=[m], outputs=m_b)
     print('%.2f seconds to compile [compMask] functions' % (time() - t))
     return _comp_mask
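Both `def_comp_mask` variants compile the same operation: a (2*BS, 2*BS) box filter applied with stride BS, so each output cell is the sum of an overlapping window of the input mask. Roughly equivalent NumPy logic, ignoring the conv's border padding (standalone sketch):

import numpy as np

BS = 4
m = np.ones((16, 16))                      # toy single-channel mask
out = np.array([[m[i:i + 2 * BS, j:j + 2 * BS].sum()
                 for j in range(0, m.shape[1] - BS, BS)]
                for i in range(0, m.shape[0] - BS, BS)])
print(out.shape, out.max())                # (3, 3), max window sum = (2*BS)**2 = 64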
Example #15
    def __call__(self, shape, name=None):
        w = np.zeros(shape)
        ycenter = shape[2]//2
        xcenter = shape[3]//2

        if shape[0] == shape[1]:
            o_idxs = np.arange(shape[0])
            i_idxs = np.arange(shape[1])
        elif shape[1] < shape[0]:
            o_idxs = np.arange(shape[0])
            i_idxs = np.random.permutation(np.tile(np.arange(shape[1]), shape[0] // shape[1] + 1))[:shape[0]]
        else:
            raise NotImplementedError  # shape[1] > shape[0] would leave the indices undefined
        w[o_idxs, i_idxs, ycenter, xcenter] = self.scale
        return sharedX(w, name=name)
Example #16
def init_model():

    G = nx.grid_2d_graph(args.grid, args.grid)
    A = np.asarray(nx.adjacency_matrix(G).todense())

    #A = A * np_rng.uniform(-0.1, 0.1, size=A.shape)
    noise = np.tril(np_rng.uniform(-0.1, 0.1, size=A.shape))
    noise = noise + noise.T
    A = A * noise

    np.fill_diagonal(A, np.sum(np.abs(A), axis=1) + .1)

    if not np.all(np.linalg.eigvals(A) > 0) or not np.all(
            np.linalg.eigvals(np.linalg.inv(A)) > 0):
        raise NotImplementedError

    b = np_rng.normal(0, 1, size=(args.grid**2, 1))

    mu0 = sharedX(np.dot(np.linalg.inv(A), b).flatten())
    A0 = sharedX(A)

    model_params = {'mu': mu0, 'A': A0}

    ## score function
    score_q = score_gaussian

    ## ground truth samples
    gt = np.random.multivariate_normal(mean=mu0.get_value().flatten(),
                                       cov=np.linalg.inv(A),
                                       size=(10000, ),
                                       check_valid='raise')

    ## adjacency matrix
    W = np.zeros(A.shape).astype(int)
    W[A != 0] = 1

    assert np.all(np.sum(W, axis=1) > 0), 'illegal inputs'
    assert np.sum((W - W.T)**2) < 1e-8, 'illegal inputs'
    return model_params, score_q, gt, W
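The diagonal fill above makes `A` strictly diagonally dominant with a positive diagonal, which for a symmetric matrix guarantees positive definiteness by Gershgorin's theorem, so the explicit eigenvalue check should never fire. A condensed NumPy illustration of that construction (standalone sketch; `args.grid` replaced by a literal):

import numpy as np
import networkx as nx

G = nx.grid_2d_graph(3, 3)
A = np.asarray(nx.adjacency_matrix(G).todense()).astype(float)
A *= np.random.uniform(-0.1, 0.1, size=A.shape)
A = np.tril(A) + np.tril(A, -1).T                     # symmetrize the noisy weights
np.fill_diagonal(A, np.sum(np.abs(A), axis=1) + 0.1)  # strict diagonal dominance
print(np.all(np.linalg.eigvalsh(A) > 0))              # True: A is positive definite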
Example #17
 def init(self):
     naxes = len(self.out_shape)
     if naxes == 2 or naxes == 4:
         dim = self.out_shape[1]
     elif naxes == 3:
         dim = self.out_shape[-1]
     else:
         raise NotImplementedError
     self.g = inits.Constant(c=1.)(dim)
     self.b = inits.Constant(c=0.)(dim)
     self.u = inits.Constant(c=0.)(dim)
     self.s = inits.Constant(c=0.)(dim)
     self.n = sharedX(0.)
     self.params = [self.g, self.b]
Example #18
def ksd_eval(X0, h0, score_q, **model_params):

    X = sharedX(X0)
    h = sharedX(h0)

    Sqx = score_q(X, **model_params)

    H = sqr_dist(X, X)
    h = T.sqrt(h / 2.)  # note: immediately overwritten by the median heuristic below

    V = H.flatten()
    # median distance
    h = T.switch(
        T.eq((V.shape[0] % 2), 0),
        # if even vector
        T.mean(T.sort(V)[((V.shape[0] // 2) - 1):((V.shape[0] // 2) + 1)]),
        # if odd vector
        T.sort(V)[V.shape[0] // 2])

    # compute the rbf kernel
    Kxy = T.exp(-H / h**2 / 2.)

    Sqxdy = -(T.dot(Sqx, X.T) - T.tile(
        T.sum(Sqx * X, axis=1).dimshuffle(0, 'x'), (1, X.shape[0]))) / (h**2)

    dxSqy = T.transpose(Sqxdy)
    dxdy = (-H / (h**4) + X.shape[1].astype(theano.config.floatX) / (h**2))

    M = (T.dot(Sqx, Sqx.T) + Sqxdy + dxSqy + dxdy) * Kxy
    M2 = M - T.diag(T.diag(M))

    ksd_u = T.sum(M2) / (X.shape[0] * (X.shape[0] - 1))
    ksd_v = T.sum(M) / (X.shape[0]**2)

    f = theano.function(inputs=[], outputs=[ksd_u, ksd_v])

    return f()
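`ksd_eval` returns the U-statistic and V-statistic estimates of the kernelized Stein discrepancy; `score_q` must return the score ∇x log q(x) as a symbolic expression. A usage sketch with a standard-Gaussian target, whose score is simply -x (hypothetical `score_q`, assuming the Theano setup above):

import numpy as np

def score_standard_normal(X):
    # grad_x log N(x; 0, I) = -x (works elementwise on Theano tensors)
    return -X

X0 = np.random.randn(500, 2)
ksd_u, ksd_v = ksd_eval(X0, 1.0, score_standard_normal)
print(ksd_u, ksd_v)   # both near zero when X0 is drawn from the target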
Example #19
def random_features_kernel(x, n_features, score_q, fixed_weights=True, cos_feat_dim_scale=True, **model_params):
    dim = x.get_value().shape[1]

    H = sqr_dist(x, x)
    h = median_distance(H)
    #h = select_h_by_ksdv(x, score_q, **model_params)
    gamma = 1./h

    if fixed_weights:
        random_offset = sharedX(np_rng.uniform(0, 2*pi, (n_features,)))
        weights = sharedX(np_rng.normal(0, 1, (dim, n_features)))
        random_weights = T.sqrt(2*gamma) * weights
    else:
        #random_weights = T.sqrt(2 * gamma) * t_rng.normal(
        #     (x.shape[1], n_features))

        #random_offset = t_rng.uniform((n_features,), 0, 2 * pi)
        raise NotImplementedError

    if cos_feat_dim_scale:
        alpha = T.sqrt(2.) / T.sqrt(n_features).astype(theano.config.floatX)
    else:
        alpha = T.sqrt(2.)

    coff = T.dot(x, random_weights) + random_offset
    projection = alpha * T.cos(coff)
    
    kxy = T.dot(projection, projection.T)

    sinf = -alpha*T.sin(coff)
    wd = random_weights.T

    inner = T.sum(sinf, axis=0).dimshuffle(0,'x') * wd
    dxkxy = T.dot(projection, inner)

    return kxy, dxkxy
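This is the random Fourier feature construction: for the RBF kernel k(x, y) = exp(-γ‖x − y‖²), drawing w ~ N(0, 2γI) and b ~ U[0, 2π] gives E[φ(x)φ(y)] = k(x, y) with φ(z) = sqrt(2/D)·cos(wᵀz + b). A NumPy cross-check of the approximation (standalone sketch):

import numpy as np

def rff_kernel(x, y, gamma, n_features=4000, seed=0):
    rng = np.random.RandomState(seed)
    w = np.sqrt(2.0 * gamma) * rng.normal(size=(x.shape[0], n_features))
    b = rng.uniform(0.0, 2.0 * np.pi, n_features)
    phi = lambda z: np.sqrt(2.0 / n_features) * np.cos(z @ w + b)
    return phi(x) @ phi(y)

x, y = np.random.randn(3), np.random.randn(3)
gamma = 0.5
print(rff_kernel(x, y, gamma))               # Monte Carlo approximation
print(np.exp(-gamma * np.sum((x - y)**2)))   # exact RBF value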
Example #20
 def init(self):
     naxes = len(self.out_shape)
     if naxes == 2 or naxes == 4:
         dim = self.out_shape[1]
     elif naxes == 3:
         dim = self.out_shape[-1]
     else:
         raise NotImplementedError
     self.g = inits.Constant(c=1.)(dim)
     self.b = inits.Constant(c=0.)(dim)
     self.u = inits.Constant(c=0.)(dim)
     self.s = inits.Constant(c=0.)(dim)
     self.n = sharedX(0.)
     self.params = [self.g, self.b]
     self.other_params = [self.u, self.s, self.n]
Example #21
 def __call__(self, vocab, name=None):
     t = time.time()
     w2v_vocab = sklearn.externals.joblib.load(
         os.path.join(self.data_dir, '3m_w2v_gn_vocab.jl'))
     w2v_embed = sklearn.externals.joblib.load(
         os.path.join(self.data_dir, '3m_w2v_gn.jl'))
     mapping = {}
     for i, w in enumerate(w2v_vocab):
         w = w.lower()
         if w in mapping:
             mapping[w].append(i)
         else:
             mapping[w] = [i]
     widxs, w2vidxs = list(), list()
     for i, w in enumerate(vocab):
         w = w.replace('`', "'")
         if w in mapping:
             w2vi = min(mapping[w])
             w2vidxs.append(w2vi)
             widxs.append(i)
     w = numpy.zeros((len(vocab), w2v_embed.shape[1]))
     w[widxs, :] = w2v_embed[w2vidxs, :] / 2.0
     return theano_utils.sharedX(w, name=name)
Example #22
 def __call__(self, vocab, name=None):
     t = time()
     w2v_vocab = joblib.load(os.path.join(self.data_dir, "3m_w2v_gn_vocab.jl"))
     w2v_embed = joblib.load(os.path.join(self.data_dir, "3m_w2v_gn.jl"))
     mapping = {}
     for i, w in enumerate(w2v_vocab):
         w = w.lower()
         if w in mapping:
             mapping[w].append(i)
         else:
             mapping[w] = [i]
     widxs = []
     w2vidxs = []
     for i, w in enumerate(vocab):
         w = w.replace("`", "'")
         if w in mapping:
             w2vi = min(mapping[w])
             w2vidxs.append(w2vi)
             widxs.append(i)
     # w = np_rng.uniform(low=-0.05, high=0.05, size=(len(vocab), w2v_embed.shape[1]))
     w = np.zeros((len(vocab), w2v_embed.shape[1]))
     w[widxs, :] = w2v_embed[w2vidxs, :] / 2.0
     return sharedX(w, name=name)
Example #23
 def __call__(self, vocab, name=None):
     t = time()
     w2v_vocab = joblib.load(
         os.path.join(self.data_dir, '3m_w2v_gn_vocab.jl'))
     w2v_embed = joblib.load(os.path.join(self.data_dir, '3m_w2v_gn.jl'))
     mapping = {}
     for i, w in enumerate(w2v_vocab):
         w = w.lower()
         if w in mapping:
             mapping[w].append(i)
         else:
             mapping[w] = [i]
     widxs = []
     w2vidxs = []
     for i, w in enumerate(vocab):
         w = w.replace('`', "'")
         if w in mapping:
             w2vi = min(mapping[w])
             w2vidxs.append(w2vi)
             widxs.append(i)
     # w = np_rng.uniform(low=-0.05, high=0.05, size=(len(vocab), w2v_embed.shape[1]))
     w = np.zeros((len(vocab), w2v_embed.shape[1]))
     w[widxs, :] = w2v_embed[w2vidxs, :] / 2.
     return sharedX(w, name=name)
Example #24
 def __call__(self, shape):
     return sharedX(np.identity(shape[0]) * self.scale)
Example #25
def normal(shape, scale=0.05):
    return sharedX(np.random.randn(*shape) * scale)
Example #26
def uniform(shape, scale=0.05):
    return sharedX(np.random.uniform(low=-scale, high=scale, size=shape))
Example #27
 def __call__(self, shape):
     return sharedX(np.ones(shape) * self.c)
Example #28
 def __call__(self, shape):
     return sharedX(np.ones(shape) * self.c)
Example #29
 def __call__(self, shape, name=None):
     r = np_rng.normal(loc=0, scale=0.01, size=shape)
     r = r/np.sqrt(np.sum(r**2))*np.sqrt(shape[1])
     return sharedX(r, name=name)
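Example #29 rescales a small Gaussian draw so that its total squared norm is exactly `shape[1]`. A quick NumPy check of the invariant (standalone sketch):

import numpy as np

shape = (10, 7)
r = np.random.normal(loc=0, scale=0.01, size=shape)
r = r / np.sqrt(np.sum(r**2)) * np.sqrt(shape[1])
print(np.sum(r**2))   # 7.0: the Frobenius norm is sqrt(shape[1]) by construction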
Example #30
 def __call__(self, shape, name=None):
     return sharedX(np.ones(shape) * self.c, name=name)
Example #31
 def __call__(self, shape, name=None):
     return sharedX(np_rng.uniform(low=-self.scale, high=self.scale, size=shape), name=name)
Example #32
 def __call__(self, shape, name=None):
     return sharedX(np_rng.normal(loc=self.loc, scale=self.scale, size=shape), name=name)
Example #33
 def __call__(self, shape):
     return theano_utils.sharedX(numpy.identity(shape[0]) * self.scale)
Example #34
 def __call__(self, shape, name=None):
     return theano_utils.sharedX(rng.np_rng.normal(
         loc=self.loc, scale=self.scale, size=shape), name=name)
Example #35
 def __call__(self, shape):
     return theano_utils.sharedX(
         rng.np_rng.uniform(low=-self.scale, high=self.scale, size=shape))
Example #36
 def __call__(self, shape):
     return sharedX(np_rng.uniform(low=-self.scale, high=self.scale, size=shape))
Example #37
 def __call__(self, shape, name=None):
     r = rng.np_rng.normal(loc=0, scale=0.01, size=shape)
     r = r/numpy.sqrt(numpy.sum(r**2))*numpy.sqrt(shape[1])
     return theano_utils.sharedX(r, name=name)
Example #38
 def __call__(self, shape, name=None):
     return sharedX(np_rng.normal(loc=self.loc, scale=self.scale, size=shape), name=name)
Example #39
 def __call__(self, shape):
     return theano_utils.sharedX(numpy.ones(shape) * self.c)
Example #40
 def __call__(self, shape, name=None):
     r = np_rng.normal(loc=0, scale=0.01, size=shape)
     r = r / np.sqrt(np.sum(r ** 2)) * np.sqrt(shape[1])
     return sharedX(r, name=name)
Example #41
def normal(shape, scale=0.05):
    # Note: the asterisk unpacks the `shape` tuple into separate positional
    # arguments to the function.
    return sharedX(np.random.randn(*shape) * scale)
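The unpacking the comment describes, in isolation: `*shape` splats the tuple into separate positional arguments (standalone sketch):

import numpy as np

shape = (3, 4)
a = np.random.randn(*shape)   # equivalent to np.random.randn(3, 4)
print(a.shape)                # (3, 4)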
Example #42
 def __call__(self, shape):
     return sharedX(np.identity(shape[0]) * self.scale)
Example #43
def uniform(shape, scale=0.05):
    return sharedX(np.random.uniform(low=-scale, high=scale, size=shape))