Example #1
from itertools import product

import numpy as np
from scipy import sparse


def project(args):
    # Python 3 removed tuple parameters, so unpack manually.
    T, A_mat, b_vec, marginals, gamma, eps = args
    K = len(T)

    # Product-of-marginals ("independent") distribution over the 2^K binary states.
    x = np.zeros((2,) * K)
    for ind in product([0, 1], repeat=K):
        x[ind] = np.prod([marginals[t] if state == 1 else 1 - marginals[t]
                          for t, state in zip(T, ind)])

    m_vec = np.matrix(x.reshape((x.size, 1)))
    temp = np.array(b_vec).reshape((2,) * K)

    print('projecting', T, 'A b', A_mat, b_vec)
    for k in range(K):
        # If any complete marginalization is 0 (i.e. an anchor never appears),
        # skip the optimization and return the independent estimate.
        if np.prod(temp.sum(tuple(set(range(K)) - {k}))) == 0:
            print('here')
            return T, np.array(m_vec).reshape((2,) * K)

    x_init = m_vec.copy()
    x_init = x_init / x_init.sum()

    def f(x, withGrad=False, args=''):
        # prog_args and the distance/gradient pairs are module-level globals.
        if 'L2' in prog_args:
            _dist, _grad = L2, gradL2
        elif 'smoothkl' in prog_args:
            _dist, _grad = smoothKL, gradsmoothKL
        else:
            _dist, _grad = KL, gradKL
        # Fit A_mat*x to the observed moments b_vec, regularized toward m_vec.
        kl = _dist(b_vec, A_mat * x) + gamma * _dist(m_vec, x)
        if withGrad:
            grad = (_grad(x, b_vec, A_mat).T
                    + gamma * _grad(x, m_vec, sparse.identity(m_vec.size)).T)
            return kl, grad
        return kl

    x, f_val, steps, gap = expGrad(f, x_init, eps=eps / float(len(T)), verbose=True)
    print('f steps gap', f_val, steps, gap)
    return T, np.array(x).reshape((2,) * K)
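
The snippet assumes module-level helpers (`prog_args`, the `KL`/`gradKL` pair, and the `expGrad` solver) that this page does not show. Below is a minimal sketch of what the KL pieces and an exponentiated-gradient solver might look like, with signatures inferred purely from the call sites above; treat every name and detail here as an assumption, not the original implementation.

# Hypothetical reconstructions, inferred from the call sites above; these are
# NOT the original module's implementations.
import numpy as np
from scipy import sparse as sp


def KL(p, q, eps=1e-12):
    # Generalized KL divergence between nonnegative column vectors p and q.
    p = np.asarray(p, dtype=float).ravel()
    q = np.asarray(q, dtype=float).ravel()
    return float(np.sum(p * np.log((p + eps) / (q + eps)) - p + q))


def gradKL(x, p, A):
    # Gradient of KL(p, A x) w.r.t. x: A^T (1 - p / (A x)), returned as a row
    # vector so the call sites above can transpose it.
    if sp.issparse(A):
        A = np.asarray(A.todense())
    else:
        A = np.asarray(A)
    Ax = np.asarray(A.dot(np.asarray(x).reshape(-1, 1))).ravel()
    p = np.asarray(p, dtype=float).ravel()
    g = A.T.dot(1.0 - p / np.maximum(Ax, 1e-12))
    return np.matrix(g.reshape(1, -1))


def expGrad(f, x0, eps=1e-5, verbose=False, lower=0, max_steps=10000, eta=1.0):
    # Exponentiated (mirror-descent) gradient over the probability simplex:
    # x <- x * exp(-eta * grad), renormalized each step. A fixed step size is
    # used here for simplicity; the real solver likely uses a line search.
    x = np.matrix(np.asarray(x0, dtype=float).reshape(-1, 1))
    x = x / x.sum()
    for step in range(max_steps):
        val, grad = f(x, withGrad=True)
        g = np.asarray(grad, dtype=float).reshape(-1, 1)
        # Convexity bound on suboptimality over the simplex:
        # f(x) - f(x*) <= g.x - min_i g_i.
        gap = float(np.asarray(x).ravel().dot(g.ravel()) - g.min())
        if verbose:
            print('step', step, 'val', val, 'gap', gap)
        if gap < eps:
            break
        x = np.matrix(np.asarray(x) * np.exp(-eta * g))
        x = x / x.sum()
    return x, val, step, gap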
Example #2
import sys

import numpy as np


def transform_to_latent(args):
    # Python 3 removed tuple parameters, so unpack manually.
    key, intent, noise_list, D = args
    if 'ground_truth' in sys.argv:
        print('short circuit')
        return tuple(key), D / float(D.sum()), 0

    new_k = []
    original_shape = D.shape
    latents = []
    observed = []

    # Align each intended variable with its observed proxy; positions where
    # they differ correspond to latent variables that must be recovered.
    for var, obs in zip(intent, key):
        if var == obs:
            new_k.append(obs)
            observed.append(key.index(obs))
        else:
            new_k.append(var)
            latents.append(var)

    if len(latents) == 0 or 'ground_truth' in sys.argv:
        return tuple(new_k), D / float(D.sum()), 0

    A, A_inv = calculate_adjustment(noise_list)

    order = len(key)

    counter = marginals[tuple(sorted(latents))].transpose(index_sort(latents))
    counter = counter / counter.sum()

    # Independent initializer: latent marginals times a uniform distribution
    # over the remaining observed variable (assumed to sit on the last axis).
    x_indep = np.zeros(original_shape)
    obs_index = new_k.index(key[observed[0]])
    assert obs_index == len(new_k) - 1, "observed index is always last?"

    if len(original_shape) == 2:
        x_indep[:, 0] = counter / 2.0
        x_indep[:, 1] = counter / 2.0
    elif len(original_shape) == 3:
        x_indep[:, :, 0] = counter / 2.0
        x_indep[:, :, 1] = counter / 2.0
    else:
        raise AssertionError('moment larger than expected!')
    x_indep = x_indep.reshape((x_indep.size, 1))

    D = np.matrix(D.reshape((D.size, 1)))
    D = D / D.sum()
    A = np.matrix(A)
    gamma = 0.1

    def _f(X, withGrad=False):
        # KL fit of A*X to the noisy moments D, regularized toward independence.
        val = KL(D, A * X) + gamma * KL(x_indep, X)
        if withGrad:
            g = (gradKL(X, D, A).T
                 + gamma * gradKL(X, x_indep, np.matrix(np.identity(x_indep.size))).T)
            return val, g
        return val

    res, val, steps, gap = expGrad(_f, x_indep.copy(), 1e-5, verbose=False, lower=0)

    return tuple(new_k), np.array(res).reshape(original_shape), val
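
For context, here is a hypothetical invocation. It reuses the helper sketches after Example #1 and substitutes stand-ins for the module-level `marginals`, `index_sort`, and `calculate_adjustment`, none of which this page shows; every value and stand-in below is illustrative only.

# Purely illustrative setup; the real pipeline builds these globals itself.
marginals = {(5,): np.array([0.3, 0.7])}      # marginal of latent variable 5

def index_sort(vs):
    # Stand-in: permutation that sorts the latent axes.
    return tuple(np.argsort(vs))

def calculate_adjustment(noise_list):
    # Stand-in: uniform-noise mixing matrix on the 4 joint states, plus inverse.
    e = noise_list[0]
    A = (1 - e) * np.eye(4) + e / 4.0
    return A, np.linalg.inv(A)

key = (0, 1)                 # observed anchors
intent = (5, 1)              # variable 5 is latent; anchor 1 stays observed (last)
noise_list = [0.1]
D = np.array([[40., 10.], [5., 45.]])  # empirical joint counts over the anchors

new_key, X, val = transform_to_latent((key, intent, noise_list, D))
print(new_key)               # (5, 1)
print(X.sum())               # ~1.0: a normalized joint over (latent 5, anchor 1)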