Example #1
File: run.py Project: stsievert/FORTE
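These snippets are excerpted from run.py, so each function assumes module-level imports roughly like the following (the exact module paths inside FORTE are assumptions inferred from the names used below; all_triplets, logistic_noise, and mu are defined elsewhere in run.py):

import time

import numpy as np
import matplotlib.pyplot as plt

import utils
import NuclearNormPGD
import RankdPGD
import RankdPGDHingeLoss
import CrowdKernel
import CrowdKernelLoss
import LogisticLoss
import FactoredGradientSGDHingeLoss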
def run_NuclearNormPGD(n, d, plot=False):
    """
    Creates random data and finds an embedding.
    Inputs:
    n: The number of points
    d: The number of dimensions
    plot: Whether to plot the points
    """
    m = n * n  # number of triplet labels
    Xtrue = np.random.rand(n, d) / np.sqrt(d)
    # Xtrue = Xtrue - 1. / n * np.dot(np.ones((n, n)),  Xtrue)
    Mtrue = np.dot(Xtrue, Xtrue.transpose())  # Gram matrix of the true points
    max_norm = np.linalg.norm(Xtrue, axis=1).max()  # unused in this example
    Strain = utils.triplets(Xtrue, m, noise_func=None)
    Stest = utils.triplets(Xtrue, m, noise_func=None)

    # Estimate the Bayes loss of the logistic model by averaging the entropy
    # term -p*log(p) over all triplets; the sigmoid argument below equals
    # ||x_i - x_k||^2 - ||x_i - x_j||^2, rewritten via the Gram matrix Mtrue.
    ts = time.time()
    b = 0
    total = 0
    for i in range(n):
        for j in range(i):
            for k in range(n):
                if i != j and i != k and j != k:
                    p = 1 / (1 + np.exp(-(Mtrue[k, k] - 2 * Mtrue[i, k] +
                                          2 * Mtrue[i, j] - Mtrue[j, j])))
                    b += -p * np.log(p)
                    total += 1
    b = b / total
    print(time.time() - ts)
    print('Bayes loss: ', b)
    Mhat = NuclearNormPGD.computeEmbedding(n,
                                           d,
                                           Strain,
                                           max_iter_GD=200,
                                           trace_norm=4 * np.trace(Mtrue),
                                           epsilon=1e-12,
                                           verbose=True)
    emp_loss_train = utils.empirical_lossM(Mhat, Strain)
    emp_loss_test = utils.empirical_lossM(Mhat, Stest)
    print('Empirical Training loss = {},   '
          'Empirical Test loss = {},   '
          'Relative Error = {}'.format(
              emp_loss_train, emp_loss_test,
              np.linalg.norm(Mtrue - Mhat, 'fro')**2 /
              np.linalg.norm(Mtrue, 'fro')**2))
    if plot:
        _, Xhat = utils.transform_MtoX(Mhat, 2)
        _, Xpro, _ = utils.procrustes(Xtrue, Xhat)
        plt.figure(1)
        plt.subplot(121)
        plt.plot(*zip(*Xtrue), marker='o', color='r', ls='')
        plt.subplot(122)
        plt.plot(*zip(*Xpro), marker='o', color='b', ls='')
        plt.show()
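The triple loop above runs in O(n^3) Python-level iterations. A vectorized sketch of the same estimate (same quantity and the same j < i index set; every name other than Mtrue and n is new here) could be:

# D[a, b] = ||x_a - x_b||^2, recovered from the Gram matrix.
diag = np.diag(Mtrue)
D = diag[:, None] + diag[None, :] - 2 * Mtrue
# margin[i, j, k] = ||x_i - x_k||^2 - ||x_i - x_j||^2, as in the loop above.
margin = D[:, None, :] - D[:, :, None]
p = 1.0 / (1.0 + np.exp(-margin))
I, J, K = np.indices((n, n, n))
valid = (J < I) & (I != K) & (J != K)  # same index set as the loop
b = np.mean(-p[valid] * np.log(p[valid]))  # builds n^3 arrays; fine for small n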
Example #2
File: run.py Project: stsievert/FORTE
def run_RankdPGD(n, d, plot=False):
    """
    Creates random data and finds an embedding.
    Inputs:
    n: The number of points
    d: The number of dimensions
    plot: Whether to plot the points
    """
    m = n * n * n  # number of triplet labels (alternatively ~20 * n * d * np.log(n))

    # Generate random data points (the centering step is commented out here)
    Xtrue = np.random.randn(n, d)
    # Xtrue = Xtrue - 1. / n * np.dot(np.ones((n, n)), Xtrue)
    Mtrue = np.dot(Xtrue, Xtrue.transpose())  # Gram matrix of the true points
    print "Bayes Loss", LogisticLoss.getLossX(Xtrue, all_triplets(Xtrue))

    # Strain = utils.triplets(Xtrue, m, logistic_noise)
    Strain = utils.triplets(Xtrue, m)
    print "Xtrue Empirical Loss", utils.empirical_lossX(Xtrue, Strain)
    print "Strain Log Loss", LogisticLoss.getLossX(Xtrue, Strain)

    # Stest = utils.triplets(Xtrue, m, logistic_noise)
    Stest = utils.triplets(Xtrue, m)
    Mhat = RankdPGD.computeEmbedding(n,
                                     d,
                                     Strain,
                                     max_iter_GD=100,
                                     num_random_restarts=0,
                                     epsilon=0.00001,
                                     verbose=True)
    emp_loss_train = utils.empirical_lossM(Mhat, Strain)
    emp_loss_test = utils.empirical_lossM(Mhat, Stest)
    print('Empirical Training loss = {},   '
          'Empirical Test loss = {},   '
          'Relative Error = {}'.format(
              emp_loss_train, emp_loss_test,
              np.linalg.norm(Mtrue - Mhat, 'fro')**2 /
              np.linalg.norm(Mtrue, 'fro')**2))
    if plot:
        _, Xhat = utils.transform_MtoX(Mhat, 2)
        _, Xpro, _ = utils.procrustes(Xtrue, Xhat)
        plt.figure(1)
        plt.subplot(121)
        plt.plot(*zip(*Xtrue), marker='o', color='r', ls='')
        plt.subplot(122)
        plt.plot(*zip(*Xpro), marker='o', color='b', ls='')
        plt.show()
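all_triplets is called above but not included in these excerpts. A hypothetical sketch consistent with how it is used (every triplet of distinct indices [i, j, k], ordered so that x_j is strictly closer to x_i than x_k is) might be:

def all_triplets(X):
    # Hypothetical reconstruction: enumerate all distinct (i, j, k) and keep
    # the orderings that agree with the true distances. O(n^3), so only
    # suitable for the small n used in these examples.
    n = X.shape[0]
    D = np.sum((X[:, None, :] - X[None, :, :])**2, axis=2)
    S = []
    for i in range(n):
        for j in range(n):
            for k in range(n):
                if i != j and i != k and j != k and D[i, j] < D[i, k]:
                    S.append([i, j, k])
    return S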
Example #3
File: run.py Project: stsievert/FORTE
def run_CK(n, d, plot=False):
    """
    Creates random data and finds an embedding.
    Inputs:
    n: The number of points
    d: The number of dimensions
    plot: Whether to plot the points
    """
    m = int(10 * n * d * np.log(n))  # number of triplet labels
    # Generate centered data points
    Xtrue = np.random.randn(n, d)
    print "CK Loss - Bayes", CrowdKernelLoss.getLoss(Xtrue,
                                                     all_triplets(Xtrue))
    # Strain = utils.triplets(Xtrue, m, ck_noise)
    Strain = utils.triplets(Xtrue, m, None)
    print "Empirical Loss on Strain", utils.empirical_lossX(Xtrue, Strain)
    print "CK Loss on Strain", CrowdKernelLoss.getLoss(Xtrue, Strain)

    # Stest = utils.triplets(Xtrue, m, ck_noise)
    Stest = utils.triplets(Xtrue, m, None)
    Xhat = CrowdKernel.computeEmbedding(n,
                                        d,
                                        Strain,
                                        mu=mu,  # assumed defined at module scope
                                        num_random_restarts=0,
                                        max_num_passes_SGD=16,
                                        max_iter_GD=50,
                                        max_norm=1.,
                                        epsilon=0.0001,
                                        verbose=True)

    emp_loss_train = utils.empirical_lossX(Xhat, Strain)
    emp_loss_test = utils.empirical_lossX(Xhat, Stest)
    print('Empirical Training loss = {},   '
          'CK Loss on all Triplets = {}'.format(
              emp_loss_train,
              CrowdKernelLoss.getLoss(Xhat, all_triplets(Xtrue))))

    if plot:
        _, Xpro, _ = utils.procrustes(Xtrue, Xhat)
        plt.figure(1)
        plt.subplot(121)
        plt.plot(*zip(*Xtrue), marker='o', color='r', ls='')
        plt.subplot(122)
        plt.plot(*zip(*Xpro), marker='o', color='b', ls='')

        plt.show()
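CrowdKernelLoss presumably implements the crowd-kernel model of Tamuz et al. (2011), in which mu smooths the probability that x_j is judged closer to x_i than x_k. A sketch of that per-triplet probability (the function name is hypothetical):

def ck_probability(X, i, j, k, mu):
    # Crowd-kernel model: p = (mu + ||x_i - x_k||^2)
    #                       / (2*mu + ||x_i - x_j||^2 + ||x_i - x_k||^2)
    dij = np.sum((X[i] - X[j])**2)
    dik = np.sum((X[i] - X[k])**2)
    return (mu + dik) / (2 * mu + dij + dik)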
Example #4
File: run.py Project: stsievert/FORTE
def run_RankdPGDHingeLoss(n, d, plot=False):
    """
    Creates random data and finds an embedding.
    Inputs:
    n: The number of points
    d: The number of dimensions
    plot: Whether to plot the points
    """
    m = n * n * n  # number of triplet labels (alternatively ~20 * n * d * np.log(n))
    # noise_func = logistic_noise  # switch to logistic noise if desired
    noise_func = None  # noiseless triplets

    # Generate centered data points
    Xtrue = np.random.rand(n, d)
    Xtrue = Xtrue - 1. / n * np.dot(np.ones((n, n)), Xtrue)
    Mtrue = np.dot(Xtrue, Xtrue.transpose())

    Strain = utils.triplets(Xtrue, m, noise_func=noise_func)
    Stest = utils.triplets(Xtrue, m, noise_func=noise_func)
    Mhat = RankdPGDHingeLoss.computeEmbedding(n,
                                              d,
                                              Strain,
                                              max_iter_GD=2000,
                                              num_random_restarts=0,
                                              epsilon=1e-10,
                                              verbose=True)
    emp_loss_train = utils.empirical_lossM(Mhat, Strain)
    emp_loss_test = utils.empirical_lossM(Mhat, Stest)
    print('performance:')
    print('Empirical Training loss = {},   '
          'Empirical Test loss = {},   '
          'Relative Error = {}'.format(
              emp_loss_train, emp_loss_test,
              np.linalg.norm(Mtrue - Mhat, 'fro')**2 /
              np.linalg.norm(Mtrue, 'fro')**2))
    if plot:
        _, Xhat = utils.transform_MtoX(Mhat, 2)
        _, Xpro, _ = utils.procrustes(Xtrue, Xhat)
        plt.figure(1)
        plt.subplot(121)
        plt.plot(*zip(*Xtrue), marker='o', color='r', ls='')
        plt.subplot(122)
        plt.plot(*zip(*Xpro), marker='o', color='b', ls='')
        plt.show()
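RankdPGDHingeLoss is not excerpted here; its per-triplet objective is presumably a hinge on the same Gram-matrix margin used in Example #1. A minimal sketch of that empirical loss (the function name is hypothetical):

def triplet_hinge_loss(M, S):
    # Hypothetical: each triplet [i, j, k] asks that x_j beat x_k by a unit
    # margin, ||x_i - x_k||^2 - ||x_i - x_j||^2, written via the Gram matrix M.
    loss = 0.0
    for i, j, k in S:
        margin = M[k, k] - 2 * M[i, k] + 2 * M[i, j] - M[j, j]
        loss += max(0.0, 1.0 - margin)
    return loss / len(S)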
Example #5
File: run.py Project: stsievert/FORTE
def run_FG(n, d, plot=False):
    """
    Creates random data and finds an embedding.
    Inputs:
    n: The number of points
    d: The number of dimensions
    plot: Whether to plot the points
    """
    m = n * n * n  # number of triplet labels (alternatively ~10 * n * d * np.log(n))

    # Generate centered data points
    Xtrue = np.random.randn(n, d)
    Xtrue = Xtrue - 1. / n * np.dot(np.ones((n, n)), Xtrue)
    max_norm = np.max(
        [np.linalg.norm(Xtrue[i]) for i in range(Xtrue.shape[0])])

    Strain = utils.triplets(Xtrue, m, noise_func=logistic_noise)
    Stest = utils.triplets(Xtrue, m, noise_func=logistic_noise)

    Xhat = FactoredGradientSGDHingeLoss.computeEmbedding(n,
                                                         d,
                                                         Strain,
                                                         num_random_restarts=0,
                                                         max_num_passes_SGD=16,
                                                         max_iter_GD=100,
                                                         max_norm=max_norm,
                                                         epsilon=5e-3,
                                                         verbose=True)

    emp_loss_train = utils.empirical_lossX(Xhat, Strain)
    emp_loss_test = utils.empirical_lossX(Xhat, Stest)
    print('Empirical Training loss = {},   '
          'Empirical Test loss = {}'.format(emp_loss_train, emp_loss_test))

    if plot:
        _, Xpro, _ = utils.procrustes(Xtrue, Xhat)
        plt.figure(1)
        plt.subplot(121)
        plt.plot(*zip(*Xtrue), marker='o', color='r', ls='')
        plt.subplot(122)
        plt.plot(*zip(*Xpro), marker='o', color='b', ls='')

        plt.show()
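logistic_noise is passed as noise_func above but not excerpted. A sketch consistent with the logistic model of Example #1 (the signature is an assumption; utils.triplets may invoke its noise_func differently):

def logistic_noise(X, q):
    # Hypothetical: keep candidate triplet q = [i, j, k] with probability
    # sigmoid(||x_i - x_k||^2 - ||x_i - x_j||^2); otherwise swap j and k,
    # simulating a noisy comparison oracle.
    i, j, k = q
    margin = np.sum((X[i] - X[k])**2) - np.sum((X[i] - X[j])**2)
    p = 1.0 / (1.0 + np.exp(-margin))
    return q if np.random.rand() < p else [i, k, j]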