Code example #1
import math

import matplotlib
import matplotlib.pyplot as plt
import numpy as np

# Helper functions referenced below (generate_set, generate_set_efficient,
# summary_stats_random_c, summary_stats_gumbels, gen_upper_bound_Z,
# gen_lower_bound_Z, gen_upper_bound_Z_conjecture, compute_cost_matrix,
# find_max_cost) are assumed to be defined elsewhere in this module.

def plot_estimators_vary_Z(n, trials=100, random=True):
    '''
    See how the estimators compare when varying Z with n fixed.
    Inputs:
    - n: int, dimension; we generate sets of Z vectors in {-1,1}^n
    - trials: int, solve the perturbed max problem this many times and take the mean
    - random: bool, True -> generate a random set, False -> generate a hypercube set
    '''
    Z_values = [2**i for i in range(n+1)]
    print(Z_values)
    gumbel_estimators = []
    barvinok_estimators = []
    barvinok_uppers = []
    barvinok_lowers = []

    barvinok_estimators_gaussian = []
    barvinok_gaussian_uppers = []
    barvinok_gaussian_lowers = []

    exact_log_Zs = []
    for Z in Z_values:
        print "working on Z =", Z
        gumbel_maxs = []
        barvinok_maxs = []
        barvinok_maxs_gaus = []
        for idx in range(trials):
            vector_set = generate_set(m=Z, n=n, random=random)
            barvinok_stats = summary_stats_random_c(n, vector_set, randomness='binomial')
            barvinok_stats_gaus = summary_stats_random_c(n, vector_set, randomness='gaussian')
            gumbel_stats = summary_stats_gumbels(n, vector_set)
            gumbel_maxs.append(gumbel_stats["max"])    
            barvinok_maxs.append(barvinok_stats["max"])
            barvinok_maxs_gaus.append(barvinok_stats_gaus["max"])

        gumbel_estimators.append(np.mean(gumbel_maxs))
        barvinok_estimators.append(np.mean(barvinok_maxs))
        cur_barv_est_log_2_Z = np.mean(barvinok_maxs)/np.log(2) #barvinok estimate of log_2(Z)
        cur_barv_upper_bound = gen_upper_bound_Z(delta_bar=cur_barv_est_log_2_Z, n=n, k=trials, log_base=np.e, w_max=1, w_min=1, verbose=True)
        cur_barv_lower_bound = gen_lower_bound_Z(delta_bar=cur_barv_est_log_2_Z, n=n, k=trials, log_base=np.e, w_min=1, verbose=True)
        barvinok_uppers.append(cur_barv_upper_bound)
        barvinok_lowers.append(cur_barv_lower_bound)

        barvinok_estimators_gaussian.append(np.mean(barvinok_maxs_gaus))
        cur_gaus_barv_est_log_2_Z = np.mean(barvinok_maxs_gaus)/np.log(2) #barvinok estimate of log_2(Z)
        cur_gaus_barv_upper_bound = gen_upper_bound_Z(delta_bar=cur_gaus_barv_est_log_2_Z, n=n, k=trials, log_base=np.e, w_max=1, w_min=1, verbose=True)
        cur_gaus_barv_lower_bound = gen_lower_bound_Z(delta_bar=cur_gaus_barv_est_log_2_Z, n=n, k=trials, log_base=np.e, w_min=1, verbose=True)
        barvinok_gaussian_uppers.append(cur_gaus_barv_upper_bound)
        barvinok_gaussian_lowers.append(cur_gaus_barv_lower_bound)

        exact_log_Zs.append(np.log(Z))

    fig = plt.figure()
    ax = plt.subplot(111)

    ax.plot(exact_log_Zs, barvinok_estimators, 'bx', label='our estimator c in {-1,1}^n', markersize=10)
    ax.plot(exact_log_Zs, barvinok_uppers, 'b+', label='our upper c in {-1,1}^n', markersize=10)
    ax.plot(exact_log_Zs, barvinok_lowers, 'b^', label='our lower c in {-1,1}^n', markersize=7)
    ax.plot(exact_log_Zs, barvinok_estimators_gaussian, 'yx', label='our estimator c gaussian', markersize=10)
    ax.plot(exact_log_Zs, barvinok_gaussian_uppers, 'y+', label='our upper c gaussian', markersize=10)
    ax.plot(exact_log_Zs, barvinok_gaussian_lowers, 'y^', label='our lower c gaussian', markersize=7)
    ax.plot(exact_log_Zs, gumbel_estimators, 'r+', label='gumbel upper bound', markersize=10)
    ax.plot(exact_log_Zs, exact_log_Zs, 'gx', label='exact ln(Z)', markersize=10)

    ax.plot(exact_log_Zs, [val*128 for val in barvinok_lowers], 'bs', label='conjectured upper for c in {-1,1}^n', markersize=7)


    plt.title('Gumbel UB vs. Barvinok estimator, random=%s, mean over %d trials' % (random, trials))
    plt.xlabel('ln(exact Z)')
    plt.ylabel('ln(Z) or estimator')
    #make the font bigger
    matplotlib.rcParams.update({'font.size': 15})

    # Shrink current axis's height by 10% on the bottom
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.1,
                     box.width, box.height * 0.9])
    
    # Put a legend below current axis
    lgd = ax.legend(loc='upper center', bbox_to_anchor=(0.5, -.1),
                    fancybox=False, shadow=False, ncol=2, numpoints=1)

    if random:
        fig.savefig('varyZ_randomSet', bbox_extra_artists=(lgd,), bbox_inches='tight')    
    else:
        fig.savefig('varyZ_hypercubeSet', bbox_extra_artists=(lgd,), bbox_inches='tight')    

    plt.close()   
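
# A hypothetical driver for plot_estimators_vary_Z, assuming the helper
# functions it references (generate_set, summary_stats_random_c,
# summary_stats_gumbels, gen_upper_bound_Z, gen_lower_bound_Z) are defined in
# this module.  It sweeps Z over {2^0, ..., 2^8} in {-1,1}^8 and saves one
# figure per set type.
if __name__ == '__main__':
    plot_estimators_vary_Z(n=8, trials=50, random=True)   # random vector sets
    plot_estimators_vary_Z(n=8, trials=50, random=False)  # hypercube sets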
Code example #2
def plot_estimators_vary_n(n_min, n_max, Z, trials=100, random=True, plot_gaus_rand=False):
    '''
    See how the estimators compare when varying n with Z fixed.
    Inputs:
    - Z: int, number of vectors in the set, i.e. the partition function of the
        unweighted problem
    - n_min: int, min value of n to test
    - n_max: int, max value of n to test
    - trials: int, solve the perturbed max problem this many times and take the mean
    - random: bool, True -> generate a random set, False -> generate a hypercube set
    - plot_gaus_rand: bool, if True also plot results when each element of c
        is sampled from a zero-mean Gaussian with standard deviation 1
    '''

#    n_min = int(math.ceil(np.log(Z)/np.log(2)))
    n_values = list(range(n_min, n_max))
    print(n_values)
    gumbel_estimators = []
    barvinok_estimators = []
    barvinok_uppers = []
    barvinok_lowers = []

    conjectured_barv_uppers = []

    barvinok_estimators_gaussian = []    
    barvinok_gaussian_uppers = []
    barvinok_gaussian_lowers = []

    exact_log_Zs = []
    for n in n_values:
        print "working on n =", n
        gumbel_maxs = []
        barvinok_maxs = []
        barvinok_maxs_gaus = []

        for idx in range(trials):
            vector_set = generate_set_efficient(m=Z, n=n, random=random)
            barvinok_stats = summary_stats_random_c(n, vector_set, randomness='binomial')
            barvinok_maxs.append(barvinok_stats["max"])

            if plot_gaus_rand:
                barvinok_stats_gaus = summary_stats_random_c(n, vector_set, randomness='gaussian')
                barvinok_maxs_gaus.append(barvinok_stats_gaus["max"])

            gumbel_stats = summary_stats_gumbels(n, vector_set)
            gumbel_maxs.append(gumbel_stats["max"])

        gumbel_estimators.append(np.mean(gumbel_maxs))
        barvinok_estimators.append(np.mean(barvinok_maxs))
        cur_barv_est_log_2_Z = np.mean(barvinok_maxs)/np.log(2) #barvinok estimate of log_2(Z)
        cur_barv_upper_bound = gen_upper_bound_Z(delta_bar=cur_barv_est_log_2_Z, n=n, k=trials, log_base=np.e, w_max=1, w_min=1, verbose=True)
        cur_barv_lower_bound = gen_lower_bound_Z(delta_bar=cur_barv_est_log_2_Z, n=n, k=trials, log_base=np.e, w_min=1, verbose=True)
        barvinok_uppers.append(cur_barv_upper_bound)
        barvinok_lowers.append(cur_barv_lower_bound)

        cur_barv_upper_bound_conj = gen_upper_bound_Z_conjecture(delta_bar=cur_barv_est_log_2_Z, n=n, k=trials, log_base=np.e)
        conjectured_barv_uppers.append(cur_barv_upper_bound_conj)

        if plot_gaus_rand:
            barvinok_estimators_gaussian.append(np.mean(barvinok_maxs_gaus))
            cur_gaus_barv_est_log_2_Z = np.mean(barvinok_maxs_gaus)/np.log(2) #barvinok estimate of log_2(Z)
            cur_gaus_barv_upper_bound = gen_upper_bound_Z(delta_bar=cur_gaus_barv_est_log_2_Z, n=n, k=trials, log_base=np.e, w_max=1, w_min=1, verbose=True)
            cur_gaus_barv_lower_bound = gen_lower_bound_Z(delta_bar=cur_gaus_barv_est_log_2_Z, n=n, k=trials, log_base=np.e, w_min=1, verbose=True)
            barvinok_gaussian_uppers.append(cur_gaus_barv_upper_bound)
            barvinok_gaussian_lowers.append(cur_gaus_barv_lower_bound)

        exact_log_Zs.append(np.log(Z))

    fig = plt.figure()
    ax = plt.subplot(111)

    ax.plot(n_values, barvinok_estimators, 'bx', label='our estimator c in {-1,1}^n', markersize=10)
    ax.plot(n_values, barvinok_uppers, 'b+', label='our upper c in {-1,1}^n', markersize=10)
    ax.plot(n_values, barvinok_lowers, 'b^', label='our lower c in {-1,1}^n', markersize=7)
    # the sqrt(6*n/trials) slack term should use each point's own n, not the final loop value
    ax.plot(n_values, [est + np.sqrt(6.0 * nv / trials) for nv, est in zip(n_values, barvinok_estimators)], 'm+', label='conjectured upper c in {-1,1}^n', markersize=10)

#    ax.plot(n_values, conjectured_barv_uppers, 'bs', label='conjectured upper for c in {-1,1}^n', markersize=7)
#    ax.plot(n_values, [128*val for val in barvinok_lowers], 'bs', label='conjectured upper for c in {-1,1}^n', markersize=7)
  
    if plot_gaus_rand:   
        ax.plot(n_values, barvinok_estimators_gaussian, 'yx', label='our estimator c gaussian', markersize=10)
        ax.plot(n_values, barvinok_gaussian_uppers, 'y+', label='our upper c gaussian', markersize=10)
        ax.plot(n_values, barvinok_gaussian_lowers, 'y^', label='our lower c gaussian', markersize=7)
    ax.plot(n_values, gumbel_estimators, 'r+', label='gumbel upper bound', markersize=10)
    ax.plot(n_values, exact_log_Zs, 'gx', label='exact ln(Z)', markersize=10)

    plt.title('Gumbel UB vs. Barvinok estimator, random=%s, mean over %d trials' % (random, trials))
    plt.xlabel('n')
    plt.ylabel('ln(Z) or estimator')
    #make the font bigger
    matplotlib.rcParams.update({'font.size': 15})

    # Shrink current axis's height by 10% on the bottom
    box = ax.get_position()
    ax.set_position([box.x0, box.y0 + box.height * 0.1,
                     box.width, box.height * 0.9])
    
    # Put a legend below current axis
    lgd = ax.legend(loc='upper center', bbox_to_anchor=(0.5, -.1),
                    fancybox=False, shadow=False, ncol=2, numpoints=1)
    if random:
        fig.savefig('vary_n_randomSet', bbox_extra_artists=(lgd,), bbox_inches='tight')    
    else:
        fig.savefig('vary_n_hypercubeSet_mean%dtrials' % trials, bbox_extra_artists=(lgd,), bbox_inches='tight')            
    plt.close()   
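
# A hypothetical driver for plot_estimators_vary_n, under the same assumptions
# about the helper functions (here generate_set_efficient instead of
# generate_set).  It fixes Z = 2^6 = 64 vectors and sweeps n from 8 to 19,
# also plotting the Gaussian-perturbation variant of the estimator.
if __name__ == '__main__':
    plot_estimators_vary_n(n_min=8, n_max=20, Z=64, trials=50,
                           random=True, plot_gaus_rand=True)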
Code example #3
def approx_permanent3(matrix, k_2, log_base, debugged=True, w_min=None):
    '''
    Approximate the permanent of the specified matrix using the gamma from 1.2.
    To accomplish this, we need to map up to N! permutations of our matrix to
    2^total_num_random_vals vectors in {-1,1}^total_num_random_vals for some
    number total_num_random_vals.  We pick total_num_random_vals = N*log2(N) so
    that 2^(N*log2(N)) = (2^log2(N))^N = N^N > N!.

    Steps:
    1. Generate N*log2(N) random values of -1 or 1 (each with probability .5),
       where log2(N) values will be used to compute costs for each of the N
       rows in our matrix.
    2. Compute N^2 costs using the random numbers from (1), using the log2(N)
       random numbers for each of the N rows.
    3. Solve the assignment problem using the costs from (2).

    *Note: the code below doesn't follow these steps in exactly this order,
    but this is the idea.*

    Inputs:
    - matrix: numpy.ndarray of size NxN, the matrix whose permanent we are
        approximating
    - k_2: int, number of independently perturbed optimization problems whose
        solutions are averaged to tighten the slack between the max and the
        expected max
    - log_base: float, we return bounds and an estimator for log(Z) in this base
    - debugged: bool, passed through to compute_cost_matrix
    - w_min: float or None, passed through to gen_upper_bound_Z and
        gen_lower_bound_Z

    Outputs:
    - log_perm_estimate: float, estimate of the log of the permanent of the
        specified matrix (gamma*n, where n = total_num_random_vals = N*log2(N))
    - lower_bound: float, lower bound on the permanent that holds with
        probability > .95
    - upper_bound: float, upper bound on the permanent that holds with
        probability > .95
    '''

    N = matrix.shape[0]
    assert (N == matrix.shape[1])
    #we will generate a random vector in {-1,1}^total_num_random_vals
    total_num_random_vals = N * int(math.ceil(np.log(N) / np.log(2)))

    #we will take the mean of k_2 solutions to independently perturbed optimization
    #problems to tighten the slack term between max and expected max
    deltas = []
    for i in range(k_2):
        random_cost_matrix = compute_cost_matrix(N, debugged=debugged)
        #        random_cost_matrix = sample_spherical(N,N)
        (cur_delta, max_cost_assignments) = find_max_cost(
            np.log(matrix) / np.log(2) +
            random_cost_matrix)  #want log_2(matrix)
        #        print "cur_delta =", cur_delta
        deltas.append(cur_delta)

    delta_bar = np.mean(deltas)

    (log_w_max, assignments_ignored) = find_max_cost(np.log(matrix))
    w_max = np.exp(log_w_max)

    upper_bound = gen_upper_bound_Z(delta_bar=delta_bar,
                                    n=total_num_random_vals,
                                    k=k_2,
                                    log_base=log_base,
                                    w_max=w_max,
                                    w_min=w_min,
                                    verbose=True)
    lower_bound = gen_lower_bound_Z(delta_bar=delta_bar,
                                    n=total_num_random_vals,
                                    k=k_2,
                                    log_base=log_base,
                                    w_min=w_min,
                                    verbose=True)
    scaled_delta_bar = delta_bar * np.log(2) / np.log(log_base)
    return (scaled_delta_bar, lower_bound, upper_bound)
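
# A hypothetical sanity check for approx_permanent3, assuming its helpers
# (compute_cost_matrix, find_max_cost, gen_upper_bound_Z, gen_lower_bound_Z)
# are defined as above.  The exact permanent of a small matrix is computed by
# brute force over all N! permutations for comparison; for the all-ones
# 4x4 matrix the permanent is 4! = 24.
from itertools import permutations

def exact_permanent(matrix):
    N = matrix.shape[0]
    return sum(np.prod([matrix[i, p[i]] for i in range(N)])
               for p in permutations(range(N)))

if __name__ == '__main__':
    A = np.ones((4, 4))
    est, lower, upper = approx_permanent3(A, k_2=20, log_base=np.e, w_min=1.0)
    print("exact ln(permanent):", np.log(exact_permanent(A)))
    print("estimate of ln(permanent):", est)
    print("bounds on permanent:", (lower, upper))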