def adam_solve(lambda_flows,
               grad_energy_bound,
               samples,
               u_func,
               h,
               m=1000,
               step_size=0.001):
    '''
        Uses adam solver to optimize the energy bound
    '''
    output = np.copy(
        lambda_flows)  # Copies so original parameters are not modified
    print("BEFORE LEARNING:\n{}".format(output))
    grad_energy_bound = autograd.grad(
        energy_bound)  # Autograd gradient of energy bound
    g_eb = lambda lambda_flows, i: grad_energy_bound(
        lambda_flows,
        samples,
        h,
        u_func,
        #beta= (0.1 + i/1000))
        beta=min(1, 0.01 + i / 10000))  # Annealing
    output = adam(g_eb,
                  output,
                  num_iters=m,
                  callback=callback,
                  step_size=step_size)
    print("AFTER LEARNING:\n{}".format(output))

    # Resample and flow a larger number of samples to better show fit
    samples = np.random.randn(20000)[:, np.newaxis]
    samples_flowed = flow_samples(output, samples, h)
    np.savetxt("./linear_plots/flow_params.txt", output)
    return samples_flowed
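For reference, a minimal self-contained sketch of the autograd adam pattern used above, with a toy quadratic standing in for energy_bound (the names objective and callback below are illustrative, not from the original module):

import autograd
import autograd.numpy as np
from autograd.misc.optimizers import adam

def objective(params, i):
    # Toy stand-in for the energy bound; adam expects an objective/grad of (params, iter)
    return np.sum(params ** 2)

grad_objective = autograd.grad(objective)

def callback(params, i, grad):
    if i % 100 == 0:
        print("iter {}: objective {:.6f}".format(i, objective(params, i)))

init_params = np.array([5.0, -3.0])
opt_params = adam(grad_objective, init_params, num_iters=500,
                  step_size=0.01, callback=callback)
print("optimized parameters:", opt_params)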
Example #2
def runSB(psf, psf_k, imageArray):
    nImages = np.shape(imageArray)[2]
    results = imageArray * 0

    for imageIdx in range(0, nImages):
        if imageIdx < start:
            continue
        grndpath = '/home/moss/SMLM/data/fluorophores/frames/' + str(
            imageIdx + 1).zfill(5) + '.csv'
        grnd = Table.read(grndpath, format='ascii')
        no_source = len(grnd['xnano'])
        img = imageArray[:, :, imageIdx]
        sub = img - np.average(img[img < np.average(img) + 3 * np.std(img)])
        subnorm = sub / np.max(sub)
        mock = makeMock(grnd)
        #mocknorm = mock/np.max(mock);
        sb = SparseBayes(subnorm, psf, psf_k, no_source)
        #sb = SparseBayes_alpha(mock,psf,psf_k,no_source);
        #sb = SparseBayes_nofft(mock,psf,psf_k,sig_psf,no_source);
        #sb = SparseBayes_gaussian(subnorm,psf,psf_k);
        results[:, :, imageIdx] = sb.res
        s = 'nopri' + str(imageIdx + 1).zfill(5) + '.out'
        np.savetxt(s, sb.res)
        plt.imshow(results[:, :, imageIdx])
        plt.show()
    return results
Example #3
def p_abs(dofold,nG,bproj,method='RT'):
    dof = b_filter(dofold,bproj)
    
    planewave={'p_amp':0,'s_amp':1,'p_phase':0,'s_phase':0}
    theta = 0.
    phi = 0.
    
    t1 = time.time()
    obj= rcwa_assembly(dof,nG,bproj,theta,phi,planewave)

    vals = 0.
    R = 0.
    T = 0.
    
    if method == 'V':
        for i in range(Nlayer):
            Mv=get_conv(1./Mx/My,dof[i*Mx*My:(i+1)*Mx*My].reshape((Mx,My)),obj.G)
            vals = vals + np.real(obj.Volume_integral(1+i,Mv,Mv,Mv,normalize=1))*np.real(obj.omega)*np.imag(epsdiff)
    elif method == 'RT':
        R,T = obj.RT_Solve(normalize=1) 
        vals = 1-R-T
    t2 = time.time()
    if 'autograd' not in str(type(vals)):
        global ctrl
        np.savetxt('dof.txt',dof)
        if ctrl<=1:
            print(t2-t1)
        if method == 'V':
            print(ctrl,vals)
        elif method == 'RT':
            print(ctrl, 'R =',R,'T =',T,'Abs=',vals,'time=',t2-t1)
            
        ctrl +=1
    return vals
Example #4
	def evaluate(self, y_test, y_pred):
		y_test = y_test.flatten()
		y_test = self.normalize_Y(y_test)
		np.savetxt("pred", y_pred, newline=" ")
		np.savetxt("test", y_test, newline=" ")
		
		# Note: this is the fraction of matching predictions (i.e. accuracy), not an error rate
		error_rate = np.sum(y_test == y_pred) / y_test.size
		print(error_rate)
		return error_rate
Example #5
def adam_solve(lambda_flows, grad_energy_bound, samples, u_func, h, m=1000, step_size=0.001,
               bnn=False):
    '''
        Uses adam solver to optimize the energy bound
    '''
    output = np.copy(lambda_flows) # Copies to avoid changing initial conditions
    print("BEFORE LEARNING:\n{}".format(output))
    grad_energy_bound = autograd.grad(energy_bound)  # Autograd gradient of energy
    g_eb = lambda lambda_flows, i: grad_energy_bound(lambda_flows, samples, h, u_func,
                                                     #beta= (0.1 + i/1000))
                                                     beta=min(2, i/1000),  # Annealing
                                                     #beta=min(1, 0.01+i/10000),
                                                     bnn=bnn)
    output = adam(g_eb, output, num_iters=m, callback=callback, step_size=step_size)
    print("\nAFTER LEARNING:\n{}".format(output))

    #samples = np.random.randn(30000)[:,np.newaxis] # Plot with more samples for better clarity
    q_0_mu = np.array([0,0])
    q_0_sigma = 1
    D = q_0_mu.shape[0]
    #samples = np.random.multivariate_normal(q_0_mu, q_0_sigma*np.eye(D), 20000)

    samples_flowed = flow_samples(output, samples, h)
    #np.savetxt("./data_fit_1d/flow_params.txt", output)
    np.savetxt("./nn_fit/flow_params.txt", output)
    if bnn:
        np.savetxt("./nn_fit/energy_bound.txt", e_bound)
        fig, ax = plt.subplots()
        ax.plot(e_bound)
        ax.set(title="Energy Bound")
        plt.savefig("./nn_fit/energy_bound.png")
        plt.close()

        np.savetxt("./nn_fit/joint_probs.txt", joint_probs)
        fig, ax = plt.subplots()
        ax.plot(joint_probs)
        ax.set(title="Joint Probability")
        plt.savefig("./nn_fit/joint_probs.png")
        plt.close()

        np.savetxt("./nn_fit/flow_probs.txt", flow_probs)
        fig, ax = plt.subplots()
        ax.plot(flow_probs)
        ax.set(title="Flow Probs")
        plt.savefig("./nn_fit/flow_probs.png")
        plt.close()

        np.savetxt("./nn_fit/grad_norms.txt", grad_norms)
        fig, ax = plt.subplots()
        ax.plot(grad_norms)
        ax.set(title="Gradient Norms")
        plt.savefig("./nn_fit/grad_norms.png")
        plt.close()


    return samples_flowed
Example #6
    def print_perf(combined_params, iter, grad):
        if iter % 10 == 0:
            model_params, prop_params = combined_params
            bound = -objective(combined_params, iter)
            message = "{:15}|{:20}".format(iter, bound)

            with open(f_head + '_ELBO.csv', 'a') as f_handle:
                np.savetxt(f_handle, [[iter, bound]], fmt='%i,%f')

            print(message)
Example #7
    def dump_state(self, xk):
        '''
        Callback to save the state to disk during optimization.
        '''
        filename = 'state.txt'

        if not os.path.exists(filename):
            past = np.zeros((0, xk.shape[0]))
        else:
            past = np.loadtxt(filename)
            if past.ndim < 2:
                past = past.reshape(1, -1)
        np.savetxt(filename, np.append(past, xk.reshape(1, -1), axis=0))
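A minimal sketch of how a callback like this can be hooked into an optimizer, here using scipy.optimize.minimize, which calls callback(xk) with the current iterate (the objective below is a hypothetical stand-in):

import os
import numpy as np
from scipy.optimize import minimize

def objective(x):
    # Hypothetical smooth objective; any scalar function of x works here
    return np.sum((x - 3.0) ** 2)

def dump_state(xk):
    # Same idea as the method above, written as a free function:
    # append each iterate to state.txt so the trajectory can be inspected later
    filename = 'state.txt'
    if not os.path.exists(filename):
        past = np.zeros((0, xk.shape[0]))
    else:
        past = np.loadtxt(filename)
        if past.ndim < 2:
            past = past.reshape(1, -1)
    np.savetxt(filename, np.append(past, xk.reshape(1, -1), axis=0))

x0 = np.zeros(4)
res = minimize(objective, x0, callback=dump_state)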
Example #8
def save_checkpoint(i_epoch,
                    i_batch,
                    output_folder,
                    shared_file_object=True,
                    obj_array=None,
                    optimizer=None):

    np.savetxt(os.path.join(output_folder, 'checkpoint.txt'),
               np.array([i_epoch, i_batch]),
               fmt='%d')
    if not shared_file_object:
        np.save(os.path.join(output_folder, 'obj_checkpoint.npy'), obj_array)
        optimizer.save_param_arrays_to_checkpoint()
    return
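A matching restore sketch (load_checkpoint is hypothetical; it assumes the output_folder layout written by save_checkpoint above):

import os
import numpy as np

def load_checkpoint(output_folder, shared_file_object=True):
    # checkpoint.txt holds two integers: the epoch index and the batch index
    i_epoch, i_batch = np.loadtxt(
        os.path.join(output_folder, 'checkpoint.txt'), dtype=int)
    obj_array = None
    if not shared_file_object:
        obj_array = np.load(os.path.join(output_folder, 'obj_checkpoint.npy'))
    return int(i_epoch), int(i_batch), obj_array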
Example #9
def runSB(imageDir, saveDir, psf, psf_k, imageArray):
    nImages = np.shape(imageArray)[2]
    results = imageArray * 0

    for imageIdx in range(0, nImages):
        if imageIdx < start:
            continue
        grndpath = imageDir + str(imageIdx) + '.truth'
        grnd = np.loadtxt(grndpath)
        no_source = np.shape(grnd)[0]
        if len(np.shape(grnd)) < 2:
            no_source = 1
        img = imageArray[:, :, imageIdx]
        sb = SparseBayes(img, psf, psf_k, no_source)
        results[:, :, imageIdx] = sb.res
        s = saveDir + str(imageIdx) + '.out'
        np.savetxt(s, sb.res)
        #plt.imshow(results[:,:,imageIdx]);
        #plt.show();
    return results
Example #10
        print('choice: ', solve_choice)

        smooth_choice = sys.argv[10]
        recursive_choice = sys.argv[11]

        global order_file_name
        order_file_name = sys.argv[12]

        if order_file_name is not None:
            order = np.loadtxt(order_file_name)
            order = order.astype(np.uint8)
            print('order', order)

            ### save for application
            np.savetxt(save_for_application_path_prefix + order_file_name,
                       order)

            #### reorder the primary pigments
            H_ordered = H[order, :]
            H = H_ordered.copy()

            ### save reordered primary pigments.
            np.savetxt(
                os.path.splitext(KS_file_name)[0] + "-" +
                os.path.splitext(order_file_name)[0] + ".txt", H)
            R_vector = equations_in_RealPigments(H[:, :L],
                                                 H[:, L:],
                                                 r=1.0,
                                                 h=1.0)
            P_vector = R_vector * Illuminantnew[:, 1].reshape(
                (1, -1))  ### shape is N*L
Example #11
def save_layers(x0, arr, H, output_prefix):
    shape = arr.shape
    original_shape = shape
    img_size = shape[:2]
    N = shape[0] * shape[1]
    M = H.shape[0]
    L = H.shape[1] // 2  # integer division: H stacks K and S column-wise

    Thickness = x0.reshape((N, M))
    K0 = H[:, :L]
    S0 = H[:, L:]
    print(Thickness.sum(axis=1).min())
    print(Thickness.sum(axis=1).max())

    R_vector = np.ones((N, L))
    for i in range(M):
        R_vector = equations_in_RealPigments(K0[i:i + 1, :],
                                             S0[i:i + 1, :],
                                             r=R_vector,
                                             h=Thickness[:, i:i + 1])

    ### from R spectrum x wavelength spectrums to linear rgb colors
    P_vector = R_vector * Illuminantnew[:, 1].reshape(
        (1, -1))  ### shape is N*L
    R_xyz = (P_vector.reshape((N, 1, L)) * R_xyzcoeff.reshape(
        (1, 3, L))).sum(axis=2)  ###shape N*3*L to shape N*3
    Normalize = (Illuminantnew[:, 1] *
                 R_xyzcoeff[1, :]).sum()  ### scalar value.
    R_xyz = R_xyz / Normalize  ####xyz value shape is N*3
    R_rgb = np.dot(
        xyztorgb,
        R_xyz.transpose()).transpose()  ###linear rgb value, shape is N*3
    R_rgb = Gamma_trans_img(R_rgb.clip(0, 1))  ##clip and gamma correction

    print('RGB RMSE: ',
          np.sqrt(np.square(255 * (arr.reshape((-1, 3)) - R_rgb)).sum() / N))

    filename = output_prefix + "-fixed_KS-reconstructed.png"
    plt.imsave(filename, (R_rgb.reshape(original_shape) * 255.0).clip(
        0, 255).round().astype(np.uint8))
    np.savetxt(output_prefix + "-thickness.txt", Thickness)

    #### save for applications
    filename = save_for_application_path_prefix + os.path.splitext(
        img_file)[0] + "-" + str(M) + "-KM_layers-" + os.path.splitext(
            order_file_name)[0] + "-reconstructed.png"
    plt.imsave(filename, (R_rgb.reshape(original_shape) * 255.0).clip(
        0, 255).round().astype(np.uint8))

    ### compute sparsity
    sparsity_thres_array = np.array(
        [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1])
    Thickness_sparsity_list = np.ones(len(sparsity_thres_array))
    for thres_ind in range(len(sparsity_thres_array)):
        Thickness_sparsity_list[thres_ind] = len(Thickness[
            Thickness <= sparsity_thres_array[thres_ind]]) * 1.0 / (N * M)

    print "Thickness_sparsity_list: ", Thickness_sparsity_list
    np.savetxt(output_prefix + "-Thickness-Sparsity.txt",
               Thickness_sparsity_list)

    # normalized_Thickness=Thickness/Thickness.sum(axis=1).reshape((-1,1))

    # for i in xrange(M):
    #     #### save normalized_weights_map for each pigment.
    #     normalized_thickness_map_name=output_prefix+"-normalized_thickness_map-%02d.png" % i
    #     normalized_thickness_map=normalized_Thickness[:,i].reshape(img_size).copy()
    #     Image.fromarray((normalized_thickness_map*255.0).clip(0,255).round().astype(np.uint8)).save(normalized_thickness_map_name)

    Thickness_sum_map = Thickness.sum(axis=1).reshape(img_size)
    T_min = Thickness_sum_map.min()
    T_max = Thickness_sum_map.max()
    Thickness_sum_map = Thickness_sum_map / T_max

    Image.fromarray((Thickness_sum_map * 255.0).round().astype(
        np.uint8)).save(output_prefix + "-thickness_sum_map-min-" +
                        str(T_min) + "-max-" + str(T_max) + ".png")

    for i in range(M):
        thickness_map_name = output_prefix + "-layer_thickness_map-%02d.png" % i
        Thickness_map = Thickness[:, i].reshape(img_size).copy()
        Large_than_one = len(Thickness_map[Thickness_map > 1.0])
        if Large_than_one > 0:
            print "Number of Thickness value that is larger than 1.0 is : ", Large_than_one
        Image.fromarray((Thickness_map * 255.0).clip(0, 255).round().astype(
            np.uint8)).save(thickness_map_name)

        ####save for application
        thickness_map_name = save_for_application_path_prefix + os.path.splitext(
            img_file)[0] + "-" + str(M) + "-KM_layers-" + os.path.splitext(
                order_file_name)[0] + "-thickness_map-%02d.png" % i
        Image.fromarray((Thickness_map * 255.0).clip(0, 255).round().astype(
            np.uint8)).save(thickness_map_name)
Example #12
        history.append((loss, ws, thetas))

        # get gradient of loss function
        grad = gradlossFn(ws, thetas, yobs, **lossfn_kwargs)
        # compute new support
        theta = lmo(grad)
        # update signal parameters
        if i == 0: _thetas = np.append(thetas, theta)
        else:
            _thetas = np.append(np.atleast_2d(thetas),
                                np.atleast_2d(theta),
                                axis=0)

        ws, thetas = local_update(_thetas, yobs, lossFn, **lossfn_kwargs)

        # calculate output
        output = Psi(ws, thetas)

        if (i > 2) and (history[-2][0] - history[-1][0] < 1.):
            return loss, ws, thetas

    return loss, ws, thetas


# run adcg
loss, ws, thetas = adcg(image.flatten(), ell, gradell, coordinate_descent,
                        n_adcg)

# writeout ADCG output
np.savetxt(foutput, np.vstack([thetas.T, ws.T]).T)
Example #13
                P0 = (logprob(ws.reshape(1, num_weights), inputs, targets))
                print("P0", P0)
                alpha = (Pprime - P0)
                print("alpha", alpha)
                bb = np.log(rs.rand(1))
                print(bb)
                accp[i] = 1 if bb <= alpha else 0
                print("accept", bb <= alpha, "value", accp[i])
                wi = wi_prime if bb <= alpha else wi
                print("new wi", wi)
                ws[i] = wi
                # Let the first 9999 samples burn in
                # for m in range(500):
                #     wi = ws[i]
                #     wi_prime = rs.randn(1) + wi
                #     ws_tmp = ws
                #     ws_tmp[i] = wi_prime
                #     Pprime = (logprob(ws_tmp.reshape(1,num_weights), inputs, targets))
                #     P0 = (logprob(ws.reshape(1, num_weights), inputs, targets))
                #     alpha = np.exp(Pprime - P0)
                #     wi = wi_prime if rs.rand(1) < alpha else wi
                #     ws[i] = wi
            ws_df = np.concatenate([ws_df, ws.reshape(num_weights, 1)], axis=1)
            print("accp vec", accp.reshape(num_weights, 1))
            accp_df = np.concatenate(
                [accp_df, accp.reshape(num_weights, 1)], axis=1)
        filename = "gibbs_wts2" + "K" + str(k) + ".csv"
        accp_file = "accp" + "K" + str(k) + ".csv"
        np.savetxt(filename, ws_df, delimiter=',', fmt='%f')
        np.savetxt(accp_file, accp_df, delimiter=',', fmt='%d')
Example #14
D1, D2, D3, G, nodes_index = gsm.get_similarity_matrix(dotpath)
#D3=np.zeros((len(D1),len(D1)))
#D1[D1==0]=100
#D2[D2==0]=100
#D3[D3==0]=100
print(len(np.where(D1 == 0)[0]))  # number of zero entries in D1
A = np.random.rand(len(D1) * dim, 1)

mview = multiview(D1, D2, D3, dim, eps)

pos1, costs = mview.multiview_mds(A, steps, alpha, stopping_eps, outdir,
                                  'collaboration')

pos1 = pos1.reshape(int(len(pos1) / dim), dim)

np.savetxt("data_pos_" + graphname + ".csv", pos1, delimiter=",")

#write to file
jsdata = "var points=" + str(pos1.tolist()) + ";"
file_path = outdir + "data_pos_" + graphname + ".js"
f = open(file_path, "w")
f.write(jsdata)
f.close()

d = json_graph.node_link_data(G)
#json.dump(d, open(outdir+'graph_'+graphname+'.json', 'w'))
j = open(outdir + 'graph_' + graphname + '.json', 'w')
j.write("edges='" + str(d['links']).replace("\'", "\"") + "';")
j.write("nodes='" + str(d['nodes']).replace("\'", "\"") + "';")
j.close()
Example #15
        init_var_params = np.concatenate([init_mean, init_log_std])

        print("Optimizing variational parameters...")
        variational_params = adam(gradient,
                                  init_var_params,
                                  step_size=0.1,
                                  num_iters=200,
                                  callback=callback)
        # print(variational_params)
        #
        # Sample functions from the final posterior.
        rs = npr.RandomState(0)
        mean, log_std = unpack_params(variational_params)
        # rs = npr.RandomState(0)
        sample_weights = rs.randn(1000, num_weights) * np.exp(log_std) + mean
        np.savetxt("VIwts.csv", sample_weights, delimiter=',', fmt='%f')
        plot_inputs = np.linspace(-2, 2, num=400)
        outputs_final = predictions(sample_weights,
                                    np.expand_dims(plot_inputs, 1))
        lowerbd = numpy.quantile(outputs_final, 0.05, axis=0)
        upperbd = numpy.quantile(outputs_final, 0.95, axis=0)
        inconint = np.logical_and(lowerbd < tot_targets,
                                  upperbd > tot_targets).ravel()
        con_ind = np.zeros(len(lowerbd))
        con_ind[inconint] = 1
        con_ind = con_ind.reshape(len(con_ind), 1)
        coverage_df = np.concatenate([coverage_df, con_ind], axis=1)
        # # Plot data and functions.
        fig = plt.figure(figsize=(12, 8), facecolor='white')
        ax = fig.add_subplot(111, frameon=False)
        ax.plot(tot_inputs.ravel(),
Example #16
    Nr = TrackNormal(rx)

    # psie is sort of backwards: higher angles go to the left
    return np.angle(Nx) - np.angle(Nr)


if __name__ == '__main__':
    TRACK_SPACING = 19.8  # cm
    x = SVGPathToTrackPoints("oakwarehouse.path", TRACK_SPACING)[:-1]

    xm = np.array(x)[:, 0] / 50  # 50 pixels / meter
    track_k = TrackCurvature(xm)
    Nx = TrackNormal(xm)
    u = 1j * Nx
    np.savetxt("track_x.txt",
               np.vstack([np.real(xm), np.imag(xm)]).T.reshape(-1),
               newline=",\n")
    np.savetxt("track_u.txt",
               np.vstack([np.real(u), np.imag(u)]).T.reshape(-1),
               newline=",\n")
    np.savetxt("track_k.txt", track_k, newline=",\n")

    ye, val, stuff = OptimizeTrack(xm, 1.4, 0.1)
    psie = RelativePsie(ye, xm)

    rx = u*ye + xm
    raceline_k = TrackCurvature(rx)

    np.savetxt("raceline_k.txt", raceline_k, newline=",\n")
    np.savetxt("raceline_ye.txt", ye, newline=",\n")
    np.savetxt("raceline_psie.txt", psie, newline=",\n")
Example #17
        print "vector + loop: ", t2-t1
        w_map = funProj_mat(param)
        t3 = time.time()
        print "mat : ", t3 - t2
        print "diff: ", np.linalg.norm(w_loop - w_map)

    elif flag_test == 3:
        #v0 = np.random.randn(100)
        v0 = complex(0, 1)
        print(isLegal(v0))
    elif flag_test == 4:
        # test polyinterp
        points = np.random.randn(2, 3)
        print(polyinterp(points))
        np.savetxt(\
        "./Mark_Schmidt/minConf/minFunc/test_data/polyinterp_input_1.csv",\
                points)
    elif flag_test == 5:
        # test lbfgsUpdate
        [p, m, corrections, debug, Hdiag] = [2, 5, 2, 0, 1e-3]
        y = np.random.randn(p)
        s = y + 0.1 * np.random.randn(p)
        old_dirs = np.random.randn(p, m)
        old_stps = np.random.randn(p, m)
        data_mat = {'p':p, 'm':m, 'corrections':corrections, 'debug':debug, \
                'Hdiag':Hdiag, 'y':y, 's':s, 'old_dirs':old_dirs, \
                'old_stps':old_stps}
        savemat("./Mark_Schmidt/minConf/minFunc/test_data/lbfgsUpdate_input_1.csv",\
                data_mat)
        [new_dirs, new_stps, new_Hdiag] = lbfgsUpdate(y, s, corrections, \
                debug, old_dirs, old_stps, Hdiag)
Example #18
if __name__ == "__main__":
    # Get the Data
    File = "yelp_train.txt"
    (rating_matrix, opinion_matrix, opinion_matrixI) = getRatingMatrix(File)
    print("Dimensions of the training dataset: ", rating_matrix.shape)
    # Set the number of Factors
    NUM_OF_FACTORS = 20
    MAX_DEPTH = 6

    # Build decision tree on training set
    (decisionTree, decisionTreeI, user_vectors,
     item_vectors) = alternateOptimization(opinion_matrix, opinion_matrixI,
                                           rating_matrix, NUM_OF_FACTORS,
                                           MAX_DEPTH, File)
    Predicted_Rating = np.dot(user_vectors, item_vectors.T)
    np.savetxt('/results/item_vector.txt', item_vectors, fmt='%0.8f')
    np.savetxt('/results/user_vectors.txt', user_vectors, fmt='%0.8f')
    np.savetxt('/results/rating_predict.txt', Predicted_Rating, fmt='%0.8f')
    TestFile = "yelp_test.txt"
    (test_r, test_opinion, test_opinionI) = getRatingMatrix(TestFile)
    Predicted_Rating[np.where(rating_matrix > 0)] = 0.0  # zero out items already rated in training

    # print("Predicted_Rating for Test: ", Predicted_Rating)
    # print("Test Rating: ", test)
    '''NDCG = getNDCG(Predicted_Rating, test_r, 10)
    print("NDCG@10: ", NDCG)
    NDCG = getNDCG(Predicted_Rating, test_r, 20)
    print("NDCG@20: ", NDCG)
    NDCG = getNDCG(Predicted_Rating, test_r, 50)
    print("NDCG@50: ", NDCG)'''
    print("print user tree")
Example #19
    # Metropolis within Gibbs is used.
    for k in range(1):
        rs = npr.RandomState(k)

        ws = rs.randn(num_weights)
        ws_df = np.zeros(num_weights).reshape(num_weights, 1)

        for b in range(B):
            print("b", b)
            for i in range(num_weights):
                # wiprime|wi ~ N(wi, 1)
                print("i", i)
                # Burn-in: run a short inner Metropolis chain for this coordinate
                for m in range(50):
                    wi = ws[i]
                    wi_prime = rs.randn(1) + wi
                    ws_tmp = copy.deepcopy(ws)
                    ws_tmp[i] = wi_prime
                    Pprime = (logprob(ws_tmp.reshape(1, num_weights), inputs,
                                      targets))
                    P0 = (logprob(ws.reshape(1, num_weights), inputs, targets))
                    alpha = (Pprime - P0)
                    print("alpha", alpha)
                    bb = np.log(rs.rand(1))
                    print("b", bb)
                    wi = wi_prime if bb < alpha else wi
                    ws[i] = wi
            ws_df = np.concatenate([ws_df, ws.reshape(num_weights, 1)], axis=1)
        filename = "gibbs_wts" + "K" + str(k) + ".csv"
        np.savetxt(filename, ws_df, delimiter=',', fmt='%d')
Example #20
theta_dict["C"] = np.repeat(25., pecmy.N)
theta_x = pecmy.unwrap_theta(theta_dict)

# opt.root(pecmy.pp_wrap_alpha, .5, args=(.99, ))['x']
# pecmy.W ** - .75
np.reshape(
    np.repeat(np.max(pecmy.ecmy.tau + pecmy.tau_buffer, axis=1), pecmy.N),
    (pecmy.N, pecmy.N)) / pecmy.ecmy.tau

np.reshape(np.repeat(np.min(tau_min_mat - pecmy.tau_buffer, axis=1), pecmy.N),
           (pecmy.N, pecmy.N)) / pecmy.ecmy.tau

v = np.mean(pecmy.ecmy.tau, axis=1)

pecmy.estimator_bounds(theta_x, v, "lower")

# x, obj, status = pecmy.estimator(v, theta_x, pecmy.m, sv=sv, nash_eq=False)
x, obj, status = pecmy.estimator(v, theta_x, pecmy.m, sv=None, nash_eq=False)

x_dict = pecmy.rewrap_xlhvt(x)
ge_dict = pecmy.ecmy.rewrap_ge_dict(x_dict["ge_x"])
theta_dict = pecmy.rewrap_theta(x_dict["theta"])

print(ge_dict["tau_hat"] * pecmy.ecmy.tau)
print("-----")
for i in theta_dict.keys():
    print(i)
    print(theta_dict[i])

np.savetxt(out_fname, x, delimiter=",")
Example #21
    print('D_(r||p)', true_divg(x0))
    print('Acceptance', len(Accep) / Num_sample)

    Result[0, index] = loss(x0)
    Result[1, index] = true_divg(x0)
    Result[2, index] = len(Accep) / Num_sample

    index = index + 1

X1 = np.arange(-7, 7, 0.01)

p1 = plt.scatter(X1,
                 stats.t.pdf(X1, 10, x0[0], np.exp(x0[1])),
                 label="t_" + str(10))

np.savetxt('GMM_Results', Result)

X1 = np.arange(-7, 5, 0.01)

p2 = plt.scatter(X1,
                 (stats.norm.pdf(X1, 2, 0.8) + stats.norm.pdf(X1, 0, 0.8) +
                  stats.norm.pdf(X1, -2, 0.8) + stats.norm.pdf(X1, -4, 0.8)) /
                 4,
                 label="real",
                 color='red')

p3 = plt.scatter(X1, learned_dist(x0), label="learn", color='black')

plt.legend(fontsize='xx-large')
plt.savefig('GMM_toy_ex.png')
Example #22
train_images = mnist.train.next_batch(100)[0]

with bl.Model() as m:
    X = bl.Placeholder('X', dimensions=agnp.array([100, 784]))
    encoder = bl.ml.neural_network.DenseNeuralNetwork(
        'Encoder',
        X,
        layer_dims=[784, 50, 20],
        nonlinearity=bl.math.utils.sigmoid)
    decoder = bl.ml.neural_network.DenseNeuralNetwork(
        'Decoder',
        encoder,
        layer_dims=[20, 50, 784],
        nonlinearity=bl.math.utils.sigmoid,
        last_layer_nonlinearity=bl.math.utils.sigmoid)
    y = bl.rvs.Bernoulli('obs', decoder, observed=X)

    init = agnp.random.normal(0, 3, size=m.n_params)
    optimizer = bl.math.optimizers.ADAM(learning_rate=5e-1)

    def iter_func(t, p, o):
        print("Objective value: %f" % o)

    res = optimizer.run(
        lambda x: -m.log_density(x, feed_dict={X: train_images}),
        lambda x: -m.grad_log_density(x, feed_dict={X: train_images}),
        init,
        iter_func=iter_func,
        iter_interval=5)
    agnp.savetxt("pos.txt", res.position, delimiter=',')
Example #23
    def __init__(
        self,
        X,
        Y,
        init_state=None,
        parameter_sampling_flag=0,
        noiseless_flag=False,
    ):
        self.dataXshape = X.shape
        self.dataYshape = Y.shape
        self.init_state_mean = np.array((0., 0., np.pi, 0.))
        '''
        if init_state is None:
            self.init_state_mean=np.array((0.,0.,np.pi,0.))
        else:
            self.init_state_mean=init_state
        '''

        w_init = 0.1 * np.ones(dimw)
        #lam_init = np.array([10000., 10000., 10000., 10000.])
        lam_init = np.array([100., 100., 100., 100.])
        alpha_init = np.array([5.])

        unload_flag = 1
        import os
        if os.path.isfile("./result_apply/temp_param_cartpole.csv"):
            param_load = np.loadtxt('./result_apply/temp_param_cartpole.csv',
                                    delimiter=',')
            if abs(param_load[0] - self.dataXshape[0]) < 1:
                unload_flag = 0
                w = np.array([param_load[1], param_load[2]])
                lam = np.array([
                    param_load[3], param_load[4], param_load[5], param_load[6]
                ])
                alpha = np.array([param_load[7]])
        if unload_flag:
            w, lam, alpha = laplace_approximation.maximized_approximate_log_marginal_likelihood(
                Y, X, mo_model, w_init, lam_init, alpha_init)

        param_save = np.array([
            self.dataXshape[0], w[0], w[1], lam[0], lam[1], lam[2], lam[3],
            alpha[0]
        ])
        np.savetxt('./result_apply/temp_param_cartpole.csv',
                   param_save,
                   delimiter=',')

        self.prec_weight = alpha
        self.post_mean = w
        self.post_var = inv(
            laplace_approximation.hesse_w(Y, X, w, mo_model, lam, alpha))
        self.noise_var1 = 1. / lam[0]
        self.noise_var2 = 1. / lam[1]
        self.noise_var3 = 1. / lam[2]
        self.noise_var4 = 1. / lam[3]
        self.lam_memo = lam

        self.log_evidence_memo = laplace_approximation.approximate_log_marginal_likelihood(
            Y, X, w, mo_model, lam, alpha)

        self.parameter_sampling_flag = parameter_sampling_flag
        self.noiseless_flag = noiseless_flag
        if 2 == self.parameter_sampling_flag:
            self.parameter_sample = np.random.multivariate_normal(
                self.post_mean, self.post_var)
            print("self.parameter_sample =", self.parameter_sample)
        if 3 == self.parameter_sampling_flag:
            self.parameter_sample = self.post_mean

        self.sqrt_noise_var1 = np.sqrt(self.noise_var1)
        self.sqrt_noise_var2 = np.sqrt(self.noise_var2)
        self.sqrt_noise_var3 = np.sqrt(self.noise_var3)
        self.sqrt_noise_var4 = np.sqrt(self.noise_var4)

        if self.noiseless_flag:
            self.sqrt_noise_var1 = 0.
            self.sqrt_noise_var2 = 0.
            self.sqrt_noise_var3 = 0.
            self.sqrt_noise_var4 = 0.
Example #24
    ind_trn = np.where((t >= 1075) & (t < 1165))[0]  # Interval of blocked flow
    ind_vld = ind_trn
    a = np.floor(len(ind_trn) / (npts - 1))
    ind_trn = ind_trn[::int(a)]
    ind_vld = np.setxor1d(ind_trn, ind_vld)
    ind_tst = np.where((t >= 500))[0]
    ind_tst = np.setxor1d(np.setxor1d(ind_tst, ind_trn), ind_vld)
    ind_tst = ind_tst[:-1]
    tM, zM, zdM, LM = gen.dataset(t, Z, ind_trn, **kwargs)
    tV, zV, zdV, LV = gen.dataset(t, Z, ind_vld, **kwargs)
    tS, zS, zdS, LS = gen.dataset(t, Z, ind_tst, **kwargs)

    ### Set up network, train, and predict
    layer_sizes = [ndim, 128, 128, ndim]
    step_size = 0.001
    max_iters = 5000
    lyap_off = 2000
    dOTD = dOTDModel(layer_sizes, step_size, max_iters, lyap_off)
    dOTD.train((zM, zdM, LM), notd)
    dOTD.comp_error(trn_set=(zM, zdM, LM), vld_set=(zV, zdV, LV), \
                    tst_set=(zS, zdS, LS))
    dOTD.comp_avgdist(ind_trn, ind_vld, ind_tst, Z, U)
    dOTD_tst = dOTD.predict(Z)  # Predict everywhere for plotting

    ### Save for plotting
    for kk in range(notd):
        filename = 'dOTD_tst' + str(kk + 1) + '.out'
        data = np.hstack((np.atleast_2d(t).T, Z, \
                          dOTD_tst[kk], U[:,:,kk]))
        np.savetxt(filename, data, fmt="%16.8e")
Example #25
def save_gen_params(gen_params, gp_out_dir, nm):
    np.savetxt(gp_out_dir + nm + '_00.csv', gen_params[0][0], delimiter=',')
    np.savetxt(gp_out_dir + nm + '_01.csv', gen_params[0][1], delimiter=',')
    np.savetxt(gp_out_dir + nm + '_10.csv', gen_params[1][0], delimiter=',')
    np.savetxt(gp_out_dir + nm + '_11.csv', gen_params[1][1], delimiter=',')
    return
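A corresponding loader sketch (load_gen_params is hypothetical; it assumes the 2x2 nested parameter layout and file names written by save_gen_params above):

import numpy as np

def load_gen_params(gp_out_dir, nm):
    # Rebuild the 2x2 nested list of arrays written by save_gen_params
    return [
        [np.loadtxt(gp_out_dir + nm + '_00.csv', delimiter=','),
         np.loadtxt(gp_out_dir + nm + '_01.csv', delimiter=',')],
        [np.loadtxt(gp_out_dir + nm + '_10.csv', delimiter=','),
         np.loadtxt(gp_out_dir + nm + '_11.csv', delimiter=',')],
    ]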
Example #26
def cross_validation(odom_1,
                     aligned_1,
                     odom_2,
                     aligned_2,
                     type_1,
                     type_2,
                     K=10):
    """Function to run cross-validation to run nonlinear optimization for optimal
    pose estimation and evaluation.  Performs cross-validation K times and splits
    the dataset into K (approximately) even splits, to be used for in-sample
    training and out-of-sample evaluation.

    This function estimates a relative transformation between two lidar frames
    using nonlinear optimization, and evaluates the robustness of this estimate
    through K-fold cross-validation performance of our framework.  Though this
    function does not return any values, it saves all results in the
    'results' relative path.

    Parameters:
        odom_1 (pd.DataFrame):  DataFrame corresponding to odometry data for the
            pose we wish to transform into the odom_2 frame of reference.  See
            data/main_odometry.csv for an example of the headers/columns/data
            types this function expects this DataFrame to have.

        aligned_1 (pd.DataFrame): DataFrame corresponding to aligned odometry
            data given the 3 sets of odometry data for the 3 lidar sensors.  This
            data corresponds to the odom_1 sensor frame.

        odom_2 (pd.DataFrame):  DataFrame corresponding to odometry data for the
            pose we wish to transform the odom_1 frame of reference into.  See
            data/main_odometry.csv for an example of the headers/columns/data
            types this function expects this DataFrame to have.

        aligned_2 (pd.DataFrame): DataFrame corresponding to aligned odometry
            data given the 3 sets of odometry data for the 3 lidar sensors.  This
            data corresponds to the odom_2 sensor frame.

        type_1 (str):  String denoting the lidar type.  Should be in the set
            {'main', 'front', 'rear'}.  This type corresponds to the data type
            for the odom_1 frame.

        type_2 (str):  String denoting the lidar type.  Should be in the set
            {'main', 'front', 'rear'}.  This type corresponds to the data type
            for the odom_2 frame.

        K (int):  The number of folds to be used for cross-validation.  Defaults
            to 10.
    """
    # Get ICP covariance matrices
    # Odom 1 lidar odometry
    odom1_icp, odom1_trans_cov, odom1_trans_cov_max, \
    odom1_trans_cov_avg, odom1_rot_cov, odom1_rot_cov_max, \
    odom1_rot_cov_avg, odom1_reject = parse_icp_cov(odom_1, type=type_1,
                                                  reject_thr=REJECT_THR)

    # Odom 2 lidar odometry
    odom2_icp, odom2_trans_cov, odom2_trans_cov_max, \
    odom2_trans_cov_avg, odom2_rot_cov, odom2_rot_cov_max, \
    odom2_rot_cov_avg, odom2_reject = parse_icp_cov(odom_2, type=type_2,
                                                    reject_thr=REJECT_THR)
    # Calculate relative poses
    (odom1_aligned,
     odom1_rel_poses) = relative_pose_processing.calc_rel_poses(aligned_1)
    (odom2_aligned,
     odom2_rel_poses) = relative_pose_processing.calc_rel_poses(aligned_2)

    # Compute weights for weighted estimate
    cov_t_odom1, cov_R_odom1 = compute_weights_euler(odom1_aligned)
    cov_t_odom2, cov_R_odom2 = compute_weights_euler(odom2_aligned)

    # Extract a single scalar using the average value from rotation and translation
    var_t_odom1 = extract_variance(cov_t_odom1, mode="max")
    var_R_odom1 = extract_variance(cov_R_odom1, mode="max")
    var_t_odom2 = extract_variance(cov_t_odom2, mode="max")
    var_R_odom2 = extract_variance(cov_R_odom2, mode="max")

    # Optimization (1) Instantiate a manifold
    translation_manifold = Euclidean(3)  # Translation vector
    so3 = Rotations(3)  # Rotation matrix
    manifold = Product((so3, translation_manifold))  # Instantiate manifold

    # Get initial guesses for our estimations
    if os.path.exists(PKL_POSES_PATH):  # Check to make sure path exists
        transforms_dict = load_transforms(
            PKL_POSES_PATH)  # Relative transforms

    # Map types to sensor names to access initial estimate relative transforms
    types2sensors = {"main": "velodyne", "front": "front", "rear": "rear"}

    # Now get initial guesses from the relative poses
    initial_guess_odom1_odom2 = transforms_dict["{}_{}".format(
        types2sensors[type_1], types2sensors[type_2])]
    # Print out all the initial estimates as poses
    print("INITIAL GUESS {} {}: \n {} \n".format(types2sensors[type_1],
                                                 types2sensors[type_2],
                                                 initial_guess_odom1_odom2))

    # Get rotation matrices for initial guesses
    R0_odom1_odom2, t0_odom1_odom2 = initial_guess_odom1_odom2[:3, :3], \
                                     initial_guess_odom1_odom2[:3, 3]
    X0_odom1_odom2 = (R0_odom1_odom2, t0_odom1_odom2)  # Pymanopt estimate
    print("INITIAL GUESS {} {}: \n R0: \n {} \n\n t0: \n {} \n".format(
        types2sensors[type_1], types2sensors[type_2], R0_odom1_odom2,
        t0_odom1_odom2))

    # Create KFold xval object to get training/validation indices
    kf = KFold(n_splits=K, random_state=None, shuffle=False)
    k = 0  # Set fold counter to 0

    # Dataset
    A = np.array(odom2_rel_poses)  # First set of poses
    B = np.array(odom1_rel_poses)  # Second set of poses
    N = len(A)
    assert len(A) == len(B)  # Sanity check to ensure odometry data matches
    r = np.logical_or(np.array(odom1_reject)[:N],
                      np.array(odom2_reject)[:N])  # Outlier rejection

    print("NUMBER OF CROSS-VALIDATION FOLDS: {}".format(K))

    # Iterate over 30 second intervals of the poses
    for train_index, test_index in kf.split(
            A):  # Perform K-fold cross-validation

        # Path for results from manifold optimization
        analysis_results_path = os.path.join(ANALYSIS_RESULTS_PATH,
                                             "k={}".format(k))
        final_estimates_path = os.path.join(FINAL_ESTIMATES_PATH,
                                            "k={}".format(k))
        odometry_plots_path = os.path.join(ODOMETRY_PLOTS_PATH,
                                           "k={}".format(k))

        # Make sure all paths exist - if they don't create them
        for path in [
                analysis_results_path, final_estimates_path,
                odometry_plots_path
        ]:
            check_dir(path)

        # Get training data
        A_train = A[train_index]
        B_train = B[train_index]
        N_train = min(A_train.shape[0], B_train.shape[0])
        r_train = r[train_index]
        print("FOLD NUMBER: {}, NUMBER OF TRAINING SAMPLES: {}".format(
            k, N_train))

        omega = np.max([var_R_odom1,
                        var_R_odom2])  # Take the max across the two odometries
        rho = np.max([var_t_odom1,
                      var_t_odom2])  # Take the max across the two odometries

        cost_lambda = lambda x: cost(x, A_train, B_train, r_train, rho, omega,
                                     WEIGHTED)  # Create cost function
        problem = Problem(manifold=manifold,
                          cost=cost_lambda)  # Create problem
        solver = CustomSteepestDescent()  # Create custom solver
        X_opt = solver.solve(problem, x=X0_odom1_odom2)  # Solve problem
        print("Initial Guess for Main-Front Transformation: \n {}".format(
            initial_guess_odom1_odom2))
        print("Optimal solution between {} and {} "
              "reference frames: \n {}".format(types2sensors[type_1],
                                               types2sensors[type_2], X_opt))

        # Take intermediate values for plotting
        estimates_x = solver.estimates
        errors = solver.errors
        iters = solver.iterations

        # Metrics dictionary
        estimates_dict = {i: T for i, T in zip(iters, estimates_x)}
        error_dict = {i: e for i, e in zip(iters, errors)}

        # Save intermediate results to a pkl file
        estimates_fname = os.path.join(
            analysis_results_path,
            "estimates_{}_{}.pkl".format(types2sensors[type_1],
                                         types2sensors[type_2]))
        error_fname = os.path.join(
            analysis_results_path,
            "error_{}_{}.pkl".format(types2sensors[type_1],
                                     types2sensors[type_2]))

        # Save estimates to pickle file
        with open(estimates_fname, "wb") as pkl_estimates:
            pickle.dump(estimates_dict, pkl_estimates)
            pkl_estimates.close()

        # Save error to pickle file
        with open(error_fname, "wb") as pkl_error:
            pickle.dump(error_dict, pkl_error)
            pkl_error.close()

        # Calculate difference between initial guess and final
        X_opt_T = construct_pose(X_opt[0], X_opt[1].reshape((3, 1)))
        print("DIFFERENCE IN MATRICES: \n {}".format(
            np.subtract(X_opt_T, initial_guess_odom1_odom2)))

        # Compute the weighted RMSE (training/in-sample)
        train_rmse_init_weighted, train_rmse_final_weighted, train_rmse_init_R_weighted, \
        train_rmse_init_t_weighted, train_rmse_final_R_weighted, \
        train_rmse_final_t_weighted = compute_rmse_weighted(
            initial_guess_odom1_odom2, X_opt_T, A_train, B_train, rho, omega)

        # Compute the unweighted RMSE (training/in-sample)
        train_rmse_init_unweighted, train_rmse_final_unweighted, train_rmse_init_R_unweighted, \
        train_rmse_init_t_unweighted, train_rmse_final_R_unweighted, \
        train_rmse_final_t_unweighted = compute_rmse_unweighted(
            initial_guess_odom1_odom2, X_opt_T, A_train, B_train)

        # Concatenate all RMSE values for training/in-sample
        train_rmses = [
            train_rmse_init_unweighted, train_rmse_final_unweighted,
            train_rmse_init_weighted, train_rmse_final_weighted,
            train_rmse_init_R_unweighted, train_rmse_init_t_unweighted,
            train_rmse_final_R_unweighted, train_rmse_final_t_unweighted,
            train_rmse_init_R_weighted, train_rmse_init_t_weighted,
            train_rmse_final_R_weighted, train_rmse_final_t_weighted
        ]

        # Display and save RMSEs
        outpath = os.path.join(
            analysis_results_path,
            "train_rmse_{}_{}.txt".format(types2sensors[type_1],
                                          types2sensors[type_2]))
        display_and_save_rmse(train_rmses, outpath)

        # Get test data
        A_test = A[test_index]
        B_test = B[test_index]
        N_test = min(A_test.shape[0], B_test.shape[0])
        print("NUMBER OF TEST SAMPLES: {}".format(N_test))

        # Compute the weighted RMSE (testing/out-of-sample)
        test_rmse_init_weighted, test_rmse_final_weighted, test_rmse_init_R_weighted, \
        test_rmse_init_t_weighted, test_rmse_final_R_weighted, \
        test_rmse_final_t_weighted = compute_rmse_weighted(initial_guess_odom1_odom2,
                                                            X_opt_T, A_test, B_test, rho, omega)

        # Compute the unweighted RMSE (testing/out-of-sample)
        test_rmse_init_unweighted, test_rmse_final_unweighted, test_rmse_init_R_unweighted, \
        test_rmse_init_t_unweighted, test_rmse_final_R_unweighted, \
        test_rmse_final_t_unweighted = compute_rmse_unweighted(initial_guess_odom1_odom2,
                                                                X_opt_T, A_test, B_test)

        # Concatenate all RMSE values for testing/out-of-sample
        test_rmses = [
            test_rmse_init_unweighted, test_rmse_final_unweighted,
            test_rmse_init_weighted, test_rmse_final_weighted,
            test_rmse_init_R_unweighted, test_rmse_init_t_unweighted,
            test_rmse_final_R_unweighted, test_rmse_final_t_unweighted,
            test_rmse_init_R_weighted, test_rmse_init_t_weighted,
            test_rmse_final_R_weighted, test_rmse_final_t_weighted
        ]

        # Display and save RMSEs
        outpath = os.path.join(
            analysis_results_path,
            "test_rmse_{}_{}.txt".format(types2sensors[type_1],
                                         types2sensors[type_2]))
        display_and_save_rmse(test_rmses, outpath)

        # Save final estimates
        final_estimate_outpath = os.path.join(
            final_estimates_path, "{}_{}.txt".format(types2sensors[type_1],
                                                     types2sensors[type_2]))
        np.savetxt(final_estimate_outpath, X_opt_T)

        # Finally, increment k
        k += 1
Example #28
    print('KNN')
    print(np.sum(res2) / 100.)
    
    # Make neural net functions
    N_weights, pred_fun, loss_fun, \
        frac_err = make_nn_funs(input_shape, layer_specs, L2_reg)
    loss_grad = grad(loss_fun)

    # Initialize weights
    rs = npr.RandomState()
    W = rs.randn(N_weights) * param_scale

    print("    Epoch      |    Train err  |   Test error  ")
    def print_perf(epoch, W):
        test_perf  = frac_err(W, test_images, test_labels)
        train_perf = frac_err(W, train_images, train_labels)
        print("{0:15}|{1:15}|{2:15}".format(epoch, train_perf, test_perf))

    # Train with sgd
    batch_idxs = make_batches(N_data, batch_size)
    cur_dir = np.zeros(N_weights)

    for epoch in range(num_epochs):
        print_perf(epoch, W)
        for idxs in batch_idxs:
            grad_W = loss_grad(W, train_images[idxs], train_labels[idxs])
            cur_dir = momentum * cur_dir + (1.0 - momentum) * grad_W
            W -= learning_rate * cur_dir

    np.savetxt('W',W)
Example #29
words_all = cv.fit_transform(words).toarray()
featname_all = cv.get_feature_names()

# choose the alpha features
idx_words = []
for j in np.arange(len(featname_all)):
    if featname_all[j].isalpha():
        idx_words.append(j)
featname_alpha = np.array(featname_all)[idx_words]
words_alpha = words_all[:, idx_words]

#words_std = np.std(words_alpha, axis=0)
words_std = np.sum(words_alpha, axis=0)
# choose the high-variance words
t1 = np.argsort(words_std)
idx_highstd = t1[-500:][::-1]

featname_highstd = featname_alpha[idx_highstd]
words_highstd = words_alpha[:, idx_highstd]

# save the data into csv files
np.savetxt("webkbRaw_word.csv", words_highstd, delimiter=',')
np.savetxt("webkbRaw_label_univ.csv", label_univ.astype(int), delimiter=',')
np.savetxt("webkbRaw_label_topic.csv", label_topic.astype(int), delimiter=',')

csvfile = open("webkbRaw_wordnames.csv", "w", newline='')
csvwriter = csv.writer(csvfile, delimiter=',')
for i in np.arange(len(featname_highstd)):
    csvwriter.writerow([featname_highstd[i]])
csvfile.close()
Example #30
        # rs = npr.RandomState(0)

        covariance = get_covariance(variational_params)
        cov_decomp = np.linalg.cholesky(covariance)

        sample_weights = np.matmul(rs.randn(1000, len(log_std)), cov_decomp) + mean

        plot_inputs = np.linspace(-2, 2, num=400)
        outputs_final = predictions(sample_weights, np.expand_dims(plot_inputs, 1))
        lowerbd = numpy.quantile(outputs_final, 0.05, axis=0)
        upperbd = numpy.quantile(outputs_final, 0.95, axis=0)
        inconint = np.logical_and(lowerbd < tot_targets, upperbd > tot_targets).ravel()
        con_ind = np.zeros(len(lowerbd))
        con_ind[inconint] = 1
        con_ind = con_ind.reshape(len(con_ind), 1)
        coverage_df = np.concatenate([coverage_df, con_ind], axis=1)
        # # Plot data and functions.
        fig = plt.figure(figsize=(12, 8), facecolor='white')
        ax = fig.add_subplot(111, frameon=False)
        ax.plot(tot_inputs.ravel(), tot_targets.ravel(), "bx", label = "testing data")
        ax.plot(inputs.ravel(), targets.ravel(), 'rx', label = "training data")
        ax.plot(tot_inputs.ravel(), lowerbd.ravel(), "k-")
        ax.plot(tot_inputs.ravel(), upperbd.ravel(), "k-")
        ax.set_ylim([-2, 3])
        ax.legend()
        plt.show()
    filename = "blockVar" + "B" + str(B) + "t"+str(t) + "noise"+str(tau) + ".csv"
    np.savetxt(filename, coverage_df, delimiter=',', fmt='%d')

    # csv.reader("diagVar_QuantUQ.csv")
Example #31
def main():
    """Main function to run nonlinear manifold optimization on SE(3) to estimate
    an optimal relative pose transformation between coordinate frames given by
    the different lidar sensors."""
    # Extract and process the CSVs
    main_odometry = relative_pose_processing.process_df(MAIN_ODOM_CSV)
    front_odometry = relative_pose_processing.process_df(FRONT_ODOM_CSV)
    rear_odometry = relative_pose_processing.process_df(REAR_ODOM_CSV)

    # Process poses
    (main_aligned, front_aligned,
     rear_aligned) = relative_pose_processing.align_df(
         [main_odometry, front_odometry, rear_odometry])

    # Get ICP covariance matrices
    # Main lidar odometry
    main_icp, main_trans_cov, main_trans_cov_max, \
        main_trans_cov_avg, main_rot_cov, main_rot_cov_max, \
        main_rot_cov_avg, main_reject = parse_icp_cov(main_odometry, type="main",
                                                      reject_thr=REJECT_THR)

    # Front lidar odometry
    front_icp, front_trans_cov, front_trans_cov_max, \
        front_trans_cov_avg, front_rot_cov, front_rot_cov_max, \
        front_rot_cov_avg, front_reject = parse_icp_cov(front_odometry, type="front",
                                                        reject_thr=REJECT_THR)

    # Rear lidar odometry
    rear_icp, rear_trans_cov, rear_trans_cov_max, \
        rear_trans_cov_avg, rear_rot_cov, rear_rot_cov_max, \
        rear_rot_cov_avg, rear_reject = parse_icp_cov(rear_odometry, type="rear",
                                                      reject_thr=REJECT_THR)

    # Calculate relative poses
    (main_aligned,
     main_rel_poses) = relative_pose_processing.calc_rel_poses(main_aligned)
    (front_aligned,
     front_rel_poses) = relative_pose_processing.calc_rel_poses(front_aligned)
    (rear_aligned,
     rear_rel_poses) = relative_pose_processing.calc_rel_poses(rear_aligned)

    cov_t_main, cov_R_main = compute_weights_euler(main_aligned)
    cov_t_front, cov_R_front = compute_weights_euler(front_aligned)
    cov_t_rear, cov_R_rear = compute_weights_euler(rear_aligned)

    # Extract a single scalar using the average value from rotation and translation
    var_t_main = extract_variance(cov_t_main, mode="max")
    var_R_main = extract_variance(cov_R_main, mode="max")
    var_t_front = extract_variance(cov_t_front, mode="max")
    var_R_front = extract_variance(cov_R_front, mode="max")
    var_t_rear = extract_variance(cov_t_rear, mode="max")
    var_R_rear = extract_variance(cov_R_rear, mode="max")

    # Optimization (1) Instantiate a manifold
    translation_manifold = Euclidean(3)  # Translation vector
    so3 = Rotations(3)  # Rotation matrix
    manifold = Product((so3, translation_manifold))  # Instantiate manifold

    # Get initial guesses for our estimations
    initial_poses = {}
    if os.path.exists(PKL_POSES_PATH):  # Check to make sure path exists
        transforms_dict = load_transforms(
            PKL_POSES_PATH)  # Loads relative transforms

    # Now get initial guesses from the relative poses
    initial_guess_main_front = transforms_dict[
        "velodyne_front"]  # Relative transform from main to front (T^{V}_{F})
    initial_guess_main_rear = transforms_dict[
        "velodyne_rear"]  # Relative transform from main to rear (T^{V}_{B})
    initial_guess_front_rear = np.linalg.inv(
        initial_guess_main_front
    ) @ initial_guess_main_rear  # Relative transform from front to rear (T^{B}_{W})
    direct_initial_guess_front_rear = transforms_dict[
        "direct_front_rear"]  # Transform computed directly

    # Print out all the initial estimates as poses
    print(
        "INITIAL GUESS MAIN FRONT: \n {} \n".format(initial_guess_main_front))
    print("INITIAL GUESS MAIN REAR: \n {} \n".format(initial_guess_main_rear))
    print(
        "INITIAL GUESS FRONT REAR: \n {} \n".format(initial_guess_front_rear))
    print("INITIAL GUESS DIRECT FRONT REAR: \n {} \n".format(
        direct_initial_guess_front_rear))

    # Get rotation matrices for initial guesses
    R0_main_front = initial_guess_main_front[:3, :3]
    t0_main_front = initial_guess_main_front[:3, 3]
    X0_main_front = (R0_main_front, t0_main_front)
    print("INITIAL GUESS MAIN FRONT: \n R0: \n {} \n\n t0: \n {} \n".format(
        R0_main_front, t0_main_front))

    R0_main_rear = initial_guess_main_rear[:3, :3]
    t0_main_rear = initial_guess_main_rear[:3, 3]
    X0_main_rear = (R0_main_rear, t0_main_rear)
    print("INITIAL GUESS MAIN REAR: \n R0: \n {} \n\n t0: \n {} \n".format(
        R0_main_rear, t0_main_rear))

    R0_front_rear = initial_guess_front_rear[:3, :3]
    t0_front_rear = initial_guess_front_rear[:3, 3]
    X0_front_rear = (R0_front_rear, t0_front_rear)
    print("INITIAL GUESS FRONT REAR: \n R0: \n {} \n\n t0: \n {} \n".format(
        R0_front_rear, t0_front_rear))

    ######################## MAIN FRONT CALIBRATION ################################
    # Carry out optimization for main-front homogeneous transformations
    ### PARAMETERS ###
    A = np.array(front_rel_poses)  # First set of poses
    B = np.array(main_rel_poses)  # Second set of poses
    N = min(A.shape[0], B.shape[0])
    r = np.logical_or(np.array(main_reject[:N]), np.array(
        front_reject[:N]))  # If either has high variance, reject the sample
    omega = np.max([var_R_main,
                    var_R_front])  # Take the max across the two odometries
    rho = np.max([var_t_main,
                  var_t_front])  # Take the max across the two odometries
    ### PARAMETERS ###

    cost_main_front = lambda x: cost(x, A, B, r, rho, omega, WEIGHTED)
    problem_main_front = Problem(
        manifold=manifold, cost=cost_main_front
    )  # (2a) Compute the optimization between main and front
    solver_main_front = CustomSteepestDescent(
    )  # (3) Instantiate a Pymanopt solver
    Xopt_main_front = solver_main_front.solve(problem_main_front,
                                              x=X0_main_front)
    print("Initial Guess for Main-Front Transformation: \n {}".format(
        initial_guess_main_front))
    print("Optimal solution between main and front reference frames: \n {}".
          format(Xopt_main_front))

    # Take intermediate values for plotting
    estimates_x_main_front = solver_main_front.estimates
    errors_main_front = solver_main_front.errors
    iters_main_front = solver_main_front.iterations

    # Metrics dictionary
    estimates_dict_main_front = {
        i: T
        for i, T in zip(iters_main_front, estimates_x_main_front)
    }
    error_dict_main_front = {
        i: e
        for i, e in zip(iters_main_front, errors_main_front)
    }

    # Save intermediate results to a pkl file
    estimates_fname_main_front = os.path.join(ANALYSIS_RESULTS_PATH,
                                              "estimates_main_front.pkl")
    error_fname_main_front = os.path.join(ANALYSIS_RESULTS_PATH,
                                          "error_main_front.pkl")

    # Save estimates to pickle file
    with open(estimates_fname_main_front, "wb") as pkl_estimates:
        pickle.dump(estimates_dict_main_front, pkl_estimates)
        pkl_estimates.close()

    # Save error to pickle file
    with open(error_fname_main_front, "wb") as pkl_error:
        pickle.dump(error_dict_main_front, pkl_error)
        pkl_error.close()

    # Calculate difference between initial guess and final
    XOpt_T_main_front = construct_pose(Xopt_main_front[0],
                                       Xopt_main_front[1].reshape((3, 1)))
    print("DIFFERENCE IN MATRICES: \n {}".format(
        np.subtract(XOpt_T_main_front, initial_guess_main_front)))

    # Compute the weighted and unweighted RMSE
    rmse_init_weighted, rmse_final_weighted, rmse_init_R_weighted, \
    rmse_init_t_weighted, rmse_final_R_weighted, \
    rmse_final_t_weighted = compute_rmse_weighted(initial_guess_main_front,
                                                  XOpt_T_main_front, A, B, rho,
                                                  omega)
    rmse_init_unweighted, rmse_final_unweighted, rmse_init_R_unweighted, \
    rmse_init_t_unweighted, rmse_final_R_unweighted, \
    rmse_final_t_unweighted = compute_rmse_unweighted(initial_guess_main_front,
                                                      XOpt_T_main_front, A, B)
    rmses = [
        rmse_init_unweighted, rmse_final_unweighted, rmse_init_weighted,
        rmse_final_weighted, rmse_init_R_unweighted, rmse_init_t_unweighted,
        rmse_final_R_unweighted, rmse_final_t_unweighted, rmse_init_R_weighted,
        rmse_init_t_weighted, rmse_final_R_weighted, rmse_final_t_weighted
    ]

    # Display and save RMSEs
    outpath = os.path.join(ANALYSIS_RESULTS_PATH, "main_front_rmse.txt")
    display_and_save_rmse(rmses, outpath)

    # Save final estimates
    final_estimate_outpath = os.path.join(FINAL_ESTIMATES_PATH,
                                          "main_front_final.txt")
    np.savetxt(final_estimate_outpath, XOpt_T_main_front)
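    # The 4x4 homogeneous transform written above is plain whitespace-delimited
    # text, so it can be reloaded later with np.loadtxt(final_estimate_outpath).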
    ################################################################################

    ######################## MAIN REAR CALIBRATION #################################
    ### PARAMETERS ###
    A = np.array(rear_rel_poses)  # First set of poses
    B = np.array(main_rel_poses)  # Second set of poses
    N = min(A.shape[0], B.shape[0])
    # If either odometry has high variance, reject the sample
    r = np.logical_or(np.array(main_reject[:N]), np.array(rear_reject[:N]))
    # Use the larger rotation/translation variance of the two odometries
    omega = np.max([var_R_main, var_R_rear])
    rho = np.max([var_t_main, var_t_rear])
    ### PARAMETERS ###

    cost_main_rear = lambda x: cost(x, A, B, r, rho, omega, WEIGHTED)
    # Carry out optimization for main-rear homogeneous transformations
    # (2a) Set up the optimization problem between main and rear
    problem_main_rear = Problem(manifold=manifold, cost=cost_main_rear)
    # (3) Instantiate a Pymanopt solver
    solver_main_rear = CustomSteepestDescent()
    Xopt_main_rear = solver_main_rear.solve(problem_main_rear, x=X0_main_rear)
    print("Initial Guess for Main-Rear Transformation: \n {}".format(
        initial_guess_main_rear))
    print("Optimal solution between main and rear reference frames: \n {}".
          format(Xopt_main_rear))

    # Take intermediate values for plotting
    estimates_x_main_rear = solver_main_rear.estimates
    errors_main_rear = solver_main_rear.errors
    iters_main_rear = solver_main_rear.iterations

    # Metrics dictionary
    estimates_dict_main_rear = {
        i: T
        for i, T in zip(iters_main_rear, estimates_x_main_rear)
    }
    error_dict_main_rear = {
        i: e
        for i, e in zip(iters_main_rear, errors_main_rear)
    }

    # Save intermediate results to a pkl file
    estimates_fname_main_rear = os.path.join(ANALYSIS_RESULTS_PATH,
                                             "estimates_main_rear.pkl")
    error_fname_main_rear = os.path.join(ANALYSIS_RESULTS_PATH,
                                         "error_main_rear.pkl")

    # Save estimates to pickle file
    with open(estimates_fname_main_rear, "wb") as pkl_estimates:
        pickle.dump(estimates_dict_main_rear, pkl_estimates)

    # Save error to pickle file
    with open(error_fname_main_rear, "wb") as pkl_error:
        pickle.dump(error_dict_main_rear, pkl_error)

    # Calculate difference between initial guess and final
    XOpt_T_main_rear = construct_pose(Xopt_main_rear[0],
                                      Xopt_main_rear[1].reshape((3, 1)))
    print("DIFFERENCE IN MATRICES: \n {}".format(
        np.subtract(XOpt_T_main_rear, initial_guess_main_rear)))

    # Compute the weighted and unweighted RMSE
    rmse_init_weighted, rmse_final_weighted, rmse_init_R_weighted, \
    rmse_init_t_weighted, rmse_final_R_weighted, \
    rmse_final_t_weighted = compute_rmse_weighted(initial_guess_main_rear,
                                                  XOpt_T_main_rear, A, B, rho,
                                                  omega)
    rmse_init_unweighted, rmse_final_unweighted, rmse_init_R_unweighted, \
    rmse_init_t_unweighted, rmse_final_R_unweighted, \
    rmse_final_t_unweighted = compute_rmse_unweighted(initial_guess_main_rear,
                                                      XOpt_T_main_rear, A, B)
    rmses = [
        rmse_init_unweighted, rmse_final_unweighted, rmse_init_weighted,
        rmse_final_weighted, rmse_init_R_unweighted, rmse_init_t_unweighted,
        rmse_final_R_unweighted, rmse_final_t_unweighted, rmse_init_R_weighted,
        rmse_init_t_weighted, rmse_final_R_weighted, rmse_final_t_weighted
    ]

    # Display and save RMSEs
    outpath = os.path.join(ANALYSIS_RESULTS_PATH, "main_rear_rmse.txt")
    display_and_save_rmse(rmses, outpath)

    # Save final estimates
    final_estimate_outpath = os.path.join(FINAL_ESTIMATES_PATH,
                                          "main_rear_final.txt")
    np.savetxt(final_estimate_outpath, XOpt_T_main_rear)
    ################################################################################

    ######################## FRONT REAR CALIBRATION ################################
    ### PARAMETERS ###
    A = np.array(rear_rel_poses)  # First set of poses
    B = np.array(front_rel_poses)  # Second set of poses
    N = min(A.shape[0], B.shape[0])
    # If either odometry has high variance, reject the sample
    r = np.logical_or(np.array(front_reject[:N]), np.array(rear_reject[:N]))
    # Use the larger rotation/translation variance of the two odometries
    omega = np.max([var_R_front, var_R_rear])
    rho = np.max([var_t_front, var_t_rear])
    ### PARAMETERS ###

    cost_front_rear = lambda x: cost(x, A, B, r, rho, omega, WEIGHTED)
    # Carry out optimization for front-rear homogeneous transformations
    # (2a) Set up the optimization problem between front and rear
    problem_front_rear = Problem(manifold=manifold, cost=cost_front_rear)
    # (3) Instantiate a Pymanopt solver
    solver_front_rear = CustomSteepestDescent()
    Xopt_front_rear = solver_front_rear.solve(problem_front_rear,
                                              x=X0_front_rear)
    print("Initial Guess for Front-Rear Transformation: \n {}".format(
        initial_guess_front_rear))
    print("Optimal solution between front and rear reference frames: \n {}".
          format(Xopt_front_rear))

    # Take intermediate values for plotting
    estimates_x_front_rear = solver_front_rear.estimates
    errors_front_rear = solver_front_rear.errors
    iters_front_rear = solver_front_rear.iterations

    # Metrics dictionary
    estimates_dict_front_rear = {
        i: T
        for i, T in zip(iters_front_rear, estimates_x_front_rear)
    }
    error_dict_front_rear = {
        i: e
        for i, e in zip(iters_front_rear, errors_front_rear)
    }

    # Save intermediate results to a pkl file
    estimates_fname_front_rear = os.path.join(ANALYSIS_RESULTS_PATH,
                                              "estimates_front_rear.pkl")
    error_fname_front_rear = os.path.join(ANALYSIS_RESULTS_PATH,
                                          "error_front_rear.pkl")

    # Save estimates to pickle file
    with open(estimates_fname_front_rear, "wb") as pkl_estimates:
        pickle.dump(estimates_dict_front_rear, pkl_estimates)

    # Save error to pickle file
    with open(error_fname_front_rear, "wb") as pkl_error:
        pickle.dump(error_dict_front_rear, pkl_error)

    # Calculate difference between initial guess and final
    XOpt_T_front_rear = construct_pose(Xopt_front_rear[0],
                                       Xopt_front_rear[1].reshape((3, 1)))
    print("DIFFERENCE IN MATRICES: \n {}".format(
        np.subtract(XOpt_T_front_rear, initial_guess_front_rear)))

    # Compute the weighted and unweighted RMSE
    rmse_init_weighted, rmse_final_weighted, rmse_init_R_weighted, \
    rmse_init_t_weighted, rmse_final_R_weighted, \
    rmse_final_t_weighted = compute_rmse_weighted(initial_guess_front_rear,
                                                  XOpt_T_front_rear, A, B, rho,
                                                  omega)
    rmse_init_unweighted, rmse_final_unweighted, rmse_init_R_unweighted, \
    rmse_init_t_unweighted, rmse_final_R_unweighted, \
    rmse_final_t_unweighted = compute_rmse_unweighted(initial_guess_front_rear,
                                                      XOpt_T_front_rear, A, B)
    rmses = [
        rmse_init_unweighted, rmse_final_unweighted, rmse_init_weighted,
        rmse_final_weighted, rmse_init_R_unweighted, rmse_init_t_unweighted,
        rmse_final_R_unweighted, rmse_final_t_unweighted, rmse_init_R_weighted,
        rmse_init_t_weighted, rmse_final_R_weighted, rmse_final_t_weighted
    ]

    # Display and save RMSEs
    outpath = os.path.join(ANALYSIS_RESULTS_PATH, "front_rear_rmse.txt")
    display_and_save_rmse(rmses, outpath)

    # Save final estimates
    final_estimate_outpath = os.path.join(FINAL_ESTIMATES_PATH,
                                          "front_rear_final.txt")
    np.savetxt(final_estimate_outpath, XOpt_T_front_rear)
    ################################################################################

    # Display all results
    print("_________________________________________________________")
    print("_____________________ALL RESULTS_________________________")
    print("_________________________________________________________")
    print("Initial Guess for Main-Front Transformation: \n {}".format(
        initial_guess_main_front))
    print("Optimal solution between main and front reference frames: \n {}".
          format(Xopt_main_front))
    print("_________________________________________________________")
    print("Initial Guess for Main-Rear Transformation: \n {}".format(
        initial_guess_main_rear))
    print("Optimal solution between main and rear reference frames: \n {}".
          format(Xopt_main_rear))
    print("_________________________________________________________")
    print("Initial Guess for Front-Rear Transformation: \n {}".format(
        initial_guess_front_rear))
    print("Optimal solution between front and rear reference frames: \n {}".
          format(Xopt_front_rear))
    print("_________________________________________________________")
Ejemplo n.º 32
        # Run the chain (with burn-in).
        samples, is_accepted = tfp.mcmc.sample_chain(
            num_results=num_results,
            num_burnin_steps=num_burnin_steps,
            current_state=start_point,
            #current_state=np.random.randn(1, num_weights),
            kernel=adaptive_hmc,
            trace_fn=lambda _, pkr: pkr.inner_results.is_accepted)
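        # With a trace_fn supplied, tfp.mcmc.sample_chain returns the sampled
        # states together with the traced values; here the trace is the
        # per-step acceptance flag taken from the adaptive kernel's inner HMC
        # results.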

        sample_mean = tf.reduce_mean(samples)
        sample_stddev = tf.math.reduce_std(samples)
        is_accepted = tf.reduce_mean(tf.cast(is_accepted, dtype=tf.float32))
        return sample_mean, sample_stddev, is_accepted, samples

    tmp = pyreadr.read_r('hmcstartpoints')
    tmp1 = tmp['D1'].to_numpy()
    tmp2 = tmp1.reshape(13, 100)

    all_samples = []  # Collect every chain's draws so all 100 chains reach the CSV
    for start in range(100):
        spts = tmp2[:, start]
        sample_mean, sample_stddev, is_accepted, samples = run_chain(
            spts.reshape(1, 13))

        print('mean:{:.4f}  stddev:{:.4f}  acceptance:{:.4f}'.format(
            sample_mean.numpy(), sample_stddev.numpy(), is_accepted.numpy()))

        sam = samples.numpy().reshape(num_results, 13)
        all_samples.append(sam)

    # Stack the chains (100 * num_results rows, 13 columns) and write to CSV
    np.savetxt("multiple_hmc.csv", np.concatenate(all_samples), delimiter=',',
               fmt='%f')