type=float,
                    default=0.0001,
                    help='tunable hyperparameter')
parser.add_argument('--ADAM_BETA_D',
                    type=float,
                    default=0.5,
                    help='tunable hyperparameter')
parser.add_argument('--ADAM_BETA_G',
                    type=float,
                    default=0.5,
                    help='tunable hyperparameter')

FLAGS = parser.parse_args()

# Load the datasets that the two file arguments point to
Train_examples = ioUtil.load_examples(FLAGS.train_hdf5, 'names')
Test_examples = ioUtil.load_examples(FLAGS.test_hdf5, 'names')
'''Rearrange the data'''
Train_data = ioUtil.arrange_datas(Train_examples)
Test_data = ioUtil.arrange_datas(Test_examples)

############# FLAG things #################################
FLAGS.point_num_in = Train_examples.skeleton_in.shape[1]    # shape[1]: number of points per example
FLAGS.point_num_out = Train_examples.pointSet_out.shape[1]

FLAGS.example_num = Train_examples.skeleton_in.shape[0]
EXAMPLE_NUM = FLAGS.example_num

TRAINING_EPOCHES = FLAGS.epoch
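
# A quick shape sanity check (a sketch, not in the original): the indexing
# above implies skeleton_in is [example_num, point_num_in, 3] and
# pointSet_out is [example_num, point_num_out, 3].
assert Train_examples.skeleton_in.shape[0] == FLAGS.example_num
assert Train_examples.pointSet_out.shape[1] == FLAGS.point_num_out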
Example #2
        yy = tf.expand_dims(y, -1) # [batch_size, npts, 3, 1]
        yy = tf.tile(yy, tf.stack([1, 1, 1, nump_x])) # [batch_size, npts, 3, nump_x]
        
        yy = tf.transpose(yy, perm=[0, 3, 2, 1]) # swap axes 1 and 3: [batch_size, nump_x, 3, npts]

        diff = tf.subtract(xx, yy) # difference between every point in xx and every point in yy
        square_diff = tf.square(diff) # element-wise square

        square_dist = tf.reduce_sum(square_diff, 2) # sum over the feature (xyz) axis, reducing it

        return square_dist  
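
# A NumPy cross-check for the pairwise squared distance above (a sketch, not
# part of the original; assumes numpy is imported as np, as elsewhere in this
# file). For x of shape [batch, nump_x, 3] and y of shape [batch, npts, 3],
# the result matches square_dist's [batch, nump_x, npts] layout.
def pairwise_square_dist_np(x, y):
    diff = x[:, :, None, :] - y[:, None, :, :]  # broadcast to [batch, nump_x, npts, 3]
    return np.sum(diff ** 2, axis=-1)           # sum out the xyz axis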


# Load the test dataset from the given hdf5 file
Test_examples = ioUtil.load_examples('../data_hdf5/horse_seg_test.hdf5', 'names')
#segmentation tags
Tags = txtReader.read_txt("../data_hdf5/seg.txt")
tags = np.squeeze(Tags)

shape_in = Test_examples.pointSet_out

inputpoints = shape_in[0:1, ...]
outputpoints = shape_in[7:8, ...]
#inputpoints = np.random.rand(1, 10, 3)
#transform = np.random.rand(1, 10, 3)
#outputpoints = inputpoints + transform

# FLAGS
FLAGS = collections.namedtuple("FLAGS", "gpu, batch_size, point_num_out, point_num_in, range_max, radiusScal")
FLAGS.gpu = 0
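
# A sketch of an alternative (assumption, not the author's approach): the
# lines above assign attributes on the namedtuple *class*, which works but is
# unusual; types.SimpleNamespace is a plain mutable container that does the
# same job. Values here are hypothetical placeholders.
import types
flags_sketch = types.SimpleNamespace(gpu=0, batch_size=1, point_num_out=2048,
                                     point_num_in=19, range_max=1.0,
                                     radiusScal=1.0)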
Example #3
        #print('names : ', nametosave[0])
        ioUtil.output_point_cloud_ply(Predicted_xyz1, nametosave, output_dir,
                                      'Ep' + '_predicted_' + 'X1')
        #ioUtil.output_point_cloud_ply( Predicted_xyz4, nametosave, output_dir,
        #   'Ep' + '_predicted_' + 'X4')   
        #ioUtil.output_point_cloud_ply( Predicted_xyz8, nametosave, output_dir,
        #   'Ep' + '_predicted_' + 'X8')    

    sess.close()  # actually close the session; the bare tf.Session.close was a no-op attribute reference
    Predicted_xyz = np.squeeze(Predicted_xyz1)
    return Predicted_xyz
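
# A hedged sketch (not the author's code): a context-managed session closes
# itself even on exceptions, avoiding the manual sess.close() above; the
# fetches/feed_dict names are hypothetical.
def run_with_managed_session(fetches, feed_dict=None):
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:
        return sess.run(fetches, feed_dict=feed_dict)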

# main function
if __name__ == '__main__':
    modelPath = 'myNet/trained_models/'
    data = ioUtil.load_examples('Skeletons/horse.hdf5', 'names')
    Y = data.skeleton_in
    N = Y.shape[0]
    D = Y.shape[1]
    pointSet_in = Y[0,:] # one skeleton, shape [19, 3]
    nametosave = data.names[0]
    print(type(pointSet_in))
    mustSavePly = True

    with tf.Graph().as_default():
        model = load_model()
        # checkpoint: a binary file containing the weight variables, bias variables and other variables
        config = tf.ConfigProto(allow_soft_placement = True)
        sess = tf.Session(config = config)
        
        #metaPath = modelPath + 'epoch_200.ckpt.meta'
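        # Hedged sketch of a typical TF1 restore for the checkpoint named in
        # the commented line above (an assumption; the original restore code
        # is elided here):
        #   saver = tf.train.import_meta_graph(metaPath)
        #   saver.restore(sess, modelPath + 'epoch_200.ckpt')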
Example #4
        print('\t locate results:\n', results)
        #main_ax.scatter(xx_ske[:,0], xx_ske[:,1], s=50, alpha=0.3)
        main_ax.scatter(final_X[0], final_X[1], s=30, alpha=0.3)
        plt.draw()

        nav_init = np.array(final_X).reshape([-1,2])

np.random.seed(42)  # fix the seed so random number generation is reproducible
gpflow.settings.numerics.quadrature = 'error'  # throw error if quadrature is used for kernel expectations

if __name__ == '__main__':
    data = ioUtil.load_examples(FLAGS.hdf5, 'names')
    skeletons = data.skeleton_in
    pointSet = data.pointSet_out
    print('Number of points * number of dimensions', skeletons.shape)

    # create model
    Q = 2   # latent dimensionality after PCA reduction
    M = 10  # number of inducing points
    N = skeletons.shape[0]
    D = skeletons.shape[1]
    Y = np.reshape(skeletons, (N, D*3))
    pointSet = np.reshape(pointSet, (N, 2048, 3))
    # PCA reduction: take the first Q principal directions as the initial latent coordinates (N x Q)
    X_mean = gpflow.gplvm.PCA_reduce(Y, Q)

    # GPLVM
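    # Hedged sketch of the next step (an assumption; constructor details vary
    # across gpflow versions): a Bayesian GPLVM initialised from the PCA
    # embedding might look roughly like
    #   kern = gpflow.kernels.RBF(Q, ARD=True)
    #   m = gpflow.gplvm.BayesianGPLVM(X_mean=X_mean, X_var=0.1*np.ones((N, Q)),
    #                                  Y=Y, kern=kern, M=M)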