def writing_pgpatch_to_base(spec_array, schema, conn, cur, writing_query, file_name, schemas, additional_offset=None):
    """Write one pgpatch into a table.

    Converts *spec_array* to a numpy-double array using *schema*, optionally
    adds *additional_offset* to every point, serializes the result to a WKB
    patch, then executes *writing_query* with ``(file_name, patch)`` and
    commits on *conn*.

    Parameters
    ----------
    spec_array : array-like
        Points to write, in a form accepted by pgp.patch_numpy_to_numpy_double.
    schema :
        Patch schema used for the conversion.
    conn, cur :
        Open DB connection and cursor (psycopg2-style).
    writing_query : str
        Parameterized SQL with two placeholders: file name and patch.
    file_name :
        Identifier stored alongside the patch.
    schemas :
        Unused here; kept for interface compatibility with callers.
    additional_offset : array-like, optional
        Offset added element-wise to every point before serialization.
    """
    import pg_pointcloud_classes as pgp
    import numpy as np

    # NOTE(review): the original author flagged this double conversion as
    # unnecessary ("casting back to numpy double should not be necessary");
    # kept for safety since pgp's exact contract is not visible here.
    numpy_double, schema = pgp.patch_numpy_to_numpy_double(
        np.array(spec_array), schema, use_scale_offset=False)
    numpy_double = np.array(numpy_double)

    # Identity comparison with None (PEP 8): '!=' may be overloaded
    # element-wise by array-like offsets and is not a reliable None test.
    if additional_offset is not None:
        numpy_double[:] += np.array(additional_offset)

    pgpatch = pgp.numpy_double_to_WKB_patch(np.array(numpy_double), schema)
    cur.execute(writing_query, (file_name, pgpatch))
    conn.commit()
def compute_dim_descriptor_from_patch(uncompressed_patch, connection_string):
    """Extract the x/y/z points of a patch and compute its dim descriptors.

    Decodes *uncompressed_patch* into a structured numpy array (using the
    schema cache held in the PL/Python-style global dict), converts the
    x/y/z fields to real-world doubles, and returns the descriptors
    produced by compute_descriptors_from_points.
    """
    import pg_pointcloud_classes as pgp

    # Per-session schema cache lives under GD['rc']['schemas']; create the
    # nested dicts on first use.
    global_dict = pgp.create_GD_if_not_exists()
    if 'rc' not in global_dict:
        global_dict['rc'] = dict()
    if 'schemas' not in global_dict['rc']:
        global_dict['rc']['schemas'] = dict()

    wanted_dims = ["x", "y", "z"]

    # Decode the patch buffer into a structured array plus header metadata.
    point_array, (patch_schema, endianness, compression, npoints) = \
        pgp.patch_string_buff_to_numpy(
            uncompressed_patch, global_dict['rc']['schemas'], connection_string)

    # Apply scale/offset so the kept dimensions become real-world doubles.
    double_points, patch_schema = pgp.patch_numpy_to_numpy_double(
        point_array[wanted_dims], patch_schema,
        use_scale_offset=True, dim_to_use=wanted_dims)

    return compute_descriptors_from_points(double_points)
def reordering_patch_following_midoc(uncompressed_patch, tot_level, stop_level, connection_string):
    """Reorder the points of a patch following the MidOc (octree) ordering.

    Decodes *uncompressed_patch*, computes an octree-based ordering of its
    x/y/z coordinates between *tot_level* and *stop_level*, applies that
    ordering to the full-dimension points and re-serializes them.

    Returns
    -------
    (wkb_ordered_patch, pt_per_class) :
        The reordered patch as WKB, and the per-class point counts from
        midoc.count_points_per_class.
    """
    import pg_pointcloud_classes as pgp
    import midoc_ordering as midoc

    # Per-session schema cache lives under GD['rc']['schemas'].
    GD = pgp.create_GD_if_not_exists()
    if 'rc' not in GD:
        GD['rc'] = dict()
    if 'schemas' not in GD['rc']:
        GD['rc']['schemas'] = dict()

    restrict_dim = ["x", "y", "z"]

    # Decode the patch buffer into a structured array plus header metadata.
    # (Removed: test-only scaffolding that built an unused temp_schema via
    # artificial_schema() on every call.)
    pt_arr, (mschema, endianness, compression, npoints) = \
        pgp.patch_string_buff_to_numpy(
            uncompressed_patch, GD['rc']['schemas'], connection_string)

    # Convert x/y/z to real-world doubles (scale/offset applied).
    numpy_double, mschema = pgp.patch_numpy_to_numpy_double(
        pt_arr[restrict_dim], mschema,
        use_scale_offset=True, dim_to_use=restrict_dim)

    # Compute the midoc ordering on the xyz coordinates.
    result = midoc.order_by_octree(numpy_double, tot_level, stop_level)
    result_completed = midoc.complete_and_shuffle_result(result, npoints)
    pt_per_class = midoc.count_points_per_class(result, stop_level)

    # Transfer the ordering back to the full-dimension points: column 0 of
    # result_completed holds the original point indices.
    reordered_arr = pt_arr[result_completed[:, 0].astype('int32')]

    # Serialize the reordered points into a new WKB patch.
    wkb_ordered_patch = pgp.numpy_double_to_WKB_patch(reordered_arr, mschema)
    return wkb_ordered_patch, pt_per_class
def reordering_patch_following_midoc(uncompressed_patch, tot_level, stop_level, connection_string):
    """Reorder the points of a patch following the MidOc (octree) ordering.

    NOTE(review): this function is defined twice in this file with identical
    behavior; in Python the later definition silently shadows the earlier
    one — the duplicate should eventually be removed.

    Decodes *uncompressed_patch*, computes an octree-based ordering of its
    x/y/z coordinates between *tot_level* and *stop_level*, applies that
    ordering to the full-dimension points and re-serializes them.

    Returns
    -------
    (wkb_ordered_patch, pt_per_class) :
        The reordered patch as WKB, and the per-class point counts from
        midoc.count_points_per_class.
    """
    import pg_pointcloud_classes as pgp
    import midoc_ordering as midoc

    # Per-session schema cache lives under GD['rc']['schemas'].
    GD = pgp.create_GD_if_not_exists()
    if 'rc' not in GD:
        GD['rc'] = dict()
    if 'schemas' not in GD['rc']:
        GD['rc']['schemas'] = dict()

    restrict_dim = ["x", "y", "z"]

    # Decode the patch buffer into a structured array plus header metadata.
    # (Removed: test-only scaffolding that built an unused temp_schema via
    # artificial_schema() on every call.)
    pt_arr, (mschema, endianness, compression, npoints) = \
        pgp.patch_string_buff_to_numpy(
            uncompressed_patch, GD['rc']['schemas'], connection_string)

    # Convert x/y/z to real-world doubles (scale/offset applied).
    numpy_double, mschema = pgp.patch_numpy_to_numpy_double(
        pt_arr[restrict_dim], mschema,
        use_scale_offset=True, dim_to_use=restrict_dim)

    # Compute the midoc ordering on the xyz coordinates.
    result = midoc.order_by_octree(numpy_double, tot_level, stop_level)
    result_completed = midoc.complete_and_shuffle_result(result, npoints)
    pt_per_class = midoc.count_points_per_class(result, stop_level)

    # Transfer the ordering back to the full-dimension points: column 0 of
    # result_completed holds the original point indices.
    reordered_arr = pt_arr[result_completed[:, 0].astype('int32')]

    # Serialize the reordered points into a new WKB patch.
    wkb_ordered_patch = pgp.numpy_double_to_WKB_patch(reordered_arr, mschema)
    return wkb_ordered_patch, pt_per_class
def writing_pgpatch_to_base(spec_array, schema, conn, cur, writing_query, file_name, schemas, additional_offset=None):
    """Write one pgpatch into a table.

    NOTE(review): this function is defined twice in this file with identical
    behavior; in Python the later definition silently shadows the earlier
    one — the duplicate should eventually be removed.

    Converts *spec_array* to a numpy-double array using *schema*, optionally
    adds *additional_offset* to every point, serializes the result to a WKB
    patch, then executes *writing_query* with ``(file_name, patch)`` and
    commits on *conn*.

    Parameters
    ----------
    spec_array : array-like
        Points to write, in a form accepted by pgp.patch_numpy_to_numpy_double.
    schema :
        Patch schema used for the conversion.
    conn, cur :
        Open DB connection and cursor (psycopg2-style).
    writing_query : str
        Parameterized SQL with two placeholders: file name and patch.
    file_name :
        Identifier stored alongside the patch.
    schemas :
        Unused here; kept for interface compatibility with callers.
    additional_offset : array-like, optional
        Offset added element-wise to every point before serialization.
    """
    import pg_pointcloud_classes as pgp
    import numpy as np

    # NOTE(review): the original author flagged this double conversion as
    # unnecessary ("casting back to numpy double should not be necessary");
    # kept for safety since pgp's exact contract is not visible here.
    numpy_double, schema = pgp.patch_numpy_to_numpy_double(
        np.array(spec_array), schema, use_scale_offset=False)
    numpy_double = np.array(numpy_double)

    # Identity comparison with None (PEP 8): '!=' may be overloaded
    # element-wise by array-like offsets and is not a reliable None test.
    if additional_offset is not None:
        numpy_double[:] += np.array(additional_offset)

    pgpatch = pgp.numpy_double_to_WKB_patch(np.array(numpy_double), schema)
    cur.execute(writing_query, (file_name, pgpatch))
    conn.commit()