def __getitem__(self, index):
    refine_path, input_path, gt_path, i, j, k, f, mod, seq = self.random_data_paths[index]
    fp = open(refine_path, 'rb')
    refine = binvox_rw.read_as_3d_array(fp, fix_coords=False).data
    refine = refine.astype('float32')
    fp = open(input_path, 'rb')
    input = binvox_rw.read_as_3d_array(fp, fix_coords=False).data
    input = input.astype('float32')
    # read the ShapeNet-aligned ground truth
    fp = open(gt_path, 'rb')
    voxel_data = binvox_rw.read_as_3d_array(fp, fix_coords=False).data
    gt = np.zeros((2, 128, 128, 128), dtype='float32')
    gt[0, :, :, :] = voxel_data < 1
    gt[1, :, :, :] = voxel_data
    refine = refine[i * 32:(i + 1) * 32, j * 32:(j + 1) * 32, k * 32:(k + 1) * 32]
    input = input[i * 64:(i + 1) * 64, j * 64:(j + 1) * 64, k * 64:(k + 1) * 64]
    gt = gt[:, i * 64:(i + 1) * 64, j * 64:(j + 1) * 64, k * 64:(k + 1) * 64]
    refine = torch.from_numpy(refine[None, :, :, :]).type(torch.FloatTensor)
    input = torch.from_numpy(input[None, :, :, :]).type(torch.FloatTensor)
    gt = torch.from_numpy(gt).type(torch.FloatTensor)
    return refine, input, gt, i, j, k, f, mod, seq

def read_coords_from_binvox(fixed_points_file, random_points_file, edge_file,
                            offset=0, f_all=True, half=False):
    with open(fixed_points_file, 'rb') as f, open(random_points_file, 'rb') as r:
        fixed = read_as_3d_array(f)
        random = read_as_3d_array(r)
    f_coords = np.transpose(dense_to_sparse(fixed.data), (1, 0)).copy()
    r_coords = np.transpose(dense_to_sparse(random.data), (1, 0)).copy()
    if half:
        f_idx = []
        for i in range(len(f_coords)):
            if f_coords[i][0] < 256 and np.random.random() < 0.3:
                f_idx.append(i)
        r_idx = []
        for i in range(len(r_coords)):
            if r_coords[i][0] < 256 and np.random.random() < 0.03:
                r_idx.append(i)
    elif not f_all:
        f_idx = np.random.choice(f_coords.shape[0], 900, replace=False)
        r_idx = np.random.choice(r_coords.shape[0], 300, replace=False)
    edge_list = np.load(edge_file) + offset
    if f_all:
        return f_coords, r_coords, edge_list
    new_edge_list = []
    for e in edge_list:
        if e[0] in f_idx and e[1] in f_idx:
            new_edge_list.append([
                np.where(f_idx == e[0])[0][0],
                np.where(f_idx == e[1])[0][0]
            ])
    return f_coords[f_idx], r_coords[r_idx], new_edge_list

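# Usage note (a hedged sketch; the file names below are placeholders, not from
# the source): read_coords_from_binvox has three modes. With f_all=True it
# returns every fixed/random voxel coordinate plus the raw edge list; with
# f_all=False it subsamples 900 fixed and 300 random points and re-indexes the
# edges to the subsample; with half=True it keeps a random fraction of points
# whose first coordinate is below 256.
# f_coords, r_coords, edges = read_coords_from_binvox(
#     'fixed.binvox', 'random.binvox', 'edges.npy', offset=0, f_all=True)
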
def demo():
    '''
    Demo of how to use this class.
    '''
    import binvox_rw

    # Constructor
    sc = Shape_complete(verbose=True)

    # Read demo binvox files as (64*64*64) arrays
    with open('demo/occupy.binvox', 'rb') as f:
        occ = binvox_rw.read_as_3d_array(f).data
    with open('demo/non_occupy.binvox', 'rb') as f:
        non = binvox_rw.read_as_3d_array(f).data

    # Complete shape
    out = sc.complete(occ=occ, non=non, verbose=False)

    # Thresholding. Threshold is set to 0.5
    th = 0.5
    out[out >= th] = 1
    out[out < th] = 0

    # Save to file for demo
    vox = binvox_rw.Voxels(out, [64, 64, 64], [0, 0, 0], 1, 'xyz')
    with open('demo/output.binvox', 'wb') as f:
        vox.write(f)
    print('Output saved to demo/output.binvox.')
    print('Please use ./viewvox demo/output.binvox to visualize the result.')

def get_batch(self):
    if self.idx >= len(self.data_paths):
        self.idx = 0
    refine_path, input_path, gt_path, f, mod, seq = self.data_paths[self.idx]
    fp = open(refine_path, 'rb')
    refine = binvox_rw.read_as_3d_array(fp, fix_coords=False).data
    refine = refine.astype('float32')
    fp = open(input_path, 'rb')
    input = binvox_rw.read_as_3d_array(fp, fix_coords=False).data
    input = input.astype('float32')
    fp = open(gt_path, 'rb')
    voxel_data = binvox_rw.read_as_3d_array(fp, fix_coords=False).data
    gt = np.zeros((128, 128, 128), dtype='int64')
    gt[:] = voxel_data
    refine_batch = []
    input_batch = []
    gt_batch = []
    for i in range(2):
        for j in range(2):
            for k in range(2):
                refine_batch.append(refine[None, None, i * 32:(i + 1) * 32,
                                           j * 32:(j + 1) * 32,
                                           k * 32:(k + 1) * 32])
                input_batch.append(input[None, None, i * 64:(i + 1) * 64,
                                         j * 64:(j + 1) * 64,
                                         k * 64:(k + 1) * 64])
                gt_batch.append(gt[None, i * 64:(i + 1) * 64,
                                   j * 64:(j + 1) * 64,
                                   k * 64:(k + 1) * 64])
    refine_batch = np.concatenate(refine_batch, axis=0)
    input_batch = np.concatenate(input_batch, axis=0)
    gt_batch = np.concatenate(gt_batch, axis=0)
    refine_batch = torch.from_numpy(refine_batch).type(torch.FloatTensor)
    input_batch = torch.from_numpy(input_batch).type(torch.FloatTensor)
    gt_batch = torch.from_numpy(gt_batch).type(torch.LongTensor)
    self.idx += 1
    return refine_batch, input_batch, gt_batch, f, mod, seq

def demo():
    '''
    Publish sample data to ROS
    '''
    global base_path
    base_path = rospkg.RosPack().get_path('mps_shape_completion')

    # Read demo binvox files as (64*64*64) arrays
    with open(base_path + '/demo/occupy.binvox', 'rb') as f:
        occ = binvox_rw.read_as_3d_array(f).data
    with open(base_path + '/demo/non_occupy.binvox', 'rb') as f:
        non = binvox_rw.read_as_3d_array(f).data

    rospy.init_node('shape_demo_loader')
    rospy.wait_for_service('complete_shape')
    pub = rospy.Publisher('local_occupancy',
                          numpy_msg(ByteMultiArray),
                          queue_size=10)
    rospy.Subscriber("local_occupancy_predicted", numpy_msg(ByteMultiArray),
                     callback)
    time.sleep(1)
    print("Requesting shape completion")
    pub.publish(vox_to_msg(occ))
    rospy.spin()

def read_voxel(self):
    '''
    Reads in a triangulated 3D model file (.obj, .stl, etc.), rasterizes it
    using 'binvox', and saves the data as a 3D numpy array of 0's and 1's
    '''
    if len(self.filename) != 0:
        binvox = self.filename[:self.filename.rfind('.')] + '.binvox'
        if not os.path.isfile(binvox):
            subprocess.call("./binvox -d " + str(self.size) + " " + self.filename,
                            shell=True)
        fid = open(binvox, 'rb')  # binvox files must be read in binary mode
        model = binvox_rw.read_as_3d_array(fid)
        if model.dims[0] != self.size:
            os.remove(binvox)
            subprocess.call("./binvox -d " + str(self.size) + " " + self.filename,
                            shell=True)
            fid = open(binvox, 'rb')
            model = binvox_rw.read_as_3d_array(fid)
        self.voxel = 1 * model.data
        if self.scale != 1:
            self.pad_voxel([self.resolution] * 3)

def evaluate_instance(file_name, gtfolder=None):
    if 'imview' in file_name or '_1' in file_name:
        return [0]
    prefl = file_name
    names = prefl.split('/')
    cat = names[-3]
    md5 = names[-2]
    gtfl = '%s/%s/%s/model-0.45.binvox' % (gtfolder, cat, md5)
    try:
        with open(prefl, 'rb') as f:
            data = binvox_rw.read_as_3d_array(f)
        with open(gtfl, 'rb') as f:
            data2 = binvox_rw.read_as_3d_array(f)
    except:
        print('Error in read data')
        print(prefl)
        print(gtfl)
        return [0]
    iouall = data.data | data2.data
    iouoverlap = data.data & data2.data
    iouthis = np.sum(iouoverlap) / (np.sum(iouall) + 1e-8)
    iouthisgt = np.sum(iouoverlap) / (np.sum(data2.data) + 1e-8)
    iouthispre = np.sum(iouoverlap) / (np.sum(data.data) + 1e-8)
    return [cat, iouthis, iouthisgt, iouthispre]

def demo():
    '''
    Publish sample data to ROS
    '''
    # Read demo binvox files as (64*64*64) arrays
    with open('demo/occupy.binvox', 'rb') as f:
        occ = binvox_rw.read_as_3d_array(f).data
    with open('demo/non_occupy.binvox', 'rb') as f:
        non = binvox_rw.read_as_3d_array(f).data

    msg = ByteMultiArray()
    msg.data = (occ.astype(int) - non.astype(int)).flatten().tolist()
    msg.layout.dim.append(
        MultiArrayDimension(label='x', size=DIM, stride=DIM * DIM * DIM))
    msg.layout.dim.append(
        MultiArrayDimension(label='y', size=DIM, stride=DIM * DIM))
    msg.layout.dim.append(MultiArrayDimension(label='z', size=DIM, stride=DIM))

    rospy.init_node('shape_demo_loader')
    pub = rospy.Publisher('local_occupancy',
                          numpy_msg(ByteMultiArray),
                          queue_size=10)
    rospy.Subscriber("local_occupancy_predicted", numpy_msg(ByteMultiArray),
                     callback)
    time.sleep(5)
    pub.publish(msg)
    rospy.spin()

def __getitem__(self, index):
    rgb_path, input32_path, gt32_path, input64_path, gt64_path, f, mod, seq = self.data_paths[index]
    if self.train:
        im = Image.open(rgb_path)
        im = self.dataAugmentation(im)  # random crop
    else:
        im = Image.open(rgb_path)
        im = self.validating(im)  # center crop
    data = self.transforms(im)  # scale
    data = data[:3, :, :]

    fp = open(input32_path, 'rb')
    input32 = binvox_rw.read_as_3d_array(fp, fix_coords=False).data
    input32 = input32.astype('float32')
    input32 = torch.from_numpy(input32[None, :, :, :]).type(torch.FloatTensor)

    fp = open(gt32_path, 'rb')
    voxel_data = binvox_rw.read_as_3d_array(fp, fix_coords=False).data
    gt32 = np.zeros((32, 32, 32), dtype='int64')
    gt32[:] = voxel_data
    gt32 = torch.from_numpy(gt32).type(torch.LongTensor)

    fp = open(input64_path, 'rb')
    input64 = binvox_rw.read_as_3d_array(fp, fix_coords=False).data
    input64 = input64.astype('float32')
    input64 = torch.from_numpy(input64[None, :, :, :]).type(torch.FloatTensor)

    fp = open(gt64_path, 'rb')
    voxel_data = binvox_rw.read_as_3d_array(fp, fix_coords=False).data
    gt64 = np.zeros((64, 64, 64), dtype='int64')
    gt64[:] = voxel_data
    gt64 = torch.from_numpy(gt64).type(torch.LongTensor)

    return data, input32, gt32, input64, gt64, f, mod, seq

def jaccard_similarity(mesh_filepath0, mesh_filepath1, grid_size=40, exact=True):
    temp_mesh0_filepath = tempfile.mktemp(suffix=".ply")
    temp_mesh1_filepath = tempfile.mktemp(suffix=".ply")
    binvox0_filepath = temp_mesh0_filepath.replace(".ply", ".binvox")
    binvox1_filepath = temp_mesh1_filepath.replace(".ply", ".binvox")
    os.symlink(os.path.abspath(mesh_filepath0), temp_mesh0_filepath)
    os.symlink(os.path.abspath(mesh_filepath1), temp_mesh1_filepath)

    mesh0 = plyfile.PlyData.read(temp_mesh0_filepath)
    minx, maxx = minmax(mesh0['vertex']['x'])
    miny, maxy = minmax(mesh0['vertex']['y'])
    minz, maxz = minmax(mesh0['vertex']['z'])

    # -d: specify voxel grid size (default 256, max 1024) (no max when using -e)
    # -e: exact voxelization (any voxel with part of a triangle gets set) (does not use graphics card)
    # -bb <minx> <miny> <minz> <maxx> <maxy> <maxz>: force a different input model bounding box
    cmd_base = "binvox -pb "
    if exact:
        cmd_base += "-e "
    cmd_base += "-d " + str(grid_size) + " -bb " + str(minx) + " " + str(
        miny) + " " + str(minz) + " " + str(maxx) + " " + str(
            maxy) + " " + str(maxz)
    mesh0_cmd = cmd_base + " " + temp_mesh0_filepath
    mesh1_cmd = cmd_base + " " + temp_mesh1_filepath

    process = subprocess.Popen(mesh0_cmd.split(" "), stdout=subprocess.PIPE)
    command1_output, _ = process.communicate()
    process = subprocess.Popen(mesh1_cmd.split(" "), stdout=subprocess.PIPE)
    command2_output, _ = process.communicate()

    # binvox files are binary, so open them in 'rb' mode
    with open(binvox0_filepath, 'rb') as mesh0_binvox_file:
        mesh0_binvox = binvox_rw.read_as_3d_array(mesh0_binvox_file)
    with open(binvox1_filepath, 'rb') as mesh1_binvox_file:
        mesh1_binvox = binvox_rw.read_as_3d_array(mesh1_binvox_file)

    jaccard = _jaccard_distance(mesh0_binvox.data, mesh1_binvox.data)

    if os.path.exists(temp_mesh0_filepath):
        os.remove(temp_mesh0_filepath)
    if os.path.exists(temp_mesh1_filepath):
        os.remove(temp_mesh1_filepath)
    if os.path.exists(binvox0_filepath):
        os.remove(binvox0_filepath)
    if os.path.exists(binvox1_filepath):
        os.remove(binvox1_filepath)

    return jaccard

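# Hedged usage sketch (mesh paths below are hypothetical): both meshes are
# voxelized by the external `binvox` tool into the same bounding box and grid
# size, so the returned value is the voxel-wise intersection-over-union of the
# two occupancy grids.
# iou = jaccard_similarity('mesh_a.ply', 'mesh_b.ply', grid_size=40, exact=True)
# print(iou)
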
def get_model(self, material_name, product_name):
    '''
    Get the 3D-array form of the .obj file
    '''
    with open('model_binvox/%s' % material_name, 'rb') as f:
        material_binvox = binvox_rw.read_as_3d_array(f)
        material = np.array([material_binvox.data])[0, :, :, :]
    with open('model_binvox/%s' % product_name, 'rb') as f:
        product_binvox = binvox_rw.read_as_3d_array(f)
        product = np.array([product_binvox.data])[0, :, :, :]
    return material, product

def make_boundary(self, hx):
    boundary = (hx == -2)
    all_vox_files = glob.glob('../../Flow-Sculpter/data/train/**/*.binvox')
    num_file_try = np.random.randint(2, 6)
    for i in range(num_file_try):
        file_ind = np.random.randint(0, len(all_vox_files))
        with open(all_vox_files[file_ind], 'rb') as f:
            model = binvox_rw.read_as_3d_array(f)
        # take the middle slice of the voxel grid
        model = model.data[:, :, model.dims[2] // 2]
        model = np.array(model, dtype=np.int)
        model = np.pad(model, ((1, 1), (1, 1)), 'constant', constant_values=0)
        floodfill(model, 0, 0)
        model = np.greater(model, -0.1)
        pos_x = np.random.randint(1, hx.shape[0] - model.shape[0] - 1)
        pos_y = np.random.randint(1, hx.shape[1] - model.shape[1] - 1)
        boundary[pos_x:pos_x + model.shape[0],
                 pos_y:pos_y + model.shape[0]] = model | boundary[
                     pos_x:pos_x + model.shape[0],
                     pos_y:pos_y + model.shape[0]]
    return boundary

def test(self):
    models_dir = '/srv/3d_conv_data/ModelNet10'
    categories = [
        d for d in os.listdir(models_dir)
        if os.path.isdir(os.path.join(models_dir, d))
    ]
    examples = []
    subdir = '/' + 'train' + '/'
    for category in categories:
        for file_name in os.listdir(models_dir + '/' + category + subdir):
            if ".binvox" in file_name:
                examples.append(models_dir + '/' + category + subdir + file_name)
    subdir = '/' + 'test' + '/'
    for category in categories:
        for file_name in os.listdir(models_dir + '/' + category + subdir):
            if ".binvox" in file_name:
                examples.append(models_dir + '/' + category + subdir + file_name)
    for example in examples:
        with open(example, 'rb') as f:
            model = binvox_rw.read_as_3d_array(f).data
            if model.max() == 0:
                # report empty voxel grids
                print(example)

def binvox_to_step(binvox_file, voxel_length, voxel_width, voxel_height,
                   application_protocol="AP203"):
    """Convert a binvox file to a STEP file.

    binvox_file: the binvox file ('chair.binvox' etc.)
    voxel_length: the length of one voxel
    voxel_width: the width of one voxel
    voxel_height: the height of one voxel
    application_protocol: "AP203", "AP214IS" or "AP242DIS"
    """
    with open(binvox_file, 'rb') as f:
        model = binvox_rw.read_as_3d_array(f)
    voxel = voxel_to_TopoDS(model, voxel_length, voxel_width, voxel_height)

    # initialize the STEP exporter
    step_writer = STEPControl_Writer()
    Interface_Static_SetCVal("write.step.schema", application_protocol)

    # transfer shapes and write file
    step_writer.Transfer(voxel, STEPControl_AsIs)
    status = step_writer.Write(binvox_file[:-6] + "stp")

    if status != IFSelect_RetDone:
        raise AssertionError("load failed")

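# Hedged usage sketch: assuming a 'chair.binvox' file (the example name from
# the docstring, not shipped with this code) exists in the working directory,
# the call below writes 'chair.stp' next to it using unit-sized voxels.
# binvox_to_step('chair.binvox', 1.0, 1.0, 1.0, application_protocol="AP203")
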
def load_single_Y_vox(vox_path):
    with open(vox_path, 'rb') as ff:
        vox = binvox_rw.read_as_3d_array(ff)
        vox_grid = vox.data.astype(int)
        # Data.plotFromVoxels(vox_grid)
    return vox_grid

def main(args):
    input_dir = args.input_dir
    output_dir = args.output_dir
    log_dir = args.log_dir
    error_log = os.path.join(log_dir, "error.txt")
    target_dim = 64
    allf = os.listdir(input_dir)
    start = time.time()
    for i, f in enumerate(allf):
        if (i % 1000 == 0):
            print("Now checking the {0}-th file. Elapsed time {1:.2f} min.".
                  format(i, (time.time() - start) / 60))
        with open(os.path.join(input_dir, f), "rb") as b:
            model = binvox_rw.read_as_3d_array(b)
        volume = np.asarray(model.data * 1, dtype=np.float32)
        # print("the shape of volume")
        # print(volume.shape)
        if np.sum(volume) == 0:
            print("Empty voxel! Logging...")
            log_error(error_log, f)
            continue
        else:
            newv = dilate(volume, target_dim, 0.5)
            outpath = os.path.join(output_dir, f.split(".")[0] + ".npy")
            np.save(outpath, newv)
    return

def buildclanlogo(self):
    print("I will build clan logo")
    print(self.logoname)
    x0 = self.logopos[0]
    y0 = self.logopos[1]
    z0 = self.logopos[2]
    print(x0, y0, z0)
    with open(self.logoname, 'rb') as f:
        model = binvox_rw.read_as_3d_array(f)
    print(model.dims)
    print(model.scale)
    print(model.translate)
    # print(model.data)
    for y in range(model.dims[1]):
        # print("layer y=", y)
        layer_data = model.data[y]
        stringlayer = ""
        for x in range(model.dims[0]):
            stringlayer = stringlayer + "\n"
            for z in range(model.dims[2]):
                if model.data[x][z][y] == True:
                    stringlayer = stringlayer + '1'
                    mc.setBlock(x0 + x, y0 + y, z0 + z, 89)
                else:
                    stringlayer = stringlayer + '0'
                    mc.setBlock(x0 + x, y0 + y, z0 + z, block.AIR.id)
        # print(stringlayer)
        time.sleep(5)

def save2binvox(reconstructed_volume, data_name1, data_name2):
    # read an existing binvox as a template, replace its data, and write it back
    with open(data_name1, "rb") as f:
        bvx = brw.read_as_3d_array(f)
    bvx.dims = [256, 256, 256]
    bvx.data = reconstructed_volume
    with open(data_name2, "wb") as f:
        brw.write(bvx, f)

def convert_bin():
    for s in wanted_classes:
        directory = 'data/voxels/' + labels[s] + '/'
        # find all binvoxes
        models = glob('data/managable_objects/' + labels[s] + '/*.binvox')
        if not os.path.exists(directory):
            os.makedirs(directory)
        for m in tqdm(models):
            with open(m, 'rb') as f:
                try:
                    model = binvox_rw.read_as_3d_array(f).data
                except ValueError:
                    continue
            # remove internals from models
            # I think this makes it easier to learn
            positions = np.where(model != 0)
            new_mod = np.zeros(model.shape)
            for i, j, k in zip(*positions):
                # identifies if current voxel has an exposed face
                if np.sum(model[i - 1:i + 2, j - 1:j + 2, k - 1:k + 2]) < 27:
                    new_mod[i, j, k] = 1
            # save as np array
            sio.savemat(directory + m.split('/')[-1][:-7],
                        {'model': new_mod.astype(np.uint8)})

def gen_binary(path):
    dataset = np.zeros((1, 64 * 64 * 64 + 1))
    for file in os.listdir(path):
        suffix = file.split('.')[-1]
        if suffix == 'binvox':
            name = file.split('_')
            label = np.array([int(name[0])])
            print(label)
            f = open(path + file, 'rb')
            model = binvox_rw.read_as_3d_array(f)
            model_num = model.data * 1
            model_rot = rotations6(model_num)
            for rot_dir in model_rot:
                model_vector = np.reshape(rot_dir, (1, 64 * 64 * 64))[0]
                data = np.append(label, model_vector)
                dataset = np.vstack((dataset, data))
            # print(np.shape(model_num))
            # model_vector = np.reshape(model_num, (1, 64*64*64))[0]
            # data = np.append(label, model_vector)
            # dataset = np.vstack((dataset, data))
    dataset = dataset[1:, :]
    dataset = np.array(dataset, dtype=np.uint8)  # convert float64 to uint8
    np.random.shuffle(dataset)  # shuffle input
    return dataset

def __init__(self, mc, binvox_pathname):
    self.mc = mc
    with open(binvox_pathname, 'rb') as f:
        self.model = binvox_rw.read_as_3d_array(f)
    print(self.model.dims)
    print(self.model.scale)
    print(self.model.translate)

def rotate_voxels(rep, angle, fov):
    a = binvox_rw.read_as_3d_array(
        open("unprojected_voxels/outline_scale_47.binvox", "rb"))
    val = a.data
    val = tf.convert_to_tensor(np.expand_dims(np.expand_dims(val, 0), -1))
    voxel.fov = fov
    phi, theta = angle
    proj_val = voxel.rotate_voxel(val, phi, theta)
    num = np.where(proj_val > 0.5)[0]
    if len(num) > 0:
        print("found")
        fovs_working[fov] = len(num)
    proj_val = np.squeeze(proj_val)
    proj_val = proj_val > 0.5
    proj_imgZ = np.mean(proj_val, 0)
    imsave(
        '{}/valRotate_phi_{}_theta_{}_fov_{:04d}_Z.png'.format(
            rep, phi, theta, fov), proj_imgZ)
    save_voxel(
        np.squeeze(proj_val),
        "{}/valRotate_THETA_{}_PHI_{}_fov_{}_.binvox".format(
            rep, theta[0], phi[0], fov))

def mylogo(px, py, pz):
    pos.x = px
    pos.y = py
    pos.z = pz
    with open('mylogo.binvox', 'rb') as f:
        model = binvox_rw.read_as_3d_array(f)
    print(model.dims)
    print(model.scale)
    print(model.translate)
    # print(model.data)
    for y in range(model.dims[1]):
        print("layer y=", y)
        layer_data = model.data[y]
        stringlayer = ""
        for x in range(model.dims[0]):
            stringlayer = stringlayer + "\n"
            for z in range(model.dims[2]):
                if model.data[x][y][z] == True:
                    stringlayer = stringlayer + '1'
                    mc.setBlock(pos.x + x, pos.y + z, pos.z + y,
                                block.DIAMOND_BLOCK.id)
                else:
                    stringlayer = stringlayer + '0'
                    mc.setBlock(pos.x + x, pos.y + z, pos.z + y, block.AIR.id)
        print(stringlayer)


# mylogo(pos.x, pos.y, pos.z)

def diaoxiang(self):
    x0 = self.data[0]
    y0 = self.data[1]
    z0 = self.data[2]
    with open(self.load, 'rb') as f:
        model = binvox_rw.read_as_3d_array(f)
    # print(model.dims)
    # print(model.scale)
    # print(model.translate)
    # print(model.data)
    for y in range(model.dims[1]):
        print("layer y=", y)
        layer_data = model.data[y]
        stringlayer = ""
        for x in range(model.dims[0]):
            stringlayer = stringlayer + "\n"
            for z in range(model.dims[2]):
                if model.data[x][y][z] == True:
                    stringlayer = stringlayer + '1'
                    mc.setBlock(x0 + x, y0 + 25 + y, z0 + z, block.STONE.id)
                else:
                    stringlayer = stringlayer + '0'
                    mc.setBlock(x0 + x, y0 + 25 + y, z0 + z, block.AIR.id)
        print(stringlayer)

def __init__(self, filename='map2.binvox'):
    with open(filename, 'rb') as f:
        self.model = binvox_rw.read_as_3d_array(f)
    # the point in the airsim coordinate frame at which the voxel map was generated
    self.center = np.array([0, 0, 0])
    # resolution of each voxel
    self.res = 0.5
    # origin of the voxel grid in the airsim coordinate frame
    voxel_origin_x = -int(self.model.dims[0] / 2) * self.res + self.center[0]
    voxel_origin_y = -int(self.model.dims[1] / 2) * self.res + self.center[1]
    voxel_origin_z = -int(self.model.dims[2] / 2) * self.res + self.center[2]
    self.voxel_origin = (voxel_origin_x, voxel_origin_y, voxel_origin_z)
    # coordinates of all voxels in the airsim coordinate frame
    self.coordinates = np.zeros(self.model.dims + [3])
    self.tsdf = np.zeros(self.model.dims + [4])
    for i in range(self.model.dims[0]):
        for j in range(self.model.dims[1]):
            for k in range(self.model.dims[2]):
                x = (i - int(self.model.dims[0] / 2)) * self.res + self.center[0]
                y = (j - int(self.model.dims[1] / 2)) * self.res + self.center[1]
                z = (k - int(self.model.dims[2] / 2)) * self.res + self.center[2]
                self.coordinates[i, j, k, :] = np.array([x, y, z])
    p = multiprocessing.Pool(multiprocessing.cpu_count())
    indices = []
    for i in range(self.model.dims[0]):
        for j in range(self.model.dims[1]):
            for k in range(self.model.dims[2]):
                indices.append((i, j, k))
    p.map(self.calculate_tsdf, indices)
    p.close()
    p.join()

def create_voxel_file_list(folderPath, expression, printMessages=True):
    path = os.getcwd() + "/" + folderPath
    if (printMessages):
        print("Loading voxel files from " + path)
    fileList = []
    for i in os.listdir(path):
        if re.match(expression, i):
            fileList.append(path + i)
    assert (len(fileList) > 0)
    if (printMessages):
        print("Found " + str(len(fileList)) + " files")
    modelList = []
    for filePath in fileList:
        with open(filePath, 'rb') as f:
            geom = binvox_rw.read_as_3d_array(f)
            # Don't append any empty files.
            if (geom.data.max() > 0):
                modelList.append(geom)
            else:
                print(filePath + " was empty!!")
    if (printMessages):
        print("Done loading voxel files.")
    return modelList

def _load_binvox(self, path):
    '''
    loads voxels saved in binvox format. see also _load_vox
    '''
    if not os.path.exists(path):
        raise Exception('path does not exist: ' + path)
    with open(path, 'rb') as fin:
        voxels = read_as_3d_array(fin)
    return torch.from_numpy(voxels.data.astype('uint8'))

def create_occupancy_grid_from_obstacles(obstacles, mins_xyz, step_size_xyz,
                                         dims_xyz, use_binvox=False):
    voxel_grid = np.zeros(shape=dims_xyz)
    for obstacle in obstacles:
        if use_binvox:
            # binvox files are binary, so open them in 'rb' mode
            vox = binvox_rw.read_as_3d_array(
                open(obstacle.mesh_filepath.replace('.ply', '.binvox'), 'rb'))
            vertices = binvox_to_points(vox)
        else:
            vertices = read_vertex_points_from_ply_filepath(
                obstacle.mesh_filepath)
        frame = tf_conversions.fromMsg(obstacle.pose_stamped.pose)
        transform = tf_conversions.toMatrix(frame)
        vertices_transformed = transform_points(vertices, transform)
        if use_binvox:
            voxel_grid += add_obstacles_to_reachability_space_full_binvox(
                vertices_transformed, mins_xyz, step_size_xyz, dims_xyz)
        else:
            voxel_grid += add_obstacles_to_reachability_space_full(
                vertices_transformed, mins_xyz, step_size_xyz, dims_xyz)
    voxel_grid[np.where(voxel_grid > 0)] = 1
    return voxel_grid

def generatevoxels(stlfile):
    os.system("binvox -cb -pb -d 32 " + str(stlfile))
    try:
        binvoxpath = stlfile.split(".")[0] + ".binvox"
        with open(binvoxpath, 'rb') as f:
            model = binvox_rw.read_as_3d_array(f)
            voxel_array = model.data
            voxel_array = voxel_array * 1  # convert to zeros and ones
            print(voxel_array)
            print(voxel_array.shape)
            # model[False] = 0
            # model[True] = 1
            # print(voxel_array)
            # print(type(voxel_array))
            # print(voxel_array.shape)
            if render == True:
                render_in_3d(voxel_array)
            voxel_array = np.expand_dims(voxel_array, -1)
            os.system("rm " + str(binvoxpath))
            master_array.append(voxel_array)
            print("Here's how many have been processed: " + str(len(master_array)))
            if len(master_array) % 500 == 0:
                outfile = open("data/thingi10k_" + str(len(master_array)) + ".npy", "wb")
                np.save(outfile, np.array(master_array))
    except Exception as e:
        print(e)

def saveVoxelsBinvox(pose, shape, fileshape):
    if os.path.exists(fileshape):
        print('Already exists ' + fileshape)
    else:
        dict_shape = {}
        # Save gt points
        m = model.copy()
        m.betas[:] = shape
        m.pose[:] = pose
        dict_shape['points'] = m.r
        dict_shape['J_transformed'] = m.J_transformed.r
        # Write to .obj file
        obj_path = fileshape[:-10] + '.obj'
        smpl_utils.save_smpl_obj(obj_path, m, saveFaces=True)
        # Voxelize using binvox
        call([
            os.path.join(BINVOX_PATH, "binvox"), "-e", "-fit", "-d", "128",
            "%s" % obj_path
        ])
        # Read the output of binvox
        binvox_path = obj_path[:-4] + '.binvox'
        with open(binvox_path, 'rb') as f:
            binvoxModel = binvox_rw.read_as_3d_array(f)
        # Remove intermediate files
        call(["rm", obj_path])
        call(["rm", binvox_path])
        # Save binvox results to mat
        dict_shape['voxels'] = binvoxModel.data
        dict_shape['voxelsdims'] = binvoxModel.dims
        dict_shape['voxelstranslate'] = binvoxModel.translate
        dict_shape['voxelsscale'] = binvoxModel.scale
        sio.savemat(fileshape, dict_shape, do_compression=True)
        print('Saved ' + fileshape)

def getVoxelGrid(binvoxPath):
    binvoxObj = binvox_rw.read_as_3d_array(open(binvoxPath, 'rb'))
    scale = binvoxObj.scale
    translate = binvoxObj.translate
    voxelgrid = binvoxObj.data.astype('float32')
    dims = binvoxObj.dims
    return voxelgrid, dims, scale, translate

def fromMesh(self, fileName, resolution=128):
    # Voxelize the model
    call('optirun binvox', '-t {}'.format(resolution), fileName)
    voxelFile = ''.join(os.path.splitext(fileName)[:-1] + ('.binvox',))
    with open(voxelFile, 'rb') as f:
        self.voxelModel = bv.read_as_3d_array(f)
    self.fromBinaryArray(self.voxelModel.data)

def build_training_example(model_filepath, pose_filepath,
                           single_view_pointcloud_filepath, patch_size):
    # Point cloud. Shape is (number of points, 4): x, y, z, color
    pc = np.load(single_view_pointcloud_filepath)
    # remove 32-bit color channel
    pc = pc[:, 0:3]
    model_pose = np.load(pose_filepath)  # 4x4 homogeneous transform matrix
    with open(model_filepath, 'rb') as f:
        model = binvox_rw.read_as_3d_array(f)
    # import IPython
    # IPython.embed()

    points = model.data
    scale = model.scale
    translate = model.translate
    dims = model.dims

    # get numpy array of nonzero points
    non_zero_points = points.nonzero()
    num_points = len(non_zero_points[0])
    non_zero_arr = np.zeros((4, num_points))
    non_zero_arr[0] = non_zero_points[0]
    non_zero_arr[1] = non_zero_points[1]
    non_zero_arr[2] = non_zero_points[2]
    non_zero_arr[3] = 1.0

    translate_arr = np.array(translate).reshape(3, 1)
    non_zero_arr[0:3, :] = non_zero_arr[0:3, :] + translate_arr
    non_zero_arr[0:3, :] = non_zero_arr[0:3, :] / (scale * 4)

    # this is needed to recenter the binvox model at the origin; for some reason
    # the translate array does not seem to fully compensate.
    non_zero_arr[2, :] -= .09

    # this is an easier task: the y value is always the same, i.e. the model is
    # standing up at the origin.
    # pc2_out, non_zero_arr1 = self.map_pointclouds_to_world(pc, non_zero_arr, model_pose)
    pc2_out, non_zero_arr1 = map_pointclouds_to_camera_frame(pc, non_zero_arr, model_pose)

    min_x = pc2_out[0, :].min()
    min_y = pc2_out[1, :].min()
    min_z = pc2_out[2, :].min()
    max_x = pc2_out[0, :].max()
    max_y = pc2_out[1, :].max()
    max_z = pc2_out[2, :].max()
    center = (min_x + (max_x - min_x) / 2.0, min_y + (max_y - min_y) / 2.0,
              min_z + (max_z - min_z) / 2.0)

    # now non_zero_arr and pc points are in the same frame of reference.
    # since the images were captured with the model at the origin
    # we can just compute an occupancy grid centered around the origin.
    x = create_voxel_grid_around_point(pc2_out[0:3, :].T,
                                       center,
                                       voxel_resolution=.02,
                                       num_voxels_per_dim=patch_size)
    y = create_voxel_grid_around_point(non_zero_arr1.T[:, 0:3],
                                       center,
                                       voxel_resolution=.02,
                                       num_voxels_per_dim=patch_size)

    # viz.visualize_3d(x)
    # viz.visualize_3d(y)
    # viz.visualize_pointcloud(pc2_out[0:3, :].T)
    # viz.visualize_pointclouds(pc2_out.T, non_zero_arr1.T[:, 0:3], False, True)
    # import IPython
    # IPython.embed()

    return x, y

        # Fragment of a run-length-encoding binvox writer: `c` is the current
        # voxel value from the flattened grid, `state` is the value of the
        # current run, and `ctr` is the run length (this block sits inside the
        # loop over the flattened voxel data; the loop header is not part of
        # this excerpt).
        if c == state:
            ctr += 1
            # if ctr hits max, dump
            if ctr == 255:
                fp.write(chr(state))
                fp.write(chr(ctr))
                ctr = 0
        else:
            # if switch state, dump
            fp.write(chr(state))
            fp.write(chr(ctr))
            state = c
            ctr = 1
    # flush out remainders
    if ctr > 0:
        fp.write(chr(state))
        fp.write(chr(ctr))


if __name__ == '__main__':
    # ~ import doctest
    # ~ doctest.testmod()
    import numpy as np
    import binvox_rw
    with open('3D Models/761_hand-olivier_2.binvox', 'rb') as f:
        m1 = binvox_rw.read_as_3d_array(f)
    print(m1.dims)
    print(m1.scale)
    print(m1.data)

def build_training_example(binvox_file_path, model_pose_filepath,
                           single_view_pointcloud_filepath, patch_size):
    pc = np.load(single_view_pointcloud_filepath)
    pc = pc[:, 0:3]
    model_pose = np.load(model_pose_filepath)
    with open(binvox_file_path, 'rb') as f:
        model = binvox_rw.read_as_3d_array(f)

    points = model.data
    scale = model.scale
    translate = model.translate
    dims = model.dims

    # get numpy array of nonzero points
    non_zero_points = points.nonzero()
    num_points = len(non_zero_points[0])
    non_zero_arr = np.zeros((4, num_points))
    non_zero_arr[0] = non_zero_points[0]
    non_zero_arr[1] = non_zero_points[1]
    non_zero_arr[2] = non_zero_points[2]
    non_zero_arr[3] = 1.0

    translate_arr = np.array(translate).reshape(3, 1)

    # meters to centimeters
    scale /= 100
    # inches to meters
    scale /= 2.54

    non_zero_arr[0:3, :] = non_zero_arr[0:3, :] + translate_arr * 1.0 / scale
    non_zero_arr[0:3, :] = non_zero_arr[0:3, :] * scale

    # this is an easier task: the y value is always the same, i.e. the model is
    # standing up at the origin.
    # pc2_out, non_zero_arr1 = map_pointclouds_to_world(pc, non_zero_arr, model_pose)
    pc2_out, non_zero_arr1 = map_pointclouds_to_camera_frame(pc, non_zero_arr, model_pose)

    min_x = pc2_out[0, :].min()
    min_y = pc2_out[1, :].min()
    min_z = pc2_out[2, :].min()
    max_x = pc2_out[0, :].max()
    max_y = pc2_out[1, :].max()
    max_z = pc2_out[2, :].max()
    center = (min_x + (max_x - min_x) / 2.0, min_y + (max_y - min_y) / 2.0,
              min_z + (max_z - min_z) / 2.0)

    # viz.visualize_pointclouds(pc2_out.T, non_zero_arr1.T[:, 0:3], False, True)
    # import IPython
    # IPython.embed()

    # now non_zero_arr and pc points are in the same frame of reference.
    # since the images were captured with the model at the origin
    # we can just compute an occupancy grid centered around the origin.
    x = create_voxel_grid_around_point(pc2_out[0:3, :].T,
                                       center,
                                       voxel_resolution=.02,
                                       num_voxels_per_dim=patch_size)
    y = create_voxel_grid_around_point(non_zero_arr1.T[:, 0:3],
                                       center,
                                       voxel_resolution=.02,
                                       num_voxels_per_dim=patch_size)
    return x, y