def matrix_regression_data_prepare(**kwargs):
    bufferCollection = buffers.BufferCollection()
    bufferCollection.AddBuffer(buffers.X_FLOAT_DATA,
                               buffers.as_matrix_buffer(kwargs['x']))
    if 'y' in kwargs:
        bufferCollection.AddBuffer(buffers.YS,
                                   buffers.as_matrix_buffer(kwargs['y']))
    return bufferCollection
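# Minimal usage sketch for matrix_regression_data_prepare (illustrative, not
# part of the original code; assumes x is shaped (samples, features) and y is
# (samples, outputs), both float):
#
#   x = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
#   y = np.array([[0.5], [1.5]], dtype=np.float32)
#   train_data = matrix_regression_data_prepare(x=x, y=y)  # training: x and y
#   test_data = matrix_regression_data_prepare(x=x)        # prediction: x only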
def classify_body_pixels(depth, ground_labels, forest):
    assert depth.ndim == 2

    # setup test data: only pixels with a non-background ground-truth label
    pixel_indices = to_indices(0, np.where(ground_labels != background))
    buffer_collection = buffers.BufferCollection()
    buffer_collection.AddInt32MatrixBuffer(
        buffers.PIXEL_INDICES, buffers.as_matrix_buffer(pixel_indices))
    buffer_collection.AddFloat32Tensor3Buffer(
        buffers.DEPTH_IMAGES, buffers.as_tensor_buffer(depth))

    # setup predictor
    all_samples_step = pipeline.AllSamplesStep_i32f32i32(buffers.PIXEL_INDICES)
    depth_delta_feature = image_features.ScaledDepthDeltaFeature_f32i32(
        all_samples_step.IndicesBufferId,
        buffers.PIXEL_INDICES,
        buffers.DEPTH_IMAGES)
    combiner = classification.ClassProbabilityCombiner_f32(number_of_body_parts)
    forest_predictor = predict.ScaledDepthDeltaClassificationPredictin_f32i32(
        forest, depth_delta_feature, combiner, all_samples_step)

    # predict
    yprobs_buffer = buffers.Float32MatrixBuffer()
    forest_predictor.PredictYs(buffer_collection, yprobs_buffer)

    # convert per-pixel class probabilities back to image space
    yprobs = buffers.as_numpy_array(yprobs_buffer)
    m, n = depth.shape
    img_yprobs = np.zeros((m, n), dtype=np.float32)
    img_yprobs[ground_labels != background] = yprobs.max(axis=1)
    img_yhat = np.zeros((m, n), dtype=np.int32)
    img_yhat[ground_labels != background] = np.argmax(yprobs, axis=1)
    return img_yhat, img_yprobs
def classify_pixels(depth, forest):
    assert depth.ndim == 2
    m, n = depth.shape

    # setup test data: every (image, row, col) index of the single image
    pixel_indices = np.array(
        list(itertools.product(np.zeros(1), range(m), range(n))),
        dtype=np.int32)
    buffer_collection = buffers.BufferCollection()
    buffer_collection.AddInt32MatrixBuffer(
        buffers.PIXEL_INDICES, buffers.as_matrix_buffer(pixel_indices))
    buffer_collection.AddFloat32Tensor3Buffer(
        buffers.DEPTH_IMAGES, buffers.as_tensor_buffer(depth))

    # setup predictor
    all_samples_step = pipeline.AllSamplesStep_i32f32i32(buffers.PIXEL_INDICES)
    depth_delta_feature = image_features.ScaledDepthDeltaFeature_f32i32(
        all_samples_step.IndicesBufferId,
        buffers.PIXEL_INDICES,
        buffers.DEPTH_IMAGES)
    combiner = classification.ClassProbabilityCombiner_f32(number_of_body_parts)
    forest_predictor = predict.ScaledDepthDeltaClassificationPredictin_f32i32(
        forest, depth_delta_feature, combiner, all_samples_step)

    # predict
    yprobs_buffer = buffers.Float32MatrixBuffer()
    forest_predictor.PredictYs(buffer_collection, yprobs_buffer)

    # convert per-pixel class probabilities back to image space
    yprobs = buffers.as_numpy_array(yprobs_buffer)
    (_, ydim) = yprobs.shape
    img_yprobs = yprobs.reshape((m, n, ydim))
    img_yhat = np.argmax(img_yprobs, axis=2)
    return img_yhat, img_yprobs.max(axis=2)
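# Hypothetical driver for the two classifiers above (a sketch; assumes a
# forest pickled by one of the training scripts in this repo, plus gzip and
# pickle imports):
#
#   forest = pickle.load(gzip.open(forest_pickle_filename, 'rb'))
#   img_yhat, img_yprobs = classify_pixels(depth, forest)
#   body_yhat, body_yprobs = classify_body_pixels(depth, ground_labels, forest)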
def test_pickle(self):
    # test tensor
    for dtype in [np.float32, np.float64, np.int32, np.int64]:
        array = np.array([[[3, 21, 1], [22, 1, 5]],
                          [[2, 2, 2], [7, 7, 7]]], dtype=dtype)
        b1 = buffers.as_tensor_buffer(array)
        pickle.dump(b1, open("tmp.pkl", "wb"))
        b2 = pickle.load(open("tmp.pkl", "rb"))
        array2 = buffers.as_numpy_array(b2)
        self.assertTrue((array == array2).all())

    # test matrix
    for dtype in [np.float32, np.float64, np.int32, np.int64]:
        array = np.array([[3, 21, 1], [22, 1, 5],
                          [2, 2, 2], [7, 7, 7]], dtype=dtype)
        b1 = buffers.as_matrix_buffer(array)
        pickle.dump(b1, open("tmp.pkl", "wb"))
        b2 = pickle.load(open("tmp.pkl", "rb"))
        array2 = buffers.as_numpy_array(b2)
        self.assertTrue((array == array2).all())

    # test vector
    for dtype in [np.float32, np.float64, np.int32, np.int64]:
        array = np.array([3, 21, 1], dtype=dtype)
        b1 = buffers.as_vector_buffer(array)
        pickle.dump(b1, open("tmp.pkl", "wb"))
        b2 = pickle.load(open("tmp.pkl", "rb"))
        array2 = buffers.as_numpy_array(b2)
        self.assertTrue((array == array2).all())
def matrix_buffer_flatten_helper(self, X, buffer_type):
    matrix_buffer = buffers.as_matrix_buffer(X)
    assert isinstance(matrix_buffer, buffer_type)
    # without flatten=True the round trip yields a 2-D matrix
    X_back = buffers.as_numpy_array(matrix_buffer)
    self.assertTrue((X == X_back.flatten()).all())
    # with flatten=True the original 1-D shape is recovered directly
    X_back = buffers.as_numpy_array(buffer=matrix_buffer, flatten=True)
    self.assertTrue((X == X_back).all())
def construct_axis_aligned_forest(self):
    #          (0) X[0] > 2.2
    #          /             \
    #   (1) X[1] > -5      (2) [0.7 0.1 0.2]
    #    /           \
    # (3) [0.3 0.3 0.4]  (4) [0.3 0.6 0.1]
    #
    #          (0) X[0] > 5.0
    #          /             \
    #   (1) X[0] > 2.5     (2) [0.8 0.1 0.1]
    #    /           \
    # (3) [0.2 0.2 0.6]  (4) [0.2 0.7 0.1]
    path_1 = buffers.as_matrix_buffer(
        np.array([[1, 2], [3, 4], [-1, -1], [-1, -1], [-1, -1]],
                 dtype=np.int32))
    int_params_1 = buffers.as_matrix_buffer(
        np.array([[1, 0], [1, 1], [1, 0], [1, 0], [1, 0]], dtype=np.int32))
    float_params_1 = buffers.as_matrix_buffer(
        np.array([[2.2], [-5], [0], [0], [0]], dtype=np.float32))
    ys_1 = buffers.as_matrix_buffer(
        np.array([[0, 0, 0], [0, 0, 0], [0.7, 0.1, 0.2],
                  [0.3, 0.3, 0.4], [0.3, 0.6, 0.1]], dtype=np.float32))
    depth_1 = buffers.as_vector_buffer(
        np.array([0, 1, 1, 2, 2], dtype=np.int32))
    counts_1 = buffers.as_vector_buffer(
        np.array([5, 5, 5, 5, 5], dtype=np.float32))
    tree_1 = forest_data.Tree(path_1, int_params_1, float_params_1,
                              depth_1, counts_1, ys_1)

    path_2 = buffers.as_matrix_buffer(
        np.array([[1, 2], [3, 4], [-1, -1], [-1, -1], [-1, -1]],
                 dtype=np.int32))
    int_params_2 = buffers.as_matrix_buffer(
        np.array([[1, 0], [1, 0], [1, 0], [1, 0], [1, 0]], dtype=np.int32))
    float_params_2 = buffers.as_matrix_buffer(
        np.array([[5.0], [2.5], [0], [0], [0]], dtype=np.float32))
    ys_2 = buffers.as_matrix_buffer(
        np.array([[0, 0, 0], [0, 0, 0], [0.8, 0.1, 0.1],
                  [0.2, 0.2, 0.6], [0.2, 0.7, 0.1]], dtype=np.float32))
    depth_2 = buffers.as_vector_buffer(
        np.array([0, 1, 1, 2, 2], dtype=np.int32))
    counts_2 = buffers.as_vector_buffer(
        np.array([5, 5, 5, 5, 5], dtype=np.float32))
    tree_2 = forest_data.Tree(path_2, int_params_2, float_params_2,
                              depth_2, counts_2, ys_2)
    tree_2.GetExtraInfo().AddBuffer(
        "first", np.array([3, 21, 1, 22, 1, 5], dtype=np.float32))

    forest = forest_data.Forest([tree_1, tree_2])
    return forest
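# To make the flat tree encoding above concrete, here is a small pure-numpy
# sketch of walking tree_1 by hand. The conventions are assumptions inferred
# from the arrays above, not taken from the library's traversal code:
# path[node] holds the two child ids (-1 marks a leaf), int_params[node][1]
# holds the feature index, float_params[node][0] the threshold, and the
# "test is true" branch is taken to be the first child.
def traverse_toy_tree(x, path, int_params, float_params, ys):
    node = 0
    while path[node][0] != -1:  # a child id of -1 marks a leaf
        feature = int_params[node][1]
        threshold = float_params[node][0]
        node = path[node][0] if x[feature] > threshold else path[node][1]
    return ys[node]  # leaf rows hold the class distribution


def demo_traverse_toy_tree():
    path = np.array([[1, 2], [3, 4], [-1, -1], [-1, -1], [-1, -1]],
                    dtype=np.int32)
    int_params = np.array([[1, 0], [1, 1], [1, 0], [1, 0], [1, 0]],
                          dtype=np.int32)
    float_params = np.array([[2.2], [-5], [0], [0], [0]], dtype=np.float32)
    ys = np.array([[0, 0, 0], [0, 0, 0], [0.7, 0.1, 0.2],
                   [0.3, 0.3, 0.4], [0.3, 0.6, 0.1]], dtype=np.float32)
    # X[0]=3.0 > 2.2 takes the first branch to node 1; X[1]=-6.0 <= -5 takes
    # the second branch to node 4, whose distribution is [0.3, 0.6, 0.1]
    return traverse_toy_tree(np.array([3.0, -6.0]),
                             path, int_params, float_params, ys)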
def load_and_sample(pose_path, list_of_poses, number_of_pixels_per_image):
    depths, labels = load_data(pose_path, list_of_poses)
    depths_buffer = buffers.as_tensor_buffer(depths)
    del depths  # free each numpy array early to keep peak memory down
    pixel_indices, pixel_labels = sample_pixels_from_images(
        labels, number_of_pixels_per_image)
    del labels
    pixel_indices_buffer = buffers.as_matrix_buffer(pixel_indices)
    pixel_labels_buffer = buffers.as_vector_buffer(pixel_labels)
    del pixel_indices
    del pixel_labels
    return depths_buffer, pixel_indices_buffer, pixel_labels_buffer
def load_training_data(numpy_filename):
    f = open(numpy_filename, 'rb')
    depths = np.load(f)
    labels = np.load(f)  # unused, but must be read to advance the file cursor
    pixel_indices = np.load(f)
    pixel_labels = np.load(f)
    f.close()
    depths_buffer = buffers.as_tensor_buffer(depths)
    del depths
    del labels
    pixel_indices_buffer = buffers.as_matrix_buffer(pixel_indices)
    del pixel_indices
    pixel_labels_buffer = buffers.as_vector_buffer(pixel_labels)
    del pixel_labels
    return depths_buffer, pixel_indices_buffer, pixel_labels_buffer
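# Hypothetical companion writer for the loader above (not in the original
# code; the shape comments are assumptions). It writes the four arrays
# back-to-back with np.save, in the exact order load_training_data reads them:
def save_training_data(numpy_filename, depths, labels,
                       pixel_indices, pixel_labels):
    with open(numpy_filename, 'wb') as f:
        np.save(f, depths)         # float32 tensor: images x rows x cols
        np.save(f, labels)         # per-pixel labels (read but unused above)
        np.save(f, pixel_indices)  # int32 (image, row, col) sample indices
        np.save(f, pixel_labels)   # int32 class label per sampled pixel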
def predict_proba(self, x):
    buffer_collection = buffers.BufferCollection()
    buffer_collection.AddFloat32MatrixBuffer(buffers.X_FLOAT_DATA,
                                             buffers.as_matrix_buffer(x))
    number_of_classes = self.forest_data.GetTree(0).GetYs().GetN()
    all_samples_step = pipeline.AllSamplesStep_f32f32i32(buffers.X_FLOAT_DATA)
    combiner = classification.ClassProbabilityCombiner_f32(number_of_classes)
    matrix_feature = matrix_features.LinearFloat32MatrixFeature_f32i32(
        all_samples_step.IndicesBufferId, buffers.X_FLOAT_DATA)
    forest_predicter = predict.LinearMatrixClassificationPredictin_f32i32(
        self.forest_data, matrix_feature, combiner, all_samples_step)
    result = buffers.Float32MatrixBuffer()
    forest_predicter.PredictYs(buffer_collection, result)
    return buffers.as_numpy_array(result)
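# Hypothetical call site for predict_proba (illustrative; assumes `model` is a
# fitted sklearn-style wrapper exposing this method and holding forest_data):
#
#   probs = model.predict_proba(np.array([[1.5, -0.5]], dtype=np.float32))
#   yhat = np.argmax(probs, axis=1)   # most probable class per row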
forestStats = predictor.get_forest().GetForestStats()
forestStats.Print()

# For the rest of the passes use all of the data
start_index = 0
end_index = clipped_list_of_sample_counts[-1]
for pass_id in range(1, args.number_of_passes_through_data):
    # Randomize the order
    perm = buffers.as_vector_buffer(
        np.array(np.random.permutation(pixel_labels_buffer.GetN()),
                 dtype=np.int32))
    pixel_indices_buffer = pixel_indices_buffer.Slice(perm)
    pixel_labels_buffer = pixel_labels_buffer.Slice(perm)

    # Randomly offset scales
    number_of_datapoints = pixel_indices_buffer.GetM()
    offset_scales = np.array(np.random.uniform(0.8, 1.2,
                                               (number_of_datapoints, 2)),
                             dtype=np.float32)
    offset_scales_buffer = buffers.as_matrix_buffer(offset_scales)

    predictor = forest_learner.fit(depth_images=depths_buffer,
                                   pixel_indices=pixel_indices_buffer,
                                   offset_scales=offset_scales_buffer,
                                   classes=pixel_labels_buffer)

    # pickle forest and data used for training
    forest_pickle_filename = "%s/forest-%d-%d.pkl" % (
        online_run_folder, pass_id, end_index)
    pickle.dump(predictor.get_forest(),
                gzip.open(forest_pickle_filename, 'wb'))

    # Print forest stats
    forestStats = predictor.get_forest().GetForestStats()
    forestStats.Print()
def convert_matrix_buffer_both_directions_helper(self, X, buffer_type):
    buf = buffers.as_matrix_buffer(X)
    assert isinstance(buf, buffer_type)
    X_back = buffers.as_numpy_array(buf)
    self.assertTrue((X == X_back).all())
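# Example round trip through the helper above (a sketch; Int32MatrixBuffer is
# assumed to follow the Float32MatrixBuffer naming used elsewhere here):
#
#   X = np.array([[3, 21, 1], [22, 1, 5]], dtype=np.int32)
#   self.convert_matrix_buffer_both_directions_helper(
#       X, buffers.Int32MatrixBuffer)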
print "Processing %d - %d - %s - %s" % (pass_id, i, pose_filename, str(datetime.now())) # Load single pose depth and class labels depths = kinect_utils.load_depth_from_exr("%s%s.exr" % (args.pose_files_input_path, pose_filename)) labels = kinect_utils.load_labels_from_png("%s%s.png" % (args.pose_files_input_path, pose_filename)) pixel_indices, pixel_labels = kinect_utils.sample_pixels_from_image(labels[0,:,:], config.number_of_pixels_per_image) # Randomly sample pixels and offset scales (number_of_datapoints, _) = pixel_indices.shape offset_scales = np.array(np.random.uniform(0.8, 1.2, (number_of_datapoints, 2)), dtype=np.float32) datapoint_indices = np.array(np.arange(number_of_datapoints), dtype=np.int32) # Package buffers for learner bufferCollection = buffers.BufferCollection() bufferCollection.AddFloat32Tensor3Buffer(buffers.DEPTH_IMAGES, buffers.as_tensor_buffer(depths)) bufferCollection.AddFloat32MatrixBuffer(buffers.OFFSET_SCALES, buffers.as_matrix_buffer(offset_scales)) bufferCollection.AddInt32MatrixBuffer(buffers.PIXEL_INDICES, buffers.as_matrix_buffer(pixel_indices)) bufferCollection.AddInt32VectorBuffer(buffers.CLASS_LABELS, buffers.as_vector_buffer(pixel_labels)) # Update learner online_learner.Train(bufferCollection, buffers.Int32Vector(datapoint_indices)) #pickle forest and data used for training if (i+1) % 1 == 0: # if True: forest_pickle_filename = "%s/forest-%d-%d.pkl" % (online_run_folder, pass_id, i+1) pickle.dump(online_learner.GetForest(), gzip.open(forest_pickle_filename, 'wb')) # Print forest stats forestStats = online_learner.GetForest().GetForestStats() forestStats.Print()