Example #1
    def test_pickle(self):
        # Round-trip tensor, matrix and vector buffers through pickle and
        # check that the numpy contents survive unchanged.

        # test tensor
        for dtype in [np.float32, np.float64, np.int32, np.int64]:
            array = np.array(
                [[[3, 21, 1], [22, 1, 5]], [[2, 2, 2], [7, 7, 7]]],
                dtype=dtype)
            b1 = buffers.as_tensor_buffer(array)
            with open('tmp.pkl', 'wb') as f:
                pickle.dump(b1, f)
            with open('tmp.pkl', 'rb') as f:
                b2 = pickle.load(f)
            array2 = buffers.as_numpy_array(b2)
            self.assertTrue((array == array2).all())

        # test matrix
        for dtype in [np.float32, np.float64, np.int32, np.int64]:
            array = np.array([[3, 21, 1], [22, 1, 5], [2, 2, 2], [7, 7, 7]],
                             dtype=dtype)
            b1 = buffers.as_matrix_buffer(array)
            with open('tmp.pkl', 'wb') as f:
                pickle.dump(b1, f)
            with open('tmp.pkl', 'rb') as f:
                b2 = pickle.load(f)
            array2 = buffers.as_numpy_array(b2)
            self.assertTrue((array == array2).all())

        # test vector
        for dtype in [np.float32, np.float64, np.int32, np.int64]:
            array = np.array([3, 21, 1], dtype=dtype)
            b1 = buffers.as_vector_buffer(array)
            with open('tmp.pkl', 'wb') as f:
                pickle.dump(b1, f)
            with open('tmp.pkl', 'rb') as f:
                b2 = pickle.load(f)
            array2 = buffers.as_numpy_array(b2)
            self.assertTrue((array == array2).all())
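The same round trip works outside the test harness. A minimal sketch, assuming the same `buffers` conversion module the tests import (its package path is not shown in the snippets); the temporary filename is arbitrary:

# Minimal pickle round trip for a matrix buffer (sketch; assumes the same
# `buffers` module as the tests above).
import pickle
import numpy as np

array = np.arange(12, dtype=np.float32).reshape(4, 3)
b1 = buffers.as_matrix_buffer(array)
with open('tmp.pkl', 'wb') as f:
    pickle.dump(b1, f)
with open('tmp.pkl', 'rb') as f:
    b2 = pickle.load(f)
assert (buffers.as_numpy_array(b2) == array).all()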
def classify_pixels(depth, forest):
    assert depth.ndim == 2
    m, n = depth.shape

    # setup test data
    pixel_indices = np.array(list(itertools.product(np.zeros(1), range(m), range(n))), dtype=np.int32)
    buffer_collection = buffers.BufferCollection()
    buffer_collection.AddInt32MatrixBuffer(buffers.PIXEL_INDICES, buffers.as_matrix_buffer(pixel_indices))
    buffer_collection.AddFloat32Tensor3Buffer(buffers.DEPTH_IMAGES, buffers.as_tensor_buffer(depth))

    # setup predictor
    all_samples_step = pipeline.AllSamplesStep_i32f32i32(buffers.PIXEL_INDICES)
    depth_delta_feature = image_features.ScaledDepthDeltaFeature_f32i32(all_samples_step.IndicesBufferId,
                                                                        buffers.PIXEL_INDICES,
                                                                        buffers.DEPTH_IMAGES)
    combiner = classification.ClassProbabilityCombiner_f32(number_of_body_parts)
    forest_predictor = predict.ScaledDepthDeltaClassificationPredictin_f32i32(forest, depth_delta_feature, combiner, all_samples_step)

    # predict
    yprobs_buffer = buffers.Float32MatrixBuffer()
    forest_predictor.PredictYs(buffer_collection, yprobs_buffer)

    # convert to image space 
    yprobs = buffers.as_numpy_array(yprobs_buffer)
    (_, ydim) = yprobs.shape
    img_yprobs = yprobs.reshape((m,n,ydim))
    img_yhat = np.argmax(img_yprobs, axis=2)
    return img_yhat, img_yprobs.max(axis=2)
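A hedged usage sketch for classify_pixels: the forest is assumed to come from one of the gzip-pickled checkpoints written by the training loop further down, and the filename here is made up for illustration.

# Hypothetical usage of classify_pixels (sketch; the pickle path is a placeholder).
import gzip
import pickle

forest = pickle.load(gzip.open('forest-0-1.pkl', 'rb'))
img_yhat, img_yprobs = classify_pixels(depth, forest)
# img_yhat[i, j] is the predicted body part for pixel (i, j);
# img_yprobs[i, j] is the probability assigned to that prediction.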
def classify_body_pixels(depth, ground_labels, forest):
    assert depth.ndim == 2

    # setup test data
    pixel_indices = to_indices(0, np.where(ground_labels != background))
    buffer_collection = buffers.BufferCollection()
    buffer_collection.AddInt32MatrixBuffer(buffers.PIXEL_INDICES, buffers.as_matrix_buffer(pixel_indices))
    buffer_collection.AddFloat32Tensor3Buffer(buffers.DEPTH_IMAGES, buffers.as_tensor_buffer(depth))

    # setup predictor
    all_samples_step = pipeline.AllSamplesStep_i32f32i32(buffers.PIXEL_INDICES)
    depth_delta_feature = image_features.ScaledDepthDeltaFeature_f32i32(all_samples_step.IndicesBufferId,
                                                                        buffers.PIXEL_INDICES,
                                                                        buffers.DEPTH_IMAGES)
    combiner = classification.ClassProbabilityCombiner_f32(number_of_body_parts)
    forest_predictor = predict.ScaledDepthDeltaClassificationPredictin_f32i32(forest, depth_delta_feature, combiner, all_samples_step)

    # predict
    yprobs_buffer = buffers.Float32MatrixBuffer()
    forest_predictor.PredictYs(buffer_collection, yprobs_buffer)

    # convert to image space 
    yprobs = buffers.as_numpy_array(yprobs_buffer)
    (_, ydim) = yprobs.shape
    m,n = depth.shape
    img_yprobs = np.zeros((m,n), dtype=np.float32)
    img_yprobs[ground_labels != background] = yprobs.max(axis=1)
    img_yhat = np.zeros((m,n), dtype=np.int32)
    img_yhat[ground_labels != background] = np.argmax(yprobs, axis=1)

    return img_yhat, img_yprobs
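A small follow-on check that is not in the original snippet: since classify_body_pixels only evaluates pixels where ground_labels differs from background, per-pixel accuracy can be measured on that same foreground mask.

# Sketch: foreground accuracy of the body-part prediction (assumes the
# depth image, ground_labels, background constant and forest from above).
img_yhat, img_yprobs = classify_body_pixels(depth, ground_labels, forest)
foreground = ground_labels != background
accuracy = np.mean(img_yhat[foreground] == ground_labels[foreground])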
Example #5
    def img_buffer_flatten_helper(self, X, buffer_type):
        m, n = X.shape
        img_buffer = buffers.as_tensor_buffer(X)
        assert isinstance(img_buffer, buffer_type)
        X_back = buffers.as_numpy_array(img_buffer)
        self.assertTrue((X.reshape(1, m, n) == X_back).all())
        X_back = buffers.as_numpy_array(buffer=img_buffer, flatten=True)
        self.assertTrue((X == X_back).all())
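Spelled out directly, the helper above is asserting the following behaviour of the tensor-buffer conversion (a sketch using the same `buffers` module):

# A 2D image becomes a 1 x m x n tensor buffer; flatten=True restores the 2D shape.
X = np.ones((4, 6), dtype=np.float32)
buf = buffers.as_tensor_buffer(X)
assert buffers.as_numpy_array(buf).shape == (1, 4, 6)
assert buffers.as_numpy_array(buffer=buf, flatten=True).shape == (4, 6)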
def load_and_sample(pose_path, list_of_poses, number_of_pixels_per_image):
    depths, labels = load_data(pose_path, list_of_poses)
    depths_buffer = buffers.as_tensor_buffer(depths)
    del depths
    pixel_indices, pixel_labels = sample_pixels_from_images(labels, number_of_pixels_per_image)
    del labels
    pixel_indices_buffer = buffers.as_matrix_buffer(pixel_indices)
    pixel_labels_buffer = buffers.as_vector_buffer(pixel_labels)
    del pixel_indices
    del pixel_labels
    return depths_buffer, pixel_indices_buffer, pixel_labels_buffer
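One plausible way to consume the three returned buffers, mirroring the BufferCollection packaging used in the training snippets below; the arguments to load_and_sample are placeholders.

# Sketch: package the sampled training data for a learner.
depths_buffer, pixel_indices_buffer, pixel_labels_buffer = load_and_sample(
    pose_path, list_of_poses, number_of_pixels_per_image)
bufferCollection = buffers.BufferCollection()
bufferCollection.AddFloat32Tensor3Buffer(buffers.DEPTH_IMAGES, depths_buffer)
bufferCollection.AddInt32MatrixBuffer(buffers.PIXEL_INDICES, pixel_indices_buffer)
bufferCollection.AddInt32VectorBuffer(buffers.CLASS_LABELS, pixel_labels_buffer)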
def load_training_data(numpy_filename):
    # The file stores four arrays back to back: depths, labels,
    # pixel_indices and pixel_labels.  labels is loaded only to advance
    # the file position; it is not used afterwards.
    with open(numpy_filename, 'rb') as f:
        depths = np.load(f)
        labels = np.load(f)
        pixel_indices = np.load(f)
        pixel_labels = np.load(f)
    depths_buffer = buffers.as_tensor_buffer(depths)
    del depths
    del labels
    pixel_indices_buffer = buffers.as_matrix_buffer(pixel_indices)
    del pixel_indices
    pixel_labels_buffer = buffers.as_vector_buffer(pixel_labels)
    del pixel_labels
    return depths_buffer, pixel_indices_buffer, pixel_labels_buffer
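load_training_data implies a matching writer that stores the four arrays back to back with np.save. Such a helper does not appear in the snippets; a sketch of what it could look like:

# Hypothetical counterpart to load_training_data: write the four arrays
# in the order the loader expects to np.load them.
def save_training_data(numpy_filename, depths, labels, pixel_indices, pixel_labels):
    with open(numpy_filename, 'wb') as f:
        np.save(f, depths)
        np.save(f, labels)
        np.save(f, pixel_indices)
        np.save(f, pixel_labels)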
Example #8
    def convert_img_buffer_both_directions_helper(self, X, buffer_type):
        buf = buffers.as_tensor_buffer(X)
        assert isinstance(buf, buffer_type)
        X_back = buffers.as_numpy_array(buf)
        self.assertTrue((X == X_back).all())

        for i, pose_filename in enumerate(pose_filenames):
            print "Processing %d - %d - %s - %s" % (pass_id, i, pose_filename, str(datetime.now()))

            # Load single pose depth and class labels
            depths = kinect_utils.load_depth_from_exr("%s%s.exr" % (args.pose_files_input_path, pose_filename))
            labels = kinect_utils.load_labels_from_png("%s%s.png" % (args.pose_files_input_path, pose_filename))
            pixel_indices, pixel_labels = kinect_utils.sample_pixels_from_image(labels[0,:,:], config.number_of_pixels_per_image)

            # Randomly sample pixels and offset scales
            (number_of_datapoints, _) = pixel_indices.shape
            offset_scales = np.array(np.random.uniform(0.8, 1.2, (number_of_datapoints, 2)), dtype=np.float32)
            datapoint_indices = np.array(np.arange(number_of_datapoints), dtype=np.int32)

            # Package buffers for learner
            bufferCollection = buffers.BufferCollection()
            bufferCollection.AddFloat32Tensor3Buffer(buffers.DEPTH_IMAGES, buffers.as_tensor_buffer(depths))
            bufferCollection.AddFloat32MatrixBuffer(buffers.OFFSET_SCALES, buffers.as_matrix_buffer(offset_scales))
            bufferCollection.AddInt32MatrixBuffer(buffers.PIXEL_INDICES, buffers.as_matrix_buffer(pixel_indices))
            bufferCollection.AddInt32VectorBuffer(buffers.CLASS_LABELS, buffers.as_vector_buffer(pixel_labels))

            # Update learner
            online_learner.Train(bufferCollection, buffers.Int32Vector(datapoint_indices))

            # pickle forest and data used for training
            if (i+1) % 1 == 0:  # modulus of 1 means a checkpoint is written after every pose
                forest_pickle_filename = "%s/forest-%d-%d.pkl" % (online_run_folder, pass_id, i+1)
                pickle.dump(online_learner.GetForest(), gzip.open(forest_pickle_filename, 'wb'))

                # Print forest stats
                forestStats = online_learner.GetForest().GetForestStats()
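The gzip-pickled checkpoints written above can be read back the same way, and the reloaded forest is the kind of object classify_pixels expects. A sketch, assuming the gzip and pickle imports already used by the training script:

# Sketch: reload a checkpointed forest and inspect its stats.
forest = pickle.load(gzip.open(forest_pickle_filename, 'rb'))
forestStats = forest.GetForestStats()  # same stats object the loop above retrieves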