def get_num_lightings(yaml_dict):
    '''
    Returns the number of distinct lighting values among the non-blank
    examples of the training foreground dataset named in yaml_dict.
    '''
    # data_path is assumed to be defined elsewhere in this module, naming
    # the root data directory.
    fg_path = yaml_dict['datasets']['training']['fg_path']
    dataset = MemmapDataset(os.path.join(data_path, fg_path))

    # Column 4 of a NORB-style label vector is the lighting condition.
    lighting_labels = dataset.tensors[1][:, 4]

    # The first example is the blank (object-free) one, labeled -1; all
    # other examples must have a lighting label in [0, 4).
    assert_equal(lighting_labels[0], -1)
    assert_array_compare(numpy.greater_equal, lighting_labels[1:], 0)
    assert_array_compare(numpy.less, lighting_labels[1:], 4)

    num_valid_lighting_values = len(frozenset(lighting_labels[1:]))
    assert_equal(num_valid_lighting_values, 4)

    return num_valid_lighting_values
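
# A minimal usage sketch (assuming PyYAML and a hypothetical config file
# 'experiment.yaml' containing the datasets/training/fg_path keys read
# above):
#
#   with open('experiment.yaml') as yaml_file:
#       yaml_dict = yaml.safe_load(yaml_file)
#
#   num_lightings = get_num_lightings(yaml_dict)
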
    def __init__(self,
                 model,
                 foreground_dataset,
                 video_frame,
                 original_image_size,
                 output_dir=None):
        '''
        model: IdAndCameraDirModel, IdAndCameraDirModelConv, or VideoModel

        foreground_dataset: simplelearn.data.Dataset
          A Dataset with NORB-style label vectors. Used to compute a mapping
          from object ID to example images.

        video_frame: numpy.ndarray
          A frame from the input video stream. Used to initialize the
          display pixel buffer's size and dtype.

        original_image_size: Sequence
          The size of the original images, as a length-2 sequence of
          positive ints.

        output_dir: string or None
          The directory to save frame images to. If omitted, no frame
          images will be saved.
        '''

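        # The model must take batched images in ('b', '0', '1', 'c')
        # (batch, rows, columns, channels) axis order.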
        assert_is_instance(model, (IdAndCameraDirModel,
                                   IdAndCameraDirModelConv,
                                   VideoModel))
        assert_equal(model.input_node.output_format.axes,
                     ('b', '0', '1', 'c'))

        assert_is_instance(foreground_dataset, Dataset)
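        # The video frame must be a 2-D image with 1 (grayscale) or
        # 3 (RGB) channels.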
        assert_equal(len(video_frame.shape), 3)
        assert_in(video_frame.shape[-1], (1, 3))

        original_image_size = numpy.asarray(original_image_size)
        assert_equal(len(original_image_size), 2)
        assert_array_compare(numpy.greater, original_image_size, 0)

        assert_is(output_dir, None)  # TODO: support this argument

        self.original_image_size = original_image_size
        # Maps NORB label vectors to scalar object IDs.
        self.label_to_id = NorbLabelToObjectIdConverter(
            foreground_dataset.tensors[1])

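        # Builds a dict mapping (object ID, elevation, azimuth) tuples to
        # example images from the given dataset.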
        def make_id_elev_azim_to_example(example_dataset):
            assert_equal(example_dataset.formats[0].axes.index('c'), 3)
            assert_equal(example_dataset.formats[0].axes.index('b'), 0)
            assert_equal(example_dataset.formats[1].axes.index('b'), 0)

            images = example_dataset.tensors[0][..., :3]  # cut off alpha
            labels = example_dataset.tensors[1]

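            # NORB-style label vectors have 5 columns (small NORB) or 11
            # (presumably big-NORB-style labels with extra jitter fields).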
            assert_in(labels.shape[1], (5, 11))

            # Arbitrarily restrict attention to images that use
            # lighting setup 0.
            row_mask = labels[:, 4] == 0
            images = images[row_mask, :]
            labels = labels[row_mask, :]

            ids = self.label_to_id(labels)
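            # Label columns 2:4 hold elevation and azimuth.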
            ids_elevs_azims = numpy.hstack((ids[:, numpy.newaxis],
                                            labels[:, 2:4]))

            result = dict(safe_izip((tuple(t) for t in ids_elevs_azims),
                                    images))

            # dict() silently drops duplicate keys, so this length check
            # verifies that (id, elev, azim) uniquely identifies an image.
            assert_equal(len(result), ids_elevs_azims.shape[0])

            return result

        self.id_elev_azim_to_example = \
            make_id_elev_azim_to_example(foreground_dataset)
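
        # Example lookup (a sketch; obj_id, elev, and azim are hypothetical
        # names for the integer key components):
        #
        #   image = self.id_elev_azim_to_example[(obj_id, elev, azim)]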

        self.model = model

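        # Allocate a double-width display buffer: the left half will show
        # the incoming video frame, the right half a matching example image.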
        self.all_pixels = numpy.zeros([video_frame.shape[0],
                                       video_frame.shape[1] * 2,
                                       video_frame.shape[2]],
                                      dtype=video_frame.dtype)

        self.video_pixels = self.all_pixels[:, :video_frame.shape[1], :]

        self.examples_pixels = self.all_pixels[:, video_frame.shape[1]:, :]
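
        # Note: video_pixels and examples_pixels are numpy views into
        # all_pixels, not copies; writing into either half updates the
        # composite display buffer in place.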