def __init__(self,
                 small_conv_model,
                 image_node,
                 yaml_dict,
                 numpy_rng,
                 theano_rng):
        '''
        Builds the model graph, deriving the first affine layer's filter
        shape from a smaller, previously-built model.

        Parameters
        ----------
        small_conv_model: IdAndCameraDirModel
          Reference model; the spatial shape of its first affine layer's
          conv input becomes this model's ``first_filter_shape``.
        image_node: node with ``output_format.axes == ('b', '0', '1', 'c')``
          and a floating-point dtype (both asserted below).
        yaml_dict: dict
          Experiment configuration; this reads ``hyperparams``,
          ``datasets`` and ``model`` sub-dicts.
        numpy_rng, theano_rng:
          RNGs forwarded to the layer builders (presumably for weight
          init and dropout masks — TODO confirm).
        '''
        assert_is_instance(small_conv_model, IdAndCameraDirModel)
        assert_equal(image_node.output_format.axes, ('b', '0', '1', 'c'))

        # Shared trunk: grayscale conversion, then local contrast
        # normalization, then the conv and "affine" layers appended below.
        self.shared_layers = []

        self.shared_layers.append(RgbToGray(image_node))
        self.shared_layers.append(Lcn(self.shared_layers[-1]))

        assert_is_subdtype(image_node.output_format.dtype, numpy.floating)

        use_dropout = yaml_dict['hyperparams']['use_dropout']

        def get_num_classes(yaml_dict):
            # Number of distinct object ids in the training foreground set.
            # NOTE(review): `data_path` is not defined in this block —
            # presumably a module-level constant; verify.
            fg_path = yaml_dict['datasets']['training']['fg_path']
            dataset = MemmapDataset(os.path.join(data_path, fg_path))
            label_to_id = NorbLabelToObjectIdConverter(dataset.tensors[1])
            return label_to_id.num_unique_ids

        add_conv_layers(self.shared_layers[-1],
                        yaml_dict['model']['shared_layers']['conv'],
                        use_dropout,
                        numpy_rng,
                        theano_rng,
                        self.shared_layers)

        def get_first_affine_layer_filter_shape(small_conv_model):
            # Returns the spatial ('0', '1') shape of the conv input feeding
            # the small model's first affine layer.  Indexing by
            # len(self.shared_layers) assumes the small model has exactly as
            # many layers before its first affine layer as this model has
            # built so far — order-sensitive; must run after add_conv_layers.
            first_affine_layer = \
                small_conv_model.shared_layers[len(self.shared_layers)]

            assert_is_instance(first_affine_layer, AffineLayer)
            assert_is_instance(first_affine_layer.inputs[0], Conv2dLayer)
            assert_equal(first_affine_layer.inputs[0].output_format.axes,
                         ('b', 'c', '0', '1'))
            return first_affine_layer.inputs[0].output_format.shape[2:]

        first_filter_shape = \
            get_first_affine_layer_filter_shape(small_conv_model)

        # Sanity check: the geometry expected for this configuration.
        assert_equal(first_filter_shape, (2, 2))

        add_affine_layers_conv(self.shared_layers[-1],
                               yaml_dict['model']['shared_layers']['affine'],
                               use_dropout,
                               numpy_rng,
                               theano_rng,
                               first_filter_shape=first_filter_shape,
                               output_list=self.shared_layers)

        # Object-id classification head, branching off the shared trunk.
        self.id_layers = []
        add_classifier_mlp_conv(self.shared_layers[-1],
                                yaml_dict['model']['id_layers'],
                                get_num_classes(yaml_dict),
                                use_dropout,
                                numpy_rng,
                                theano_rng,
                                self.id_layers)

        # Camera-direction regression head (3 is presumably the output
        # dimensionality — confirm against add_regressor_mlp_conv).
        self.cam_dir_layers = []
        add_regressor_mlp_conv(self.shared_layers[-1],
                               yaml_dict['model']['cam_dir_layers'],
                               3,
                               use_dropout,
                               numpy_rng,
                               theano_rng,
                               self.cam_dir_layers)

        self.input_node = image_node
    def __init__(self, image_node, yaml_dict, numpy_rng, theano_rng):
        '''
        Builds the shared conv trunk plus the id and camera-direction heads.

        Parameters
        ----------
        image_node: InputNode
        yaml_dict: dict
          Experiment configuration (hyperparams, dataset paths, layer specs).
        numpy_rng, theano_rng:
          RNGs forwarded to the layer builders.
        '''

        super(IdAndCameraDirModelConv, self).__init__()

        # Shared trunk, starting with preprocessing: grayscale conversion
        # followed by local contrast normalization.
        pipeline = [RgbToGray(image_node)]
        pipeline.append(Lcn(pipeline[-1]))

        assert_is_subdtype(image_node.output_format.dtype, numpy.floating)

        dropout_flag = yaml_dict['hyperparams']['use_dropout']

        def count_object_ids(config):
            # Distinct object-id count in the training foreground set.
            fg_path = config['datasets']['training']['fg_path']
            dataset = MemmapDataset(os.path.join(data_path, fg_path))
            converter = NorbLabelToObjectIdConverter(dataset.tensors[1])
            return converter.num_unique_ids

        model_spec = yaml_dict['model']

        add_conv_layers(pipeline[-1],
                        model_spec['shared_layers']['conv'],
                        dropout_flag,
                        numpy_rng,
                        theano_rng,
                        pipeline)

        # The last conv layer's spatial extent supplies the filter shape of
        # the first affine (conv-implemented) layer.
        conv_format = pipeline[-1].output_format
        assert_equal(conv_format.axes, ('b', 'c', '0', '1'))
        filter_shape = conv_format.shape[2:]

        add_affine_layers_conv(pipeline[-1],
                               model_spec['shared_layers']['affine'],
                               dropout_flag,
                               numpy_rng,
                               theano_rng,
                               pipeline,
                               first_filter_shape=filter_shape)

        # Object-id classification head.
        id_branch = []
        add_classifier_mlp_conv(pipeline[-1],
                                model_spec['id_layers'],
                                count_object_ids(yaml_dict),
                                dropout_flag,
                                numpy_rng,
                                theano_rng,
                                id_branch)

        # Camera-direction regression head.
        cam_branch = []
        add_regressor_mlp_conv(pipeline[-1],
                               model_spec['cam_dir_layers'],
                               3,
                               dropout_flag,
                               numpy_rng,
                               theano_rng,
                               cam_branch)

        self.input_node = image_node
        self.shared_layers = pipeline
        self.id_layers = id_branch
        self.cam_dir_layers = cam_branch
# Example 3
    def __init__(self, image_node, yaml_dict, numpy_rng, theano_rng):
        '''
        Builds the shared conv trunk and six task heads (id, camera
        direction, lighting, row/col shift, scale, roll).

        Parameters
        ----------
        image_node: InputNode
        yaml_dict: dict
          Experiment configuration (hyperparams, dataset paths, layer specs).
        numpy_rng, theano_rng:
          RNGs forwarded to the layer builders.
        '''

        super(IdPoseLightingModel, self).__init__()

        self.input_node = image_node

        # Shared trunk: grayscale conversion, then local contrast
        # normalization, then conv and affine layers.
        self.shared_layers = [RgbToGray(image_node)]
        self.shared_layers.append(Lcn(self.shared_layers[-1]))

        assert_is_subdtype(image_node.output_format.dtype, numpy.floating)

        dropout_flag = yaml_dict['hyperparams']['use_dropout']

        add_conv_layers(self.shared_layers[-1],
                        yaml_dict['model']['shared_layers']['conv'],
                        dropout_flag,
                        numpy_rng,
                        theano_rng,
                        self.shared_layers)

        add_affine_layers_conv(self.shared_layers[-1],
                               yaml_dict['model']['shared_layers']['affine'],
                               dropout_flag,
                               numpy_rng,
                               theano_rng,
                               self.shared_layers)

        def num_object_classes(config):
            # Distinct object-id count in the training foreground set.
            fg_path = config['datasets']['training']['fg_path']
            dataset = MemmapDataset(os.path.join(data_path, fg_path))
            converter = NorbLabelToObjectIdConverter(dataset.tensors[1])
            return converter.num_unique_ids

        def num_lighting_classes(config):
            '''
            Returns the number of non-blank lighting values.
            '''
            fg_path = config['datasets']['training']['fg_path']
            dataset = MemmapDataset(os.path.join(data_path, fg_path))
            lightings = dataset.tensors[1][:, 4]
            # First row is the blank image (-1); all others lie in [0, 4).
            assert_equal(lightings[0], -1)
            assert_array_compare(numpy.greater_equal, lightings[1:], 0)
            assert_array_compare(numpy.less, lightings[1:], 4)
            num_valid = len(frozenset(lightings[1:]))
            assert_equal(num_valid, 4)
            return num_valid

        # The trunk is complete; every head branches off its last layer.
        trunk = self.shared_layers[-1]

        # Object-id classification head.
        self.id_layers = []
        add_classifier_mlp_conv(trunk,
                                yaml_dict['model']['id_layers'],
                                num_object_classes(yaml_dict),
                                dropout_flag,
                                numpy_rng,
                                theano_rng,
                                self.id_layers)

        # Camera-direction regression head.
        self.cam_dir_layers = []
        add_regressor_mlp_conv(trunk,
                               yaml_dict['model']['cam_dir_layers'],
                               3,
                               dropout_flag,
                               numpy_rng,
                               theano_rng,
                               self.cam_dir_layers)

        # Lighting classification head.
        self.lighting_layers = []
        add_classifier_mlp_conv(trunk,
                                yaml_dict['model']['lighting_layers'],
                                num_lighting_classes(yaml_dict),
                                dropout_flag,
                                numpy_rng,
                                theano_rng,
                                self.lighting_layers)

        # Row/column shift regression head.
        self.rc_shift_layers = []
        add_regressor_mlp_conv(trunk,
                               yaml_dict['model']['rc_shift_layers'],
                               2,
                               dropout_flag,
                               numpy_rng,
                               theano_rng,
                               self.rc_shift_layers)

        # Scale regression head.
        self.scale_layers = []
        add_regressor_mlp_conv(trunk,
                               yaml_dict['model']['scale_layers'],
                               1,
                               dropout_flag,
                               numpy_rng,
                               theano_rng,
                               self.scale_layers)

        # In-plane rotation (roll) regression head.
        self.roll_layers = []
        add_regressor_mlp_conv(trunk,
                               yaml_dict['model']['roll_layers'],
                               1,
                               dropout_flag,
                               numpy_rng,
                               theano_rng,
                               self.roll_layers)