Example #1
    def __init__(self, trainable=False, batch_size=32, epochs=3, **kwargs):
        super(TFMobileNet, self).__init__(**kwargs)

        self.image_width, self.image_height = 224, 224
        self.trainable = trainable
        self.batch_size = batch_size
        self.epochs = epochs

        self.feature_extractor_layer = hub.KerasLayer(
            feature_extractor_model,
            input_shape=(self.image_width, self.image_height, 3),
            trainable=trainable)

        self.from_name, self.to_name, self.value, self.labels_in_config = get_single_tag_keys(
            self.parsed_label_config, 'Choices', 'Image')
        self.labels = tf.convert_to_tensor(sorted(self.labels_in_config))
        num_classes = len(self.labels_in_config)
        self.model = tf.keras.Sequential(
            [self.feature_extractor_layer,
             tf.keras.layers.Dense(num_classes)])
        self.model.summary()
        if self.train_output:
            model_file = self.train_output['model_file']
            logger.info('Restore model from ' + model_file)
            # Restore previously saved weights
            self.model.load_weights(model_file)
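
The snippet above references names defined outside the method, most notably a module-level feature_extractor_model handle and a logger. A minimal sketch of that assumed context (the TF Hub handle shown is only an illustrative choice, and the base class is assumed to be Label Studio's LabelStudioMLBase):

import logging

import tensorflow as tf
import tensorflow_hub as hub
from label_studio_ml.model import LabelStudioMLBase
from label_studio_ml.utils import get_single_tag_keys

logger = logging.getLogger(__name__)

# Illustrative TF Hub handle; any image feature-vector model that accepts a
# 224x224x3 input works with the KerasLayer call in the example above.
feature_extractor_model = 'https://tfhub.dev/google/tf2-preview/mobilenet_v2/feature_vector/4'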
Example #2
    def __init__(self, freeze_extractor=False, **kwargs):
        super(ImageClassifierAPI, self).__init__(**kwargs)
        self.from_name, self.to_name, self.value, self.classes = get_single_tag_keys(
            self.parsed_label_config, 'Choices', 'Image')
        self.freeze_extractor = freeze_extractor
        if self.train_output:
            self.classes = self.train_output['classes']
            self.model = ImageClassifier(len(self.classes), freeze_extractor)
            self.model.load(self.train_output['model_path'])
        else:
            self.model = ImageClassifier(len(self.classes), freeze_extractor)
Example #3
    def __init__(self, freeze_extractor=False, **kwargs):
        # don't forget to call base class constructor
        super(animalClassifier, self).__init__(**kwargs)
        self.from_name, self.to_name, self.value, self.classes = get_single_tag_keys(
            self.parsed_label_config, 'Choices', 'Image')

        if self.train_output:
            self.classes = self.train_output['classes']
            self.model = ImageClassifier(len(self.classes))
            self.model.load(self.train_output['model_path'])  # restore the previously saved checkpoint of pretrained weights
        else:
            self.model = ImageClassifier(len(self.classes))
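
Examples #2 and #3 follow the same pattern: restore the class list and a checkpoint from a previous training run if one exists, otherwise build a fresh model. Both rely on the Label Studio ML helpers plus a project-specific ImageClassifier wrapper defined elsewhere in the backend script; a sketch of the assumed imports:

from label_studio_ml.model import LabelStudioMLBase
from label_studio_ml.utils import get_single_tag_keys

# `ImageClassifier` is not part of label_studio_ml: it is a helper class from
# the same backend script (typically wrapping a torchvision model with
# fit/load/predict methods), so no import is shown for it here.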
Example #4
    def __init__(self,
                 config_file,
                 checkpoint_file,
                 labels_file=None,
                 score_threshold=0.3,
                 device="cpu",
                 **kwargs):
        """
        Load MMDetection model from config and checkpoint into memory.
        (Check https://mmdetection.readthedocs.io/en/v1.2.0/GETTING_STARTED.html#high-level-apis-for-testing-images)

        Optionally set mappings from COCO classes to target labels.
        :param config_file: Absolute path to the MMDetection config file (e.g. /home/user/mmdetection/configs/faster_rcnn/faster_rcnn_r50_fpn_1x.py)
        :param checkpoint_file: Absolute path to the MMDetection checkpoint file (e.g. /home/user/mmdetection/checkpoints/faster_rcnn_r50_fpn_1x_20181010-3d1b3351.pth)
        :param labels_file: JSON file with mappings from COCO labels to custom labels, e.g. {"airplane": "Boeing"}
        :param score_threshold: minimum detection score; predictions below it are discarded as noise
        :param device: device to run inference on (cpu, cuda:0, cuda:1, ...)
        :param kwargs:
        """
        super(MMDetection, self).__init__(**kwargs)

        self.config_file = config_file
        self.checkpoint_file = checkpoint_file
        self.labels_file = labels_file
        if self.labels_file and os.path.exists(self.labels_file):
            self.label_map = json_load(self.labels_file)
        else:
            self.label_map = {}

        (
            self.from_name,
            self.to_name,
            self.value,
            self.labels_in_config,
        ) = get_single_tag_keys(self.parsed_label_config, "RectangleLabels",
                                "Image")
        schema = list(self.parsed_label_config.values())[0]
        self.labels_in_config = set(self.labels_in_config)

        # Collect label maps from `predicted_values="airplane,car"` attribute in <Label> tag
        self.labels_attrs = schema.get("labels_attrs")
        if self.labels_attrs:
            for label_name, label_attrs in self.labels_attrs.items():
                for predicted_value in label_attrs.get("predicted_values", "").split(","):
                    predicted_value = predicted_value.strip()
                    if predicted_value:  # skip empty entries when the attribute is absent
                        self.label_map[predicted_value] = label_name

        print("Load new model from: ", config_file, checkpoint_file)
        self.model = init_detector(config_file, checkpoint_file, device=device)
        self.score_thresh = score_threshold
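
This fragment likewise assumes some surrounding context: init_detector from MMDetection's high-level API, the Label Studio ML base class and helpers, and a small json_load utility for the label-map file described in the docstring. A sketch of that context, with json_load written out as a hypothetical helper (the original may import an equivalent from a Label Studio utility module instead):

import os
import json

from mmdet.apis import init_detector  # MMDetection high-level testing API
from label_studio_ml.model import LabelStudioMLBase
from label_studio_ml.utils import get_single_tag_keys


def json_load(path):
    # Hypothetical helper matching the call in the example: read a JSON label
    # map such as {"airplane": "Boeing"} from disk.
    with open(path) as f:
        return json.load(f)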
Example #5
    def __init__(self, num_responses=5, model='microsoft/DialoGPT-small', **kwargs):
        super(DialoGPTSimpleGenerator, self).__init__(**kwargs)

        self.num_return_sequences = num_responses
        self.top_k = num_responses
        self.model_name = model

        self.from_name, self.to_name, self.value, self.labels_in_config = get_single_tag_keys(
            self.parsed_label_config, 'TextArea', 'Paragraphs')
        config = etree.fromstring(self.label_config)
        paragraphs = config.find('.//Paragraphs')
        self.name_key = paragraphs.get('nameKey') or paragraphs.get('namekey') or 'author'
        self.text_key = paragraphs.get('textKey') or paragraphs.get('textkey') or 'text'

        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForCausalLM.from_pretrained(self.model_name)
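
The generator above needs the Hugging Face transformers classes and an XML parser for the raw label_config string. A sketch of the imports this implies; whether etree is lxml or the standard library is an assumption, since both expose the fromstring/find/get calls used here:

from lxml import etree  # xml.etree.ElementTree also provides fromstring/find/get
from transformers import AutoTokenizer, AutoModelForCausalLM
from label_studio_ml.model import LabelStudioMLBase
from label_studio_ml.utils import get_single_tag_keys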
Example #6
    def __init__(self, freeze_extractor=False, **kwargs):

        super(ObjectDetectionAPI, self).__init__(**kwargs)

        self.from_name, self.to_name, self.value, self.classes = get_single_tag_keys(
            self.parsed_label_config, 'RectangleLabels', 'Image')
        self.freeze_extractor = freeze_extractor

        self.model = Detectron2LayoutModel(
            config_path="https://www.dropbox.com/s/ta4777i1g1jjj18/config.yml?dl=1",
            model_path="https://www.dropbox.com/s/f261qar6f75b9c0/model_final.pth?dl=1",
            label_map={
                1: "title",
                2: "address",
                3: "text",
                4: "number"
            },
            extra_config=[
                "TEST.DETECTIONS_PER_IMAGE", 150,
                "MODEL.ROI_HEADS.SCORE_THRESH_TEST", 0.5,
                "MODEL.ROI_HEADS.NMS_THRESH_TEST", 0.75
            ])
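
Detectron2LayoutModel here is layoutparser's wrapper around Detectron2 detection models; the flat extra_config list follows Detectron2's merge_from_list convention of alternating config key and value entries. A sketch of the imports this fragment presumably needs (the Dropbox-hosted config and weights stay exactly as given above):

from layoutparser import Detectron2LayoutModel  # assumes layoutparser is installed with Detectron2 support
from label_studio_ml.model import LabelStudioMLBase
from label_studio_ml.utils import get_single_tag_keys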