Example #1
    def postprocess(self, data: Input):
        predictions = torch.from_numpy(data.get_as_numpy()[0])
        num_rows, num_cols = predictions.shape

        outputs = Output()
        result = []
        for i in range(num_rows):
            out = predictions[i].unsqueeze(0)
            y_hat = out.argmax(1).item()
            predicted_idx = str(y_hat)
            result.append(self.mapping[predicted_idx])
        outputs.add_as_json(result)
        return outputs
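
This postprocess maps each row of logits to a label via argmax; the mapping dict (index string to label name) is loaded during handler initialization, and the snippets on this page presume module-level imports such as from djl_python import Input, Output plus torch and logging. A minimal standalone sketch of the same decode loop, with made-up logits and a hypothetical label map:

import torch

predictions = torch.tensor([[0.1, 2.3], [1.5, 0.2]])  # (num_rows, num_cols) logits
mapping = {"0": "negative", "1": "positive"}           # hypothetical label map
result = [mapping[str(row.unsqueeze(0).argmax(1).item())] for row in predictions]
print(result)  # ['positive', 'negative']
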
Example #2
    def preprocess(self, data: Input):
        input_text = data.get_as_string()
        tokens = self.tokenizer.encode_plus(input_text,
                                            max_length=self.max_length,
                                            truncation=True,
                                            padding=True,
                                            add_special_tokens=True,
                                            return_tensors='np')
        input_ids = tokens["input_ids"]
        attention_mask = tokens["attention_mask"]
        outputs = Output()
        outputs.add_as_numpy([input_ids, attention_mask])
        return outputs
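
This preprocess tokenizes the raw request string and forwards the resulting arrays downstream as numpy payloads. A sketch of the initialization it presumes (the handler class name, checkpoint, and max_length are assumptions, not from the source):

from transformers import AutoTokenizer

class TextHandler:  # hypothetical handler class
    def initialize(self, properties=None):
        # assumed checkpoint; the demo's actual model may differ
        self.tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
        self.max_length = 128
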
Example #3
File: model.py  Project: frankfliu/djl-demo
    def inference(self, inputs: Input):
        try:
            text = inputs.get_as_string()
            tokens = self.tokenizer.encode_plus(text,
                                                max_length=self.max_length,
                                                truncation=True,
                                                padding=True,
                                                add_special_tokens=True,
                                                return_tensors="pt")
            input_ids = tokens["input_ids"].to(self.device)
            attention_mask = tokens["attention_mask"].to(self.device)

            inferences = []
            out = self.model(input_ids, attention_mask)

            num_rows, num_cols = out[0].shape
            for i in range(num_rows):
                prediction = out[0][i].unsqueeze(0)
                y_hat = prediction.argmax(1).item()
                predicted_idx = str(y_hat)
                inferences.append(self.mapping[predicted_idx])

            outputs = Output()
            outputs.add_as_json(inferences)
        except Exception as e:
            logging.error(e, exc_info=True)
            # error handling
            outputs = Output(code=500, message=str(e))
            outputs.add("inference failed", key="data")

        return outputs
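
DJL Serving's Python engine invokes a module-level handle function in model.py; a common pattern is a singleton service whose inference method is the one shown above. A sketch of that wiring (SentimentService and its stub bodies are hypothetical):

from djl_python import Input, Output


class SentimentService:  # hypothetical name for the class defining inference()
    def initialize(self):
        ...  # load tokenizer, model, device, and label mapping here

    def inference(self, inputs: Input) -> Output:
        ...  # body as in the example above


_service = SentimentService()
_service.initialize()


def handle(inputs: Input) -> Output:
    if inputs.is_empty():
        return None  # warm-up request carries no payload
    return _service.inference(inputs)
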
Example #4
    def preprocess(self, data: Input):
        input_json = data.get_as_json()
        question = input_json["question"]
        context = input_json["paragraph"]
        tokens = self.tokenizer.encode_plus(question,
                                            context,
                                            max_length=self.max_length,
                                            truncation=True,
                                            padding=True,
                                            add_special_tokens=True,
                                            return_tensors="np")
        input_ids = tokens["input_ids"]
        attention_mask = tokens["attention_mask"]
        outputs = Output()
        outputs.add_as_numpy([input_ids, attention_mask])
        return outputs
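
This question-answering preprocess expects a JSON body with question and paragraph fields and encodes the pair jointly. A standalone sketch of the same tokenizer call (the checkpoint name and payload text are assumptions):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")  # assumed checkpoint
payload = {"question": "Who maintains djl-demo?",
           "paragraph": "frankfliu maintains the djl-demo repository."}
tokens = tokenizer.encode_plus(payload["question"],
                               payload["paragraph"],
                               max_length=128,
                               truncation=True,
                               padding=True,
                               add_special_tokens=True,
                               return_tensors="np")
print(tokens["input_ids"].shape, tokens["attention_mask"].shape)
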
Example #5
    def postprocess(self, data: Input):
        input_ids = data.get_as_numpy("input_ids")[0].tolist()
        predictions = torch.from_numpy(data.get_as_numpy("data")[0])
        num_rows = predictions.shape[0]

        outputs = Output()
        result = []
        for i in range(num_rows):
            out = predictions[i].unsqueeze(0)
            out = torch.argmax(out, dim=2)
            tokens = self.tokenizer.tokenize(
                self.tokenizer.decode(input_ids[i]))
            pairs = [(token, self.label_list[label_id])
                     for token, label_id in zip(tokens, out[0].tolist())]
            result.append(pairs)

        outputs.add_as_json(result)
        return outputs
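
This token-classification postprocess re-tokenizes the decoded input so the predicted tag ids can be zipped back onto tokens. A standalone sketch of that alignment step (the checkpoint and tag set are assumptions, and random logits stand in for model output):

import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")  # assumed checkpoint
label_list = ["O", "B-PER", "I-PER"]  # hypothetical tag set
ids = tokenizer.encode("Frank wrote djl-demo")
tokens = tokenizer.tokenize(tokenizer.decode(ids))
logits = torch.randn(len(tokens), len(label_list))  # stand-in for model output
tags = logits.argmax(dim=1).tolist()
print(list(zip(tokens, (label_list[t] for t in tags))))
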
Example #6
    def postprocess(self, data: Input):
        input_ids = data.get_as_numpy("input_ids")[0].tolist()
        predictions = data.get_as_numpy("data")
        answer_start_scores = torch.from_numpy(predictions[0])
        answer_end_scores = torch.from_numpy(predictions[1])
        num_rows, num_cols = answer_start_scores.shape

        outputs = Output()
        result = []
        for i in range(num_rows):
            answer_start_scores_one_seq = answer_start_scores[i].unsqueeze(0)
            answer_start = torch.argmax(answer_start_scores_one_seq)
            answer_end_scores_one_seq = answer_end_scores[i].unsqueeze(0)
            answer_end = torch.argmax(answer_end_scores_one_seq) + 1
            prediction = self.tokenizer.convert_tokens_to_string(
                self.tokenizer.convert_ids_to_tokens(
                    input_ids[i][answer_start:answer_end]))
            result.append(prediction)
        outputs.add_as_json(result)
        return outputs
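
The start and end logits are each reduced to a single token index per row; note the +1 on the end index so the Python slice includes the end token. A minimal span-extraction sketch with made-up scores (no model needed; the token ids are hypothetical):

import torch

start_scores = torch.tensor([[0.1, 3.0, 0.2, 0.1]])  # (1, seq_len)
end_scores = torch.tensor([[0.0, 0.1, 2.5, 0.3]])
answer_start = torch.argmax(start_scores)       # most likely start index
answer_end = torch.argmax(end_scores) + 1       # +1 makes the slice end-inclusive
input_ids_row = [101, 2129, 2116, 102]          # hypothetical token ids
print(input_ids_row[answer_start:answer_end])   # -> [2129, 2116]
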
Example #7
File: model.py  Project: frankfliu/djl-demo
    def inference(self, inputs: Input):
        try:
            data = inputs.get_as_json()
            question = data["question"]
            paragraph = data["paragraph"]
            tokens = self.tokenizer.encode_plus(question,
                                                paragraph,
                                                max_length=self.max_length,
                                                truncation=True,
                                                padding='max_length',
                                                add_special_tokens=True,
                                                return_tensors="pt")
            input_ids = tokens["input_ids"].to(self.device)
            attention_mask = tokens["attention_mask"].to(self.device)

            inferences = []
            out = self.model(input_ids, attention_mask)
            answer_start_scores = out[0]
            answer_end_scores = out[1]

            num_rows, num_cols = answer_start_scores.shape
            for i in range(num_rows):
                answer_start_scores_one_seq = answer_start_scores[i].unsqueeze(0)
                answer_start = torch.argmax(answer_start_scores_one_seq)
                answer_end_scores_one_seq = answer_end_scores[i].unsqueeze(0)
                answer_end = torch.argmax(answer_end_scores_one_seq) + 1
                token_id = self.tokenizer.convert_ids_to_tokens(
                    input_ids[i].tolist()[answer_start:answer_end])
                prediction = self.tokenizer.convert_tokens_to_string(token_id)
                inferences.append(prediction)

            outputs = Output()
            outputs.add_as_json(inferences)
        except Exception as e:
            logging.error(e, exc_info=True)
            # error handling
            outputs = Output(code=500, message=str(e))
            outputs.add("inference failed", key="data")

        return outputs
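
This inference method assumes the handler already holds a tokenizer, model, device, and max_length. A sketch of such an initialize step (the checkpoint name and max_length are assumptions, not from the source):

import torch
from transformers import AutoModelForQuestionAnswering, AutoTokenizer


class QAService:  # hypothetical handler class
    def initialize(self):
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        name = "distilbert-base-cased-distilled-squad"  # assumed checkpoint
        self.tokenizer = AutoTokenizer.from_pretrained(name)
        self.model = AutoModelForQuestionAnswering.from_pretrained(name)
        self.model.to(self.device).eval()
        self.max_length = 384
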