コード例 #1
0
def caption(audio):
    """Run the audio captioning model on raw audio bytes.

    Returns the model predictions, or None when the model rejects the
    start time (the error is printed rather than raised).
    """
    model_wrapper = ModelWrapper()
    start_time = 0  # always caption from the beginning of the clip
    # Getting the predictions
    try:
        return model_wrapper._predict(audio, start_time)
    except ValueError:
        # _predict raises ValueError when start_time falls outside the clip.
        error = {
            'status': 'error',
            'message': 'Invalid start time: value outside audio clip'
        }
        print(error, "ERROR")
        # FIX: make the error path's return value explicit instead of
        # falling off the end of the function (implicit None).
        return None
コード例 #2
0
class ModelPredictAPI(PredictAPI):
    """Prediction endpoint returning age estimates with detection boxes."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        result = {'status': 'error'}

        args = input_parser.parse_args()
        input_data = args['image'].read()
        stillimg = read_still_image(input_data)
        preds = self.model_wrapper.predict(stillimg)

        # Map each prediction onto the response schema; res[0] holds the
        # detected face record for that prediction.
        # (Idiom: comprehension instead of a manual append loop.)
        result['predictions'] = [{
            'age_estimation': res[0]['age'],
            'detection_box': res[0]['box']
        } for res in preds]
        result['status'] = 'ok'
        return result
コード例 #3
0
class ModelPredictAPI(PredictAPI):
    """Image classification endpoint returning (label_id, label, probability)."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        result = {'status': 'error'}

        args = input_parser.parse_args()
        image_data = args['image'].read()
        image = self.model_wrapper.read_image(image_data)
        preds = self.model_wrapper.predict(image)

        # Map each (label_id, label, probability) triple onto the response
        # schema. FIX: dropped the redundant `[x for x in preds]` copy and
        # iterate preds directly.
        label_preds = [{
            'label_id': p[0],
            'label': p[1],
            'probability': p[2]
        } for p in preds]
        result['predictions'] = label_preds
        result['status'] = 'ok'

        return result
コード例 #4
0
class ModelPredictAPI(PredictAPI):
    """Word-embedding endpoint for Chinese word input."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        result = {'status': 'error'}
        args = input_parser.parse_args()

        # Build the model payload; second_word is optional.
        text = [args['first_word']]
        if args['second_word']:
            text.append(args['second_word'])
        theta = args['theta']
        mode = args['mode']
        input_json = {'text': text, 'theta': theta, 'mode': mode}

        try:
            preds = self.model_wrapper.predict(input_json)
        except (TypeError, UnicodeDecodeError):
            # BUG FIX: `except TypeError or UnicodeDecodeError` evaluates the
            # `or` first and catches only TypeError, so UnicodeDecodeError
            # escaped as an HTTP 500. A tuple catches both.
            abort(400, errors='first_word, second_word', message="The input format is not valid. "
                  "Please input utf-8 encoded Chinese word(s) only.")

        result['predictions'] = preds
        result['status'] = 'ok'

        return result
コード例 #5
0
class ModelPredictAPI(PredictAPI):
    """Image captioning endpoint returning (index, caption, probability)."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(image_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""

        result = {'status': 'error'}
        args = image_parser.parse_args()
        # Reject non-JPEG/PNG uploads based on the declared mimetype suffix.
        if not args['image'].mimetype.endswith(('jpg', 'jpeg', 'png')):
            abort(
                400,
                'Invalid file type/extension. Please provide an image in JPEG or PNG format.'
            )
        image_data = args['image'].read()

        preds = self.model_wrapper.predict(image_data)

        # Map each (index, caption, probability) triple onto the response
        # schema. FIX: dropped the redundant `[x for x in preds]` copy.
        label_preds = [{
            'index': p[0],
            'caption': p[1],
            'probability': p[2]
        } for p in preds]
        result['predictions'] = label_preds
        result['status'] = 'ok'

        return result
コード例 #6
0
class ModelPredictAPI(PredictAPI):
    """Pinyin conversion endpoint for one or two Chinese words."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        result = {'status': 'error'}
        args = input_parser.parse_args()

        text = [args['first_word']]
        if args['first_word'] == "":
            abort(400, "Please provide a valid input string.")

        # second_word is optional.
        if args['second_word']:
            text.append(args['second_word'])
        theta = args['theta']
        mode = args['mode']
        input_json = {'text': text, 'theta': theta, 'mode': mode}

        try:
            preds = self.model_wrapper.predict(input_json)
        except Exception:
            # FIX: narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Exception is still broad, but
            # this is the API boundary and every failure maps to a 400.
            abort(400, "Invalid input to Pinyin converter, please check!")

        result['predictions'] = preds
        result['status'] = 'ok'

        return result
class ModelPredictAPI(PredictAPI):
    """Text classification endpoint: one {label: probability} dict per input."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        result = {'status': 'error'}

        input_json = MAX_API.payload

        try:
            preds = self.model_wrapper.predict(input_json['text'])
        except Exception:
            # FIX: narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt at this API boundary.
            abort(
                400, "Please supply a valid input json. "
                "The json structure should have a 'text' field containing a list of strings"
            )

        # Generate the output format for every input string: pair each class
        # label with its probability. (Renamed `l`, which is easily confused
        # with `1`/`I`, to `label`.)
        output = [{label: p[i] for i, label in enumerate(class_labels)}
                  for p in preds]

        result['predictions'] = output
        result['status'] = 'ok'

        return result
コード例 #8
0
class ModelPredictAPI(PredictAPI):
    """Image-to-image endpoint: returns the generated image as a PNG file."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    def post(self):
        """Make a prediction given input data"""
        args = input_parser.parse_args()
        input_data = args['image'].read()

        # The model expects a batch of base64-encoded image strings.
        input_encoded = base64.urlsafe_b64encode(input_data)
        input_array = np.array(input_encoded)
        input_instance = np.expand_dims(input_array, axis=0)

        try:
            image = self.model_wrapper.predict(input_instance)
        except IOError as e:
            if e.args and e.args[0] == ERR_MSG:
                abort(400, ERR_MSG)
            # BUG FIX: the original fell through when the IOError message did
            # not match ERR_MSG, leaving `image` unbound and crashing below
            # with a NameError. Re-raise so the error surfaces as a 500.
            raise

        response = send_file(io.BytesIO(image),
                             attachment_filename='result.png',
                             mimetype='image/png')

        return response
コード例 #9
0
class ModelPredictAPI(PredictAPI):
    """Video classification endpoint: saves the upload, then classifies it."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(video_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        result = {'status': 'error'}

        #  Take video save it into directory
        args = video_parser.parse_args()
        video_data = args['video']
        # SECURITY FIX: keep only the basename of the client-supplied
        # filename so a name like "../../etc/x" cannot escape UPLOAD_FOLDER.
        filepath = os.path.join(UPLOAD_FOLDER,
                                os.path.basename(video_data.filename))
        video_data.save(filepath)

        #  Run predict function on file and map each (label_id, label,
        #  probability) triple onto the response schema.
        #  FIX: dropped the redundant `[x for x in preds]` copy.
        preds = self.model_wrapper.predict(filepath)
        label_preds = [{
            'label_id': p[0],
            'label': p[1],
            'probability': p[2]
        } for p in preds]
        result['predictions'] = label_preds
        result['status'] = 'ok'

        return result
コード例 #10
0
class ModelPredictAPI(PredictAPI):
    """Audio embedding endpoint: accepts a WAV upload, returns its embedding."""

    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Generate audio embedding from input data"""
        result = {'status': 'error'}

        parsed = input_parser.parse_args()
        upload = parsed['audio']

        # Guard clause: only WAV mimetypes are accepted.
        if re.match("audio/.*wav", str(upload.mimetype)) is None:
            bad_request = BadRequest()
            bad_request.data = {
                'status': 'error',
                'message': 'Invalid file type/extension'
            }
            raise bad_request

        # Run the model and convert the embedding array to plain lists so it
        # serializes to JSON.
        embedding = self.model_wrapper.predict(upload.read())
        result['embedding'] = embedding.tolist()
        result['status'] = 'ok'

        return result
コード例 #11
0
class ModelPredictAPI(PredictAPI):
    """Text prediction endpoint taking a JSON list of strings."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser, validate=True)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        result = {'status': 'error'}

        input_json = MAX_API.payload

        # Make sure the input list is not empty
        if len(input_json['text']) == 0:
            abort(
                400,
                'An empty list was provided. Please put add the input strings to this list.'
            )

        try:
            output = self.model_wrapper.predict(input_json['text'])
            result['predictions'] = output
            result['status'] = 'ok'
            return result

        except Exception:
            # FIX: narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt at this API boundary.
            abort(500, "Model Inference Failed with valid input")
コード例 #12
0
class ModelPredictAPI(PredictAPI):
    """Text generation endpoint: continues `seed_text` by `chars` characters."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(model_input)
    def post(self):
        """Make a prediction given input data"""
        result = {'status': 'error'}

        j = request.get_json()
        seed_text = j['seed_text']
        # Idiom: dict.get with a default replaces `x if k in j else default`.
        gen_chars = j.get('chars', DEFAULT_CHARS)
        generated_text = self.model_wrapper.predict({
            'sentence': seed_text,
            'gen_chars': gen_chars
        })
        full_text = seed_text + generated_text
        model_pred = {
            'seed_text': seed_text,
            'generated_text': generated_text,
            'full_text': full_text
        }
        result['prediction'] = model_pred
        result['status'] = 'ok'

        return result
コード例 #13
0
class ModelPredictAPI(PredictAPI):
    """Text prediction endpoint pairing each input string with its output."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.marshal_with(predict_response)
    def post(self, input):
        """Make a prediction given input data"""
        result = {'status': 'error'}

        input_json = input

        # Make sure the input list is not empty
        if len(input_json['text']) == 0:
            abort(
                400,
                'An empty list was provided. Please put add the input strings to this list.'
            )

        try:
            output = self.model_wrapper.predict(input_json['text'])
            # Idiom: zip inputs with outputs instead of indexing via
            # range(len(...)).
            result['results'] = [{
                'original_text': text,
                'predictions': pred
            } for text, pred in zip(input_json['text'], output)]
            result['status'] = 'ok'
            return result

        except Exception:
            # FIX: narrowed from a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt at this API boundary.
            abort(500, "Model Inference Failed with valid input")
コード例 #14
0
class ModelPredictAPI(PredictAPI):

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Predict audio classes from input data"""
        result = {'status': 'error'}

        args = input_parser.parse_args()

        # Reject anything that does not declare a WAV mimetype.
        if not re.match("audio/.*wav", str(args['audio'].mimetype)):
            e = BadRequest()
            e.data = {
                'status':
                'error',
                'message':
                'Invalid file type/extension: ' + str(args['audio'].mimetype)
            }
            raise e

        audio_data = args['audio'].read()
        # sliced_audio = audio_slice.slice(audio_data, 10)

        # Getting the predictions
        try:
            # _predict raises ValueError when start_time is outside the clip.
            preds = self.model_wrapper._predict(audio_data, args['start_time'])
        except ValueError:
            e = BadRequest()
            e.data = {
                'status': 'error',
                'message': 'Invalid start time: value outside audio clip'
            }
            raise e

        # Aligning the predictions to the required API format
        # (each prediction is a (label_id, label, probability) triple).
        label_preds = [{
            'label_id': p[0],
            'label': p[1],
            'probability': p[2]
        } for p in preds]
        # label_preds = [{'label_id': p[0], 'probability': p[2]} for p in preds]

        # Filter list: keep only labels named in the (non-blank) filter args.
        if args['filter'] is not None and any(x.strip() != ''
                                              for x in args['filter']):
            label_preds = [
                x for x in label_preds if x['label'] in args['filter']
            ]

        result['predictions'] = label_preds
        result['status'] = 'ok'

        return result
コード例 #15
0
class ModelPredictAPI(PredictAPI):
    """Audio classification endpoint (stages the upload at /audio.wav)."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Predict audio classes from input data"""
        result = {'status': 'error'}

        args = input_parser.parse_args()
        audio_data = args['audio'].read()

        # clean up from earlier runs
        # NOTE(review): the fixed /audio.wav path is unsafe under concurrent
        # requests — consider tempfile.NamedTemporaryFile. Confirm the server
        # runs single-threaded before relying on this.
        if os.path.exists("/audio.wav"):
            os.remove("/audio.wav")

        # Crude extension check on the upload's repr; reject non-wav input.
        if '.wav' in str(args['audio']):
            # FIX: use a context manager so the file handle is closed even
            # if the write fails (the original leaked it on error).
            with open("/audio.wav", "wb") as wav_file:
                wav_file.write(audio_data)
        else:
            e = BadRequest()
            e.data = {
                'status': 'error',
                'message': 'Invalid file type/extension'
            }
            raise e

        # Getting the predictions
        try:
            preds = self.model_wrapper.predict("/audio.wav",
                                               args['start_time'])
        except ValueError:
            e = BadRequest()
            e.data = {
                'status': 'error',
                'message': 'Invalid start time: value outside audio clip'
            }
            raise e

        # Aligning the predictions to the required API format
        label_preds = [{
            'label_id': p[0],
            'label': p[1],
            'probability': p[2]
        } for p in preds]
        result['predictions'] = label_preds
        result['status'] = 'ok'

        os.remove("/audio.wav")

        return result
コード例 #16
0
class ModelPredictAPI(PredictAPI):
    """Question answering endpoint: answers grouped per input paragraph."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser, validate=True)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        result = {'status': 'error'}

        input_json = MAX_API.payload
        # Validate every paragraph before running the model.
        try:
            for p in input_json["paragraphs"]:
                assert frozenset(p.keys()) == frozenset(
                    ["context", "questions"])
                if p["context"] == "":
                    abort(400, "Invalid input, please provide a paragraph.")
                if not isinstance(p["questions"], list):
                    abort(400, "Invalid input, questions should be a list.")
        except KeyError:
            abort(
                400,
                "Invalid input, please check that the input JSON has a `paragraphs` field."
            )
        except AssertionError:
            abort(
                400,
                "Invalid input, please ensure that the input JSON has `context` and `questions` fields."
            )

        preds = self.model_wrapper.predict(input_json)
        # Create a flat list of answers
        answers_list = ["" if not preds[p][0] else preds[p][1] for p in preds]
        # Create a split of how many elements go in each list
        splits = [len(p['questions']) for p in input_json['paragraphs']]
        # Regroup the flat list into one sublist per paragraph.
        # BUG FIX: the original used `splits[i - 1]` as the slice offset,
        # i.e. the PREVIOUS paragraph's question count, not the cumulative
        # total — wrong slices from the third paragraph on. Use a running
        # offset instead.
        answers = []
        offset = 0
        for s in splits:
            answers.append(answers_list[offset:offset + s])
            offset += s

        result['predictions'] = answers
        result['status'] = 'ok'

        return result
コード例 #17
0
class ModelPredictAPI(PredictAPI):
    """Prediction endpoint that feeds raw image bytes straight to the model."""

    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        parsed = input_parser.parse_args()
        raw_bytes = parsed['image'].read()
        predictions = self.model_wrapper.predict(raw_bytes)
        return {
            'status': 'ok',
            'predictions': predictions,
        }
コード例 #18
0
class ModelPredictAPI(PredictAPI):
    """Image-to-image endpoint: returns the transformed image as a PNG file."""

    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    def post(self):
        """Make a prediction given input data"""

        parsed = input_parser.parse_args()
        # Decode the upload; a ValueError means the bytes are not a
        # supported image format.
        try:
            raw_bytes = parsed['image'].read()
            decoded = self.model_wrapper._read_image(raw_bytes)
        except ValueError:
            abort(400,
                  "Please submit a valid image in PNG, Tiff or JPEG format")

        transformed = self.model_wrapper.predict(decoded)
        png_stream = self.model_wrapper.write_image(transformed)
        return send_file(png_stream,
                         mimetype='image/png',
                         attachment_filename='result.png')
コード例 #19
0
class ModelPredictAPI(CustomMAXAPI):
    """Audio synthesis endpoint: runs the model, then serves output.wav."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc(produces=['audio/wav'])
    @MAX_API.expect(input_parser)
    def get(self):
        """Generate audio file"""
        args = input_parser.parse_args()
        model = args['model']
        # predict writes its result to output.wav; the return value is unused.
        _ = self.model_wrapper.predict(model)

        # FIX: read via a context manager — the original
        # open('output.wav', 'rb').read() leaked the file handle.
        with open('output.wav', 'rb') as wav_file:
            wav_bytes = wav_file.read()

        response = make_response(wav_bytes)
        response.headers.set('Content-Type', 'audio/wav')
        response.headers.set('Content-Disposition',
                             'attachment',
                             filename='result.wav')

        return response
class ModelPredictAPI(PredictAPI):
    """Text summarization endpoint: one summary per input text."""

    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser, validate=True)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        payload = MAX_API.payload
        # Summarize each input text in order.
        summaries = [self.model_wrapper.predict(text)
                     for text in payload['text']]
        return {
            'status': 'ok',
            'summary_text': summaries,
        }
コード例 #21
0
class ModelPredictAPI(PredictAPI):
    """Audio embedding endpoint (stages the upload at /audio.wav)."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Generate audio embedding from input data"""
        result = {'status': 'error'}

        args = input_parser.parse_args()
        audio_data = args['audio'].read()

        # clean up from earlier runs
        # NOTE(review): the fixed /audio.wav path is unsafe under concurrent
        # requests — consider tempfile.NamedTemporaryFile.
        if os.path.exists("/audio.wav"):
            os.remove("/audio.wav")

        # Crude extension check on the upload's repr; reject non-wav input.
        if '.wav' in str(args['audio']):
            # FIX: use a context manager so the file handle is closed even
            # if the write fails (the original leaked it on error).
            with open("/audio.wav", "wb") as wav_file:
                wav_file.write(audio_data)
        else:
            e = BadRequest()
            e.data = {
                'status': 'error',
                'message': 'Invalid file type/extension'
            }
            raise e

        # Getting the predictions
        preds = self.model_wrapper.predict("/audio.wav")

        # Aligning the predictions to the required API format
        result['embedding'] = preds.tolist()
        result['status'] = 'ok'

        os.remove("/audio.wav")

        return result
class ModelPredictAPI(PredictAPI):
    """CSV-input prediction endpoint."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        result = {'status': 'error'}

        args = input_parser.parse_args()
        # Parse the uploaded file directly into a DataFrame.
        input_data = pd.read_csv(args['file'])
        # FIX: removed leftover debug prints that dumped the raw DataFrame
        # to stdout on every request.
        preds = self.model_wrapper.predict(input_data)

        # Modify this code if the schema is changed
        # label_preds = [{'label_id': p[0], 'label': p[1], 'probability': p[2]} for p in [x for x in preds]]
        result['predictions'] = preds
        result['status'] = 'ok'

        return result
コード例 #23
0
class ModelPredictAPI(PredictAPI):
    """Binary image classifier endpoint: returns a single probability."""

    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        result = {'status': 'error'}

        parsed = input_parser.parse_args()

        # Decode, predict, and wrap the single score in the response schema;
        # a ValueError from any step maps to a 400.
        try:
            raw_bytes = parsed['image'].read()
            decoded = self.model_wrapper._read_image(raw_bytes)
            score = self.model_wrapper.predict(decoded)
            result['predictions'] = [{'probability': float(score)}]
            result['status'] = 'ok'
        except ValueError as err:
            abort(400, str(err))

        return result
コード例 #24
0
class ModelPredictAPI(PredictAPI):
    """Audio embedding endpoint feeding raw upload bytes to the model."""

    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Generate audio embedding from input data"""
        result = {'status': 'error'}  # set default status

        parsed = input_parser.parse_args()
        raw_audio = parsed['audio'].read()

        # OSError from the decoder maps to a 400 with the error text.
        try:
            prediction = self.model_wrapper.predict(raw_audio)
        except OSError as error:
            abort(400, str(error), status="error")

        # Align the predictions to the required API format
        result['prediction'] = prediction
        result['status'] = 'ok'

        return result
コード例 #25
0
class ModelPredictAPI(PredictAPI):
    """Image prediction endpoint: decodes the upload, then predicts."""

    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    @MAX_API.marshal_with(predict_response)
    def post(self):
        """Make a prediction given input data"""
        result = {'status': 'error'}

        parsed = input_parser.parse_args()
        # A ValueError while decoding means an unsupported image format.
        try:
            raw_bytes = parsed['image'].read()
            decoded = self.model_wrapper._read_image(raw_bytes)
        except ValueError:
            abort(400,
                  "Please submit a valid image in PNG, Tiff or JPEG format")

        result['predictions'] = self.model_wrapper.predict(decoded)
        result['status'] = 'ok'

        return result
コード例 #26
0
def caption(img):
    """Return the model's caption predictions for *img*."""
    wrapper = ModelWrapper()
    return wrapper.predict(img)
コード例 #27
0
class ModelPredictAPI(PredictAPI):
    """Face in-painting endpoint: aligns the uploaded face with openface,
    runs the model, and returns a 5x4 collage of completed images as JPEG."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    def post(self):
        """Make a prediction given input data"""

        args = input_parser.parse_args()

        if not args['file'].mimetype.endswith(('jpg', 'jpeg', 'png')):
            abort(
                400,
                'Invalid file type/extension. Please provide an image in JPEG or PNG format.'
            )

        image_input_read = Image.open(args['file'])
        image_mask_type = args['mask_type']

        # creating directory for storing input
        input_directory = '/workspace/assets/input'
        if not os.path.exists(input_directory):
            os.mkdir(input_directory)

        # Clear leftovers from earlier requests; entries may be files or
        # directories, so fall back to rmtree and ignore anything that
        # cannot be removed. (FIX: replaced nested bare excepts.)
        for file in glob.glob(input_directory + '/*'):
            try:
                os.remove(file)
            except OSError:
                shutil.rmtree(file, ignore_errors=True)

        # save input image (RGB JPEG, as openface expects)
        image_input_read = image_input_read.convert('RGB')
        image_input_read.save('/workspace/assets/input/input.jpg')

        # face detection, alignment and resize using openface
        align_args = {
            'inputDir': input_directory,
            'outputDir': '/workspace/assets/input/align',
            'landmarks': 'innerEyesAndBottomLip',
            'dlibFacePredictor':
            '/workspace/openface/models/dlib/shape_predictor_68_face_landmarks.dat',
            'verbose': True,
            'size': 64,
            'skipMulti': False,
            'fallbackLfw': None,
            'mode': 'align'
        }
        final_coordinates = None
        try:
            coordinates = alignMain(align_args)
            coordinates_string = str(coordinates)
            # Parse "[(x1, y1) (x2, y2)]" into "[[x1,y1],[x2,y2]]" for the
            # response header. (FIX: raw string — the original pattern relied
            # on invalid escape sequences like "\s" in a normal string.)
            pattern = r'^\s*\[\s*\(\s*(\d+)\s*,\s*(\d+)\s*\)\s*\(\s*(\d+)\s*,\s*(\d+)\s*\)\s*\]\s*$'
            m = re.match(pattern, coordinates_string)
            if m:
                final_coordinates = '[[{},{}],[{},{}]]'.format(
                    m.group(1), m.group(2), m.group(3), m.group(4))
        except Exception:
            # abort if face detection fails
            abort(400, 'No face was detected in the image.')
        if final_coordinates is None:
            # BUG FIX: the original left final_coordinates unbound when the
            # pattern did not match and crashed with a NameError when
            # setting the response header below. Treat it as a 400 instead.
            abort(400, 'No face was detected in the image.')

        # store aligned input
        input_data = '/workspace/assets/input/align/input/input.png'
        #
        model_data = {
            "input_data_dir": input_data,
            "mask_type": image_mask_type
        }
        image_path = self.model_wrapper.predict(model_data)
        """
        preparing image collage
        """
        new_collage_path = "/workspace/assets/center_mask/completed/Collage.jpg"
        img_columns = 5
        img_rows = 4
        img_width = 320
        img_height = 256
        thumbnail_width = img_width // img_columns
        thumbnail_height = img_height // img_rows
        size = thumbnail_width, thumbnail_height
        new_collage = Image.new('RGB', (img_width, img_height))

        # Load the completed images in sorted order as thumbnails.
        images_list = []
        for filename in sorted(glob.glob(image_path)):
            im = Image.open(filename)
            im.thumbnail(size)
            images_list.append(im)

        # Paste thumbnails column by column, top to bottom.
        i = 0
        x = 0
        for col in range(img_columns):
            y = 0
            for row in range(img_rows):
                new_collage.paste(images_list[i], (x, y))
                i += 1
                y += thumbnail_height
            x += thumbnail_width

        new_collage.save(new_collage_path)
        """
        end of collage creation process
        """
        # Re-encode the collage as JPEG bytes for the response body.
        img = Image.open(new_collage_path, mode='r')
        imgByteArr = io.BytesIO()
        img.save(imgByteArr, format='JPEG')
        imgByteArr = imgByteArr.getvalue()

        response = make_response(imgByteArr)
        response.headers.set('Content-Type', 'image/jpeg')
        response.headers.set('Content-Disposition',
                             'attachment',
                             filename='result.jpg')
        response.headers.set('coordinates', final_coordinates)

        return response
コード例 #28
0
class ModelPredictAPI(PredictAPI):
    """Batch audio embedding endpoint: accepts an upload or URL list,
    converts mp3 inputs to wav with ffmpeg, and embeds files in parallel."""

    # Shared model instance, created once when the module is imported.
    model_wrapper = ModelWrapper()

    @MAX_API.doc('predict')
    @MAX_API.expect(input_parser)
    # @MAX_API.marshal_with(predict_response)  TODO fix this
    def post(self):
        """Generate audio embedding from input data"""
        result = {'status': 'error'}

        true_start = time.time()

        args = input_parser.parse_args()

        # Either an uploaded audio file or a URL list must be supplied.
        if args['audio'] is None and args['url'] is None:
            e = BadRequest()
            e.data = {
                'status': 'error',
                'message': 'Need to provide either an audio or url argument'
            }
            raise e

        # Raw bytes keyed by source name (URL or upload filename).
        audio_data = {}
        uuid_map = {}
        if args['url'] is not None:
            url_splt = args['url'].split(',')
            for url in url_splt:
                audio_data[url] = urllib.request.urlopen(url).read()
        else:
            audio_data[args['audio'].filename] = args['audio'].read()

        print(f"audio_data: {audio_data.keys()}")
        # Spill each input to a uniquely named temp file. NOTE(review): the
        # substring check ('mp3' in name) is fragile — a real extension
        # check would be safer; kept for compatibility.
        # FIX: writes now use context managers so handles are closed even
        # if a write fails (the original leaked them on error).
        for filestring in audio_data.keys():
            uuid_map[filestring] = uuid.uuid1()
            if 'mp3' in filestring:
                print(f"Creating file: /{uuid_map[filestring]}.mp3")
                with open(f"/{uuid_map[filestring]}.mp3", "wb+") as file:
                    file.write(audio_data[filestring])
            elif 'wav' in filestring:
                print(f"Creating file: /{uuid_map[filestring]}.wav")
                with open(f"/{uuid_map[filestring]}.wav", "wb+") as file:
                    file.write(audio_data[filestring])
            else:
                e = BadRequest()
                e.data = {
                    'status': 'error',
                    'message': 'Invalid file type/extension'
                }
                raise e

        start = time.time()

        # Convert mp3 inputs to wav with ffmpeg, one worker thread per file.
        commands = [
            f"ffmpeg -i /{uuid_map[x]}.mp3 /{uuid_map[x]}.wav"
            if 'mp3' in x else "" for x in uuid_map.keys()
        ]
        threads = []
        for command in commands:
            if command != "":
                print(f" Running command: {command}")
                threads.append(
                    threading.Thread(target=run_sys, args=(command, )))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        print(f'Converted mp3 files in {time.time() - start}s')

        start = time.time()
        # Read the wav bytes back and delete the temp files.
        for filestring in uuid_map.keys():
            with open(f"/{uuid_map[filestring]}.wav", "rb") as file:
                audio_data[filestring] = file.read()
            os.remove(f"/{uuid_map[filestring]}.wav")
            if 'mp3' in filestring:
                os.remove(f"/{uuid_map[filestring]}.mp3")
        print(f'Deleted files in {time.time() - start}s')

        # Run the model on all files in parallel; run_model stores each
        # embedding into `res` keyed by filestring.
        res = {}
        threads = []
        for filestring in audio_data.keys():
            threads.append(
                threading.Thread(target=run_model,
                                 args=(self.model_wrapper.predict, filestring,
                                       audio_data[filestring], res)))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        # Aligning the predictions to the required API format
        result['embedding'] = res
        result['status'] = 'ok'

        print(f'Completed processing in {time.time() - true_start}s')
        return result
コード例 #29
0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from core.model import ModelWrapper

from maxfw.core import MAX_API, PredictAPI, MetadataAPI
from flask_restplus import fields
from flask import request


# Single shared model instance used by the API handlers in this module.
model_wrapper = ModelWrapper()

# === Labels API

# Swagger/OpenAPI schema describing one entity label.
model_label = MAX_API.model('ModelLabel', {
    'id': fields.String(required=True, description='Label identifier'),
    'name': fields.String(required=True, description='Entity label'),
    'description': fields.String(required=False, description='Meaning of entity label')
})

# Swagger/OpenAPI schema for the labels endpoint's response payload.
labels_response = MAX_API.model('LabelsResponse', {
    'count': fields.Integer(required=True, description='Number of labels returned'),
    'labels': fields.List(fields.Nested(model_label), description='Entity labels that can be predicted by the model')
})

# Reference: http://gmb.let.rug.nl/manual.php
コード例 #30
0
ファイル: predict.py プロジェクト: autoih/runtime_ner
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys

sys.path.append('/home/ihong/evalmaxner')
from core.model import ModelWrapper

from maxfw.core import MAX_API, PredictAPI, MetadataAPI
from flask_restplus import fields
from flask import request
import json
import csv
import re

# Single shared model instance used by the API handlers in this module.
model_wrapper = ModelWrapper()

# === Labels API

# Swagger/OpenAPI schema describing one entity label.
model_label = MAX_API.model(
    'ModelLabel', {
        'id':
        fields.String(required=True, description='Label identifier'),
        'name':
        fields.String(required=True, description='Entity label'),
        'description':
        fields.String(required=False, description='Meaning of entity label')
    })

labels_response = MAX_API.model(
    'LabelsResponse', {