def index(): log.info('Home api are visited') return '''
def classification(payload):
    """Classify a base64-encoded PNG image with an AutoML image model.

    Expected payload keys:
        image     -- data-URI or raw base64 PNG (required)
        model_name-- AutoML model id (defaults to AUTO_ML_MODEL)
        limit     -- max number of predictions returned (default 3)
        threshold -- minimum score to keep a prediction (default 0.0)
        weighted  -- bool; if True, scores are multiplied by the
                     per-glyph factor in the module-level `weights` map

    Returns a jsonify(...) response with the filtered, score-sorted
    predictions. Raises BadRequest on malformed input and
    InternalServerError when the AutoML call fails.
    """
    project = AUTO_ML_PROJECT
    location = AUTO_ML_LOCATION
    model = payload.get('model_name', AUTO_ML_MODEL)

    try:
        limit = int(payload.get('limit', 3))
    except ValueError as e:
        logging.error(e)
        raise BadRequest("Unable to process limit value '{}'".format(
            payload.get('limit')))
    try:
        threshold = float(payload.get('threshold', 0.0))
    except ValueError as e:
        logging.error(e)
        raise BadRequest("Unable to process threshold value '{}'".format(
            payload.get('threshold')))

    # Reject non-boolean `weighted` directly; the original raised a
    # TypeError only to catch it on the next line.
    weighted = payload.get('weighted', False)
    if not isinstance(weighted, bool):
        logging.error('Weighted is not a boolean')
        raise BadRequest("Unable to process weighted value '{}'".format(
            payload.get('weighted')))

    # Create an image object from the image data and validate it.
    # NOTE: Python 3 exceptions have no `.message`, so the original
    # TypeError handler crashed with AttributeError; format the
    # exception itself instead.
    try:
        image = payload.get('image')
        imagedata = re.sub('^data:image/.+;base64,', '', image)
        imagebytes = BytesIO(base64.b64decode(imagedata))
        image = Image.open(imagebytes)
    except (base64.binascii.Error, TypeError) as e:
        logging.error(e)
        raise BadRequest('Unable to process image data: {}'.format(e))
    except IOError as e:
        logging.error(e)
        raise BadRequest(list(e.args)[0])
    if not isinstance(image, PngImageFile):
        raise BadRequest('Only png images are accepted')

    try:
        prediction_client = automl_v1beta1.PredictionServiceClient()
        # BUG FIX: honour the model resolved from the payload; the
        # original hard-coded AUTO_ML_MODEL here, ignoring `model_name`.
        name = prediction_client.model_path(project, location, model)
        # Use a distinct name instead of shadowing the `payload` argument.
        request_payload = {'image': {'image_bytes': imagebytes.getvalue()}}
        params = {'score_threshold': str(threshold)}
        logging.info('Calling AutoML {}'.format(name))
        response = prediction_client.predict(name, request_payload, params)
        logging.info(response)
    except Exception as e:
        logging.error(e)
        raise InternalServerError('Error communicating with AutoML')

    predictions = []
    for prediction in response.payload:
        glyph = prediction.display_name
        # BUG FIX: the original read `prediction.classifiction.score`
        # (typo) on the weighted branch -> AttributeError at runtime.
        score = prediction.classification.score
        if weighted:
            score *= weights.get(glyph, 1)
        # Score might have been reduced due to weighting, so need to
        # check it again here.
        if glyph != '--other--' and score >= threshold:
            predictions.append({'glyph': glyph, 'score': score})

    predictions = sorted(predictions,
                         key=lambda p: p.get('score'),
                         reverse=True)
    if limit < len(predictions):
        predictions = predictions[0:limit]
    return jsonify(code=200, success=True, result=predictions)
def classification(payload):
    """Classify a base64-encoded PNG image via a Vertex AI endpoint.

    Expected payload keys:
        image     -- data-URI or raw base64 PNG (required)
        limit     -- max number of predictions returned (default 3)
        threshold -- minimum score to keep a prediction (default 0.0)
        weighted  -- bool; if True, confidences are multiplied by the
                     per-glyph factor in the module-level `weights` map

    Returns a jsonify(...) response with the filtered, score-sorted
    predictions. Raises BadRequest on malformed input and
    InternalServerError when the Vertex AI call fails.
    """
    try:
        limit = int(payload.get('limit', 3))
    except ValueError as e:
        logging.error(e)
        raise BadRequest("Unable to process limit value '{}'".format(
            payload.get('limit')))
    try:
        threshold = float(payload.get('threshold', 0.0))
    except ValueError as e:
        logging.error(e)
        raise BadRequest("Unable to process threshold value '{}'".format(
            payload.get('threshold')))

    # Reject non-boolean `weighted` directly instead of raising and
    # immediately catching a TypeError.
    weighted = payload.get('weighted', False)
    if not isinstance(weighted, bool):
        logging.error('Weighted is not a boolean')
        raise BadRequest("Unable to process weighted value '{}'".format(
            payload.get('weighted')))

    # Create an image object from the image data and validate it.
    # NOTE: Python 3 exceptions have no `.message`; format the
    # exception itself (the original TypeError handler crashed).
    try:
        logging.debug('unpacking image info')
        image = payload.get('image')
        imagedata = re.sub('^data:image/.+;base64,', '', image)
        imagebytes = BytesIO(base64.b64decode(imagedata))
        image = Image.open(imagebytes)
    except (base64.binascii.Error, TypeError) as e:
        logging.error(e)
        raise BadRequest('Unable to process image data: {}'.format(e))
    except IOError as e:
        logging.error(e)
        raise BadRequest(list(e.args)[0])
    if not isinstance(image, PngImageFile):
        raise BadRequest('Only png images are accepted')

    try:
        client_options = {
            'api_endpoint': f'{AI_REGION}-aiplatform.googleapis.com'
        }
        prediction_client = aiplatform.gapic.PredictionServiceClient(
            client_options=client_options)
        encoded_content = base64.b64encode(
            imagebytes.getvalue()).decode('utf-8')
        instance = predict.instance.ImageClassificationPredictionInstance(
            content=encoded_content).to_value()
        instances = [instance]
        # BUG FIX: the original hard-coded confidence_threshold=0.5 and
        # max_predictions=5, silently ignoring the payload's parsed
        # `threshold` and `limit`. Request a little headroom above
        # `limit` because the local pass below may still drop items
        # ('--other--', weighting pushing scores under the threshold).
        parameters = predict.params.ImageClassificationPredictionParams(
            confidence_threshold=threshold,
            max_predictions=max(limit, 5),
        ).to_value()
        endpoint = prediction_client.endpoint_path(project=AI_PROJECT,
                                                   location=AI_REGION,
                                                   endpoint=AI_ENDPOINT_ID)
        response = prediction_client.predict(endpoint=endpoint,
                                             instances=instances,
                                             parameters=parameters)
        logging.info(response)
    except Exception as e:
        # Use logging instead of bare print() for error reporting.
        logging.error('Error happened in inference')
        logging.error(e)
        raise InternalServerError('Error communicating with AutoML')

    predictions = []
    for prediction in response.predictions:
        glyph = prediction['displayNames'][0]
        confidence = prediction['confidences'][0]
        score = confidence * weights.get(glyph, 1) if weighted else confidence
        # Score might have been reduced due to weighting, so need to
        # check it again here.
        if glyph != '--other--' and score >= threshold:
            predictions.append({'glyph': glyph, 'score': score})

    predictions = sorted(predictions,
                         key=lambda p: p.get('score'),
                         reverse=True)
    if limit < len(predictions):
        predictions = predictions[0:limit]
    return jsonify(code=200, success=True, result=predictions)
def index(): log.info('Auth are visited') return '''
def index(): log.info('Pics are visited') #user = mongo.db.users.find_one_or_404({"_id": username}) return '''
def index(): log.info('Contact are visited') return '''