import numpy as np

from load import init

# the model is loaded lazily on the first call to predict()
model = None


def predict(img_src):
    brand = ''
    brand_name = ['McLaren', 'Lamborghini', 'Bugatti', 'Ferrari', 'Rolls Royce']
    global model
    # if the model is not loaded yet, initialize it
    if model is None:
        model = init()
    # get the appropriate brand name from the list defined above
    pred = model.predict(img_src)
    pred_ind = np.argmax(pred)
    brand = brand_name[pred_ind]
    return brand
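# A hypothetical call site for predict(). The image path and the 224x224 RGB input
# size below are assumptions, not part of the original snippet -- the real shape
# depends on the saved model, so adjust accordingly.
from keras.preprocessing.image import img_to_array, load_img

img = load_img('test_car.jpg', target_size=(224, 224))  # hypothetical image path
x = img_to_array(img) / 255.0                           # scale pixels to [0, 1]
x = np.expand_dims(x, axis=0)                           # add the batch dimension
print(predict(x))                                       # prints one of the five brand names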
# for regular expressions, saves time dealing with string data
import re
# for decoding base64 image data
import base64
# system level operations (like loading files)
import sys
# for reading operating system data
import os

from flask import Flask, render_template

# tell our app where our saved model is
sys.path.append(os.path.abspath("./model"))
import load

# initialize our flask app
app = Flask(__name__)

# global vars for easy reusability
global model, graph
# initialize these variables
model, graph = load.init()


# decoding an image from base64 into raw representation
def convertImage(imgData1):
    imgstr = re.search(r'base64,(.*)', imgData1).group(1)
    # print(imgstr)
    # decode the base64 payload and write it out as a PNG
    with open('output.png', 'wb') as output:
        output.write(base64.b64decode(imgstr))


@app.route('/')
def index():
    # initModel()
    # render out pre-built HTML file right on the index page
    return render_template("index.html")
import os

from flask import Flask, request
from linebot import LineBotApi, WebhookHandler

from load import init

app = Flask(__name__)

# get access token and channel secret
LINE_CHANNEL_ACCESS_TOKEN = os.environ['LINE_CHANNEL_ACCESS_TOKEN']
LINE_CHANNEL_SECRET = os.environ['LINE_CHANNEL_SECRET']

# create line bot api and webhook handler
line_bot_api = LineBotApi(LINE_CHANNEL_ACCESS_TOKEN)
web_handler = WebhookHandler(LINE_CHANNEL_SECRET)

header = {
    "Content-Type": "application/json",
    "Authorization": "Bearer " + LINE_CHANNEL_ACCESS_TOKEN
}

model = init()


# simple check for server failures
@app.route('/')
def index():
    return 'hello world'


@app.route('/callback', methods=['POST'])
def callback():
    # get x-line-signature
    signature = request.headers['X-Line-Signature']
    # get request body as text
    body = request.get_data(as_text=True)
    app.logger.info('Request body: ' + body)
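    # A minimal sketch of how the handler usually finishes (the original snippet is
    # truncated here): hand the body to the webhook handler and reject requests whose
    # X-Line-Signature does not match, following the line-bot-sdk pattern. The local
    # imports would normally live at the top of the file.
    from flask import abort
    from linebot.exceptions import InvalidSignatureError

    try:
        web_handler.handle(body, signature)
    except InvalidSignatureError:
        abort(400)
    return 'OK'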
import load

public = load.init()
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 5 21:46:53 2019

@author: tanma
"""
import main_model as mm
import pdf_to_text as convert
import load as l
import json

# Can be changed to the alternate model if wanted
nlp = l.init(r'Final_model_alt')


def jsonify(pdf_file_path, nlp=nlp):
    """
    Build an indented .json doc from a legal .pdf document using the PDFBox API
    and a spaCy NER model with pre-existing labels.

    # Arguments
        pdf_file_path (string): File path to the PDF you want to convert to entities.

    # Returns
        results.json: A beautified .json document containing all the entities
        found in the document.
    """
    text = convert.convert_pdf_to_text_pdfbox_api(pdf_file_path)
    text = text[:1000000]
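    # A minimal sketch of how the function could finish (the snippet is truncated
    # here): run the spaCy pipeline over the extracted text and dump the entities
    # to results.json, as the docstring describes. The exact output schema of the
    # original implementation is an assumption.
    doc = nlp(text)
    entities = [{"text": ent.text, "label": ent.label_,
                 "start": ent.start_char, "end": ent.end_char}
                for ent in doc.ents]
    with open("results.json", "w", encoding="utf-8") as f:
        json.dump(entities, f, indent=4, ensure_ascii=False)
    return entities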
import re

from flask import Flask, request, jsonify, abort
from flask_cors import CORS, cross_origin
import kenlm

from load import init
from pre_process import normalize_string
from utils import tokenize_sinhala_text

app = Flask(__name__)
cors = CORS(app)
app.config['CORS_HEADERS'] = 'Access-Control-Allow-Origin'

global corrector, model, params
# Building and loading the keras model; the params file contains the encoding/decoding dictionaries.
corrector, model, params = init()

ngramModel = kenlm.LanguageModel('./lm/sinhala_lm.binary')

# matches any character outside the Sinhala block, basic punctuation, whitespace and the zero-width joiner
regexp = re.compile(r'[^\u0D80-\u0DFF.!?,\s\u200d]')


def valid_sinhala_sentence(sentence):
    # a sentence is valid only if it is non-empty and contains no disallowed characters
    if not sentence:
        return False
    elif sentence == '':
        return False
    elif regexp.search(sentence):
        return False
    else:
        return True
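# A minimal usage sketch (the helper name below is only illustrative): validate an
# incoming sentence, then score it with the kenlm n-gram model; kenlm's score()
# returns the log10 probability of the whole sentence.
def score_sentence(sentence):
    if not valid_sinhala_sentence(sentence):
        return None
    return ngramModel.score(sentence, bos=True, eos=True)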
from flask import Flask, request, jsonify
from flask_cors import CORS
from scipy.misc import imsave, imread, imresize
import numpy as np
import keras.models
import sys, os

sys.path.append(os.path.abspath('./model'))
import load as load_model

global model, graph
model, graph = load_model.init()

app = Flask(__name__, static_url_path='', static_folder='deep-shrooms-frontend/build')
cors = CORS(app, resources={r"*": {"origins": "*"}})


def generate_input_image(img_array):
    # resize to the network's 480x480 input, keep a copy on disk, and scale pixels to [0, 1]
    img_resized = imresize(img_array, (480, 480))
    imsave('resized.jpg', img_resized)
    X = img_resized.reshape(1, 480, 480, 3)
    X = X / 255.0
    return X


@app.route('/', methods=['GET'])
def frontpage():
    return app.send_static_file('index.html')
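# A hypothetical prediction endpoint (the route name, request field and single
# sigmoid output are assumptions, not part of the original snippet): read an
# uploaded image, preprocess it with generate_input_image, and run the model
# inside the loaded TensorFlow graph.
@app.route('/classify', methods=['POST'])
def classify():
    img_array = imread(request.files['image'])  # read the uploaded file
    X = generate_input_image(img_array)
    with graph.as_default():                    # required for Keras + TF1 across threads
        prob = float(model.predict(X)[0][0])
    return jsonify({'probability': prob})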
import pickle

import load as l
from tensorflow.keras.preprocessing.sequence import pad_sequences

tokenizer = pickle.load(open("tokenizer_instance.pickle", "rb"))
model = l.init()


def make_sentence(sentence):
    # the tokenizer expects a list of texts, so wrap the single sentence in a list
    return [sentence]


def preprocessing(sentence, max_features=20000, maxlen=50, tokenizer=tokenizer):
    list_tokenized_train = tokenizer.texts_to_sequences(sentence)
    X_t = pad_sequences(list_tokenized_train, maxlen=maxlen)
    return X_t


def prediction(x):
    x = preprocessing(make_sentence(x))
    list_classes = ["Toxic", "Severely Toxic", "Obscene", "Threat",
                    "Insult", "Identity Hate"]
    # map each class to its predicted probability as a percentage
    x = str(dict(zip(list_classes, 100 * model.predict([x]).flatten())))
    return x
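# Example usage (the input string is illustrative): prediction() returns a
# stringified dict mapping each toxicity label to a percentage.
if __name__ == "__main__":
    print(prediction("you are a horrible person"))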
# -*- coding: utf-8 -*-
"""
Created on Thu Sep 12 23:40:27 2019

@author: tanma
"""
import pickle

import load as l
from tensorflow.keras.preprocessing.sequence import pad_sequences

tokenizer = pickle.load(open("tokenizer_instance.pickle", "rb"))
model, _ = l.init()


def make_sentence(sentence):
    return [sentence]


def preprocessing(sentence, max_features=20000, maxlen=50, tokenizer=tokenizer):
    list_tokenized_train = tokenizer.texts_to_sequences(sentence)
    X_t = pad_sequences(list_tokenized_train, maxlen=maxlen)
    return X_t


def prediction(x):
import base64
import os

from flask import Flask, render_template, request
from keras.preprocessing.image import load_img
from load import init
import numpy as np
import tensorflow as tf

# initialize our flask app
app = Flask(__name__)

global weight_loaded_model, model, inputs
weight_loaded_model, model, inputs = init()

INPUT_HEIGHT = 416
INPUT_WIDTH = 416
INPUT_CHANNELS = 3


# decoding an image from base64 into raw representation
def convertImage(imgData):
    # strip the data-URL prefix (e.g. "data:image/png;base64,") before decoding
    imgData = imgData.decode('ascii').split(',')[1].encode('ascii')
    with open('output.png', 'wb') as output:
        output.write(base64.decodebytes(imgData))


@app.route('/')
def index():
    # render out pre-built HTML file right on the index page
    return render_template("index.html")
# for converting a base64 string to an image
import base64
# for regular expressions
import re
# system level operations (like loading files)
import sys
# for reading operating system data
import os

from flask import Flask

# tell our app where our saved model is
sys.path.append(os.path.abspath("./model"))
from load import init

# initialize our flask app
app = Flask(__name__)

# global vars for easy reusability
global model, graph
model, graph = init()


# decoding an image from base64 into raw representation
def convertImage(imgData1):
    imgData1 = imgData1.decode("utf-8")
    imgstr = re.search(r'base64,(.*)', imgData1).group(1)
    # print(imgstr)
    imgstr_64 = base64.b64decode(imgstr)
    with open('output/output.png', 'wb') as output:
        output.write(imgstr_64)


@app.route('/')
def index():
    # initModel()
from flask import Flask, render_template, url_for, request, redirect, session
import string
import time
import datetime

from load import init, synthesize

app = Flask(__name__)

print(datetime.datetime.now(), " model loading")
# ttm_model, ssrn_model = init()
g = init()
print(datetime.datetime.now(), " model loaded")


@app.route('/', methods=['POST', 'GET'])
def index():
    text_input = ''
    audio_filename = ''
    text_input_list = []
    return render_template('index.html',
                           text_input=text_input,
                           text_input_list=text_input_list,
                           audio_filename=audio_filename)


@app.route('/submit', methods=['POST', 'GET'])
def submit():
    print('new submit request')
    text_input = ''