# NOTE(review): the next eight statements are the tail of a Flask route handler
# whose `def` (and route decorator) sit above this chunk — confirm upstream.
    validation_error = validate_params(request)
    if validation_error:
        # Request failed validation: surface the message to the caller.
        ret = {'error': validation_error}
    elif request.args['input_type'] == 'rev_id':
        # Caller supplied a revision id; look it up and score that revision.
        ret = process_rev_id(request)
    else:
        # Otherwise score the request payload as raw text.
        ret = process_text(request)
    return jsonify(ret)


@app.after_request
def after_request(response):
    """Attach permissive CORS headers to every outgoing response."""
    response.headers.add('Access-Control-Allow-Origin', '*')
    response.headers.add('Access-Control-Allow-Headers', 'Content-Type,Authorization')
    response.headers.add('Access-Control-Allow-Methods', 'GET,PUT,POST,DELETE')
    return response


# Eagerly load every model listed in model_paths.json at import time so request
# handlers never pay the model-load cost. Each entry's dict gains a 'model' key.
# NOTE(review): the file handle from open() is never closed — prefer a `with`
# block. The predict_proba print is a startup smoke test on stdout.
model_data = json.load(open("model_paths.json"))
for k, v in model_data.items():
    model = load_pipeline('./models', v['name'])
    v['model'] = model
    print(model.predict_proba(['f**k']))

if __name__ == "__main__":
    # Development server, listening on all interfaces.
    app.run(host='0.0.0.0')
# CLI flags for training: where data/models live and which model variant to fit.
parser.add_argument('--data_dir', default='/tmp',
                    help='directory for saving training data')
parser.add_argument('--model_dir', default='/tmp',
                    help='directory for saving model')
parser.add_argument('--task', default='attack',
                    help='either attack or aggression')
parser.add_argument('--model_type', default='linear',
                    help='either linear or mlp')
parser.add_argument('--ngram_type', default='char',
                    help='either word or char')
parser.add_argument('--label_type', default='oh',
                    help='either oh or ed')
args = vars(parser.parse_args())

print("Downloading Data")
# download_training_data(args['data_dir'])  # download step intentionally disabled
print("Parsing Data")
X, y = parse_training_data(args['data_dir'], args['task'])

print("Training Model")
clf = train_model(X, y, args['model_type'], args['ngram_type'], args['label_type'])
print(clf.predict_proba(['f**k']))  # smoke test on the freshly trained model

print("Saving Model")
# Encode the full configuration in the artifact name, e.g. attack_linear_char_oh.
clf_name = '{}_{}_{}_{}'.format(
    args['task'], args['model_type'], args['ngram_type'], args['label_type'])
save_pipeline(clf, args['model_dir'], clf_name)

print("Reloading Model")
clf = load_pipeline(args['model_dir'], clf_name)
print(clf.predict_proba(['f**k']))  # verify the save/load round trip scores identically
# NOTE(review): this chunk starts mid-statement — the matching
# `parser.add_argument('--task',` opening sits above this chunk; confirm upstream.
default='attack', help='either attack, recipient_attack, aggression or toxicity')
parser.add_argument('--model_type', default='linear', help='either linear or mlp')
parser.add_argument('--ngram_type', default='char', help='either word or char')
parser.add_argument('--label_type', default='oh', help='either oh or ed')
# Materialize the parsed flags as a plain dict for key-based access below.
args = vars(parser.parse_args())

print("Downloading Data")
download_training_data(args['data_dir'], args['task'])
print("Parsing Data")
X, y = parse_training_data(args['data_dir'], args['task'])
print("Training Model")
clf = train_model(X, y, args['model_type'], args['ngram_type'], args['label_type'])
# Smoke test: print class probabilities for a known-toxic token.
print(clf.predict_proba(['f**k']))
print("Saving Model")
# Encode the full configuration in the artifact name, e.g. attack_linear_char_oh.
clf_name = "%s_%s_%s_%s" % (args['task'], args['model_type'], args['ngram_type'], args['label_type'])
save_pipeline(clf, args['model_dir'], clf_name)
print("Reloading Model")
# Reload from disk and re-score to verify the save/load round trip.
clf = load_pipeline(args['model_dir'], clf_name)
print(clf.predict_proba(['f**k']))