Example 1
from flask import Flask, jsonify, render_template, request
from mlq.queue import MLQ


def create_app():
    app = Flask(__name__)

    mlq = MLQ('cloth_recommendation', 'localhost', 6379, 0)
    mlq.create_reaper(call_how_often=30, job_timeout=100, max_retries=5)
    CALLBACK_URL = 'http://*****:*****'

    @app.route('/')
    def hello_world():
        return render_template('home.html')

    @app.route('/home')
    def second_version():
        return render_template('home.html')

    @app.route('/getPredictions', methods=['POST'])
    def upload():
        print("GOT REQUEST!")
        gender = request.form['gender']
        occasion = request.form['occasion']
        use_files = request.form['use_files']
        fileList = request.form.getlist('files')
        job_id = mlq.post([occasion, gender, use_files, fileList],
                          CALLBACK_URL)
        return jsonify({
            'msg': 'Processing. Check back soon.',
            'job_id': job_id
        })

    @app.route('/status/<job_id>', methods=['GET'])
    def get_progress(job_id):
        #print(job_id)
        return jsonify({'msg': mlq.get_progress(job_id)})

    @app.route('/result/<job_id>', methods=['GET'])
    def get_result(job_id):
        job = mlq.get_job(job_id)
        return jsonify({'short_result': job['short_result']})

    @app.route('/callback', methods=['GET'])
    def train_model():
        success = request.args.get('success', None)
        job_id = request.args.get('job_id', None)
        short_result = request.args.get('short_result', None)
        print(
            "We received a callback! Job ID {} returned successful={} with short_result {}"
            .format(job_id, success, short_result))
        return 'ok'

    return app
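
Example 1 only shows the server side. As a sanity check, a job can be submitted and polled from a client; the sketch below assumes the app is served on localhost:5000 (the host and port are not stated above) and uses the requests library.

import time

import requests

BASE = 'http://localhost:5000'  # assumption: where create_app()'s app is served

resp = requests.post(BASE + '/getPredictions', data={
    'gender': 'female', 'occasion': 'casual',
    'use_files': 'False', 'files': [],
})
job_id = resp.json()['job_id']

# Poll the status endpoint a few times; the exact progress strings
# depend on MLQ's internals.
for _ in range(10):
    print(requests.get('{}/status/{}'.format(BASE, job_id)).json()['msg'])
    time.sleep(2)

print(requests.get('{}/result/{}'.format(BASE, job_id)).json())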
Example 2
import asyncio
import time

from mlq.queue import MLQ

mlq = MLQ('example_app', 'localhost', 6379, 0)


def listener_func(number_dict, *args):
    print(number_dict['number'])
    time.sleep(10)
    return number_dict['number']**2


async def main():
    print("Running, waiting for messages.")
    mlq.create_listener(listener_func)


if __name__ == '__main__':
    asyncio.run(main())
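
Example 2 defines only the consumer; nothing in it enqueues work. A minimal producer sketch for the same namespace (the message shape follows what listener_func reads):

# Producer counterpart to Example 2: listener_func above receives this
# dict as its first argument, prints 7, and returns 49 roughly 10s later.
from mlq.queue import MLQ

mlq = MLQ('example_app', 'localhost', 6379, 0)
job_id = mlq.post({'number': 7})
print('queued job', job_id)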
Example 3
# Parts of this (c) Google Inc under Apache 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# The other bits by Tom Grek, public domain, no warranty.

import asyncio
import io
import json

from mlq.queue import MLQ

import numpy as np
import tensorflow as tf
import tensorflowjs as tfjs
from tensorflow.keras.utils import get_file

mlq = MLQ('deploy_demo', 'localhost', 6379, 0)

model = tfjs.converters.load_keras_model('./model.json')
# Some bug in Keras necessitates the next line
model._make_predict_function()

path = get_file('nietzsche.txt', origin='https://s3.amazonaws.com/text-datasets/nietzsche.txt')
with io.open(path, encoding='utf-8') as f:
    text = f.read().lower()
print('corpus length:', len(text))
chars = sorted(list(set(text)))
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))

def sample(preds, temperature=1.0):
    # Standard helper from the Keras text-generation example credited in the
    # header: rescale the predicted distribution by temperature, then draw
    # one character index from it.
    preds = np.asarray(preds).astype('float64')
    preds = np.log(preds) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)
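
The snippet stops before any queue consumer is defined. A sketch of how the loaded model and sample() could be wired to MLQ, following the listener pattern of Example 2; the message shape ({'seed': ...}), the maxlen of 40, and the generation loop are assumptions, not part of the original:

def generate(msg, *args):
    # Hypothetical worker: msg is assumed to carry a 'seed' string at
    # least maxlen characters long.
    maxlen = 40  # assumption: the window length the model was trained with
    sentence = msg['seed'].lower()[-maxlen:]
    generated = ''
    for _ in range(100):
        # One-hot encode the current window of characters.
        x_pred = np.zeros((1, maxlen, len(chars)))
        for t, char in enumerate(sentence):
            x_pred[0, t, char_indices[char]] = 1.
        preds = model.predict(x_pred, verbose=0)[0]
        next_char = indices_char[sample(preds, 0.5)]
        generated += next_char
        sentence = sentence[1:] + next_char
    return generated


async def main():
    print('Running, waiting for messages.')
    mlq.create_listener(generate)


if __name__ == '__main__':
    asyncio.run(main())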
Example 4
import json
import base64
from flask import Flask, request, render_template
from mlq.queue import MLQ
from werkzeug.utils import secure_filename

import worker

# Initialize the Flask application
app = Flask(__name__)
app.config['SECRET_KEY'] = 'TytNr1_MxgNHlKjdW7GZ8w'
app.config['IN_IMAGES_PATH'] = 'in_images'
app.config['OUT_IMAGES_PATH'] = 'out_images'

# Create MLQ: namespace, redis host, redis port, redis db
mlq = MLQ('prediction_app', 'redis', 6379, 0)
'''
METHODS
'''
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}


def allowed_file(filename):
    return '.' in filename and filename.rsplit(
        '.', 1)[1].lower() in ALLOWED_EXTENSIONS


'''
ENDPOINTS
'''
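
The snippet ends at the ENDPOINTS marker before any route is shown. A sketch of what an upload endpoint could look like here, reusing allowed_file, secure_filename and mlq from above; the route name and message shape are assumptions:

@app.route('/upload', methods=['POST'])  # hypothetical route name
def upload():
    file = request.files['file']
    if not (file and allowed_file(file.filename)):
        return 'unsupported file type', 400
    # Base64-encode the image so it survives the trip through Redis as JSON.
    payload = base64.b64encode(file.read()).decode('ascii')
    job_id = mlq.post({'filename': secure_filename(file.filename),
                       'image': payload})
    return json.dumps({'job_id': job_id})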
Example 5
from hrnet_pose.config import cfg, update_config
from hrnet_pose import models
import pkg_resources
from argparse import Namespace

from pipeline import InferencePipeline
from options.infer_options import InferOptions

from PIL import Image
import base64
import io
from tempfile import NamedTemporaryFile

import os

from mlq.queue import MLQ

mlq = MLQ('pose_transfer', 'localhost', 6379, 0)

# Create the Inference Pipeline instance
pip_opts = InferOptions().parse(['--name', 'fashion_PInet_cycle'])
INFERENCE_PIPELINE = InferencePipeline.from_opts(pip_opts)

# Video variant of the inference pipeline (disabled)
#pip_opts = InferOptions().parse(['--name', 'fashion_PInet_video'])
#INFERENCE_PIPELINE_VIDEO = InferencePipeline.from_opts(pip_opts)

def inference(input_dict, *args):
    """
    Function for the actual inference
    """
    if input_dict['is_video']:
        source_image = input_dict['source_image']
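
The inference function is truncated above; however it ends, it still needs to be registered with the queue. A minimal sketch following the listener pattern of Example 2 (the asyncio import is added here because the snippet's imports don't include it):

import asyncio


async def main():
    print('Pose-transfer worker waiting for messages.')
    mlq.create_listener(inference)


if __name__ == '__main__':
    asyncio.run(main())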
Example 6
"""
Minimalistic app analyzing the sentiment using VADER (pre-trained from NLTK)
"""
from flask import Flask, request, jsonify
from mlq.queue import MLQ

app = Flask(__name__)

# redis queue - assumes redis already running on port 6379
mlq = MLQ('anakin', 'localhost', 6379, 0)


@app.route('/api/check_progress/<job_id>', methods=['GET'])
def check_progress(job_id):
    progress = mlq.get_progress(job_id)
    return progress


@app.route('/api/result/<job_id>', methods=['GET'])
def get_result(job_id):
    job = mlq.get_job(job_id)
    # jsonify() always returns a truthy Response object, so the fallback
    # must check the job's result itself.
    if not job['result']:
        return '[not available yet]'
    return jsonify(job['result'])


@app.route('/api/submit', methods=['POST'])
def submit_job():
    job_id = mlq.post(request.get_json())
    return job_id
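
The VADER worker that consumes the 'anakin' queue is not shown. A minimal sketch of one, assuming NLTK's vader_lexicon is installed and the JSON submitted to /api/submit is a dict with a 'text' field (both assumptions):

import asyncio

from mlq.queue import MLQ
from nltk.sentiment.vader import SentimentIntensityAnalyzer

mlq = MLQ('anakin', 'localhost', 6379, 0)
analyzer = SentimentIntensityAnalyzer()  # needs nltk.download('vader_lexicon')


def analyze(msg, *args):
    # Returns e.g. {'neg': 0.0, 'neu': 0.3, 'pos': 0.7, 'compound': 0.6}.
    return analyzer.polarity_scores(msg['text'])


async def main():
    mlq.create_listener(analyze)


if __name__ == '__main__':
    asyncio.run(main())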
Example 7
          [167, 115], [163, 106], [149, 60], [173, 60], [183, 106], [223, 71]],
         [[205, 38], [205, 40], [203, 38], [197, 56], [193, 42], [177, 62],
          [177, 53], [199, 33], [195, 25], [205, 47], [225, 14], [137, 99],
          [139, 80], [159, 152], [157, 152], [205, 144], [205, 144]],
         [[33, 84], [33, 84], [29, 84], [33, 66], [31, 86], [59, 53],
          [59, 102], [101, 49], [101, 108], [133, 53], [131, 108], [133, 66],
          [133, 91], [187, 67], [185, 91], [241, 66], [235, 95]]]

# TODO: send real videos
VIDEOS = os.listdir('webapp/static/videos/out/')

app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

# create an instance for the ml queue for handling the inference at the backend
mlq = MLQ(q_name='pose_transfer',
          redis_host='localhost',
          redis_port=6379,
          redis_db=0)


# Routes for MLQ (GPU server)
@app.route('/do_transfer', methods=['POST'])
def do_pose_transfer():
    # request the backend to transfer the incoming image
    # but for now this is just a dummy for testing mlq
    assert request.json['number']
    job_id = mlq.post(request.json, CALLBACK_URL)
    return jsonify({'msg': 'Processing, check back soon.', 'job_id': job_id})


@app.route('/api/status/<job_id>', methods=['GET'])
def get_progress(job_id):
    # Body truncated in the original; the status endpoints in Examples 1
    # and 6 suggest it simply proxies mlq.get_progress.
    return jsonify({'msg': mlq.get_progress(job_id)})
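
do_pose_transfer() references a CALLBACK_URL that the truncated snippet never defines. Following Example 1, it would point at a callback route on this same app; the sketch below is an assumption (host, port and route name included):

CALLBACK_URL = 'http://localhost:5000/callback'  # hypothetical


@app.route('/callback', methods=['GET'])
def callback():
    # MLQ reports success, job_id and short_result as query parameters
    # (see the /callback handler in Example 1).
    print(request.args.get('job_id'), request.args.get('success'),
          request.args.get('short_result'))
    return 'ok'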
Example 8
from pipeline import Pipeline
import os
import torch
from cat_attr import CatAttrPredictor, FashionModel
#from ./cat_attr import FashionModel

from mlq.queue import MLQ

from PIL import Image
from io import BytesIO
import base64

# Change to 'cuda' for GPU and 'cpu' for CPU
device = torch.device('cuda')

mlq = MLQ('cloth_recommendation', 'localhost', 6379, 0)

rec = Pipeline(device)


def runPredictionAndGiveResult(arguments):
    gender = arguments[1]
    occasion = arguments[0]
    use_files = arguments[2]
    print(use_files)
    finalList = []
    imageList = []
    if use_files not in ('False', 'false', False):
        fileList = arguments[3]
        f = fileList[0].split('data:image')
        f = f[1::]
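
This worker shares the 'cloth_recommendation' namespace and the [occasion, gender, use_files, fileList] message layout with Example 1, so it is the consumer for that app. The function is truncated above, and unlike the listeners in Examples 2 and 5 it takes a single argument, so this sketch of registering it adds a small wrapper:

import asyncio


async def main():
    print('Recommendation worker waiting for messages.')
    # Listener funcs receive (msg, *args); adapt the single-argument
    # function above accordingly.
    mlq.create_listener(lambda msg, *args: runPredictionAndGiveResult(msg))


if __name__ == '__main__':
    asyncio.run(main())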
Example 9
import functools
from threading import Thread

from mlq.queue import MLQ

# simple_consumer_func, my_consumer_func, my_producer_func and server are
# assumed to be defined elsewhere in the same file.


async def main(args):
    mlq = MLQ(args.namespace, args.redis_host, int(args.redis_port), 0)
    command = args.cmd
    if command == 'clear_all':
        print('Clearing everything in namespace {}'.format(args.namespace))
        for key in mlq._redis.scan_iter("{}*".format(args.namespace)):
            mlq._redis.delete(key)
    elif command == 'consumer':
        mlq.create_listener()
    elif command == 'test_consumer':
        print('Starting test consumer')
        mlq.create_listener(simple_consumer_func)
        mlq.create_listener(my_consumer_func)
    elif command == 'test_producer':
        print('Starting test producer')
        mlq.loop.run_in_executor(mlq.pool,
                                 functools.partial(my_producer_func, mlq))
    elif command == 'test_reaper':
        print('Starting test reaper')
        mlq.create_reaper()
    elif command == 'test_all':
        print('Starting all dummy services')
        mlq.create_listener(my_consumer_func)
        mlq.loop.run_in_executor(mlq.pool,
                                 functools.partial(my_producer_func, mlq))
        mlq.create_reaper()
    elif command == 'post':
        print('Posting message to queue.')
        mlq.post(args.msg, args.callback, args.functions)
    if args.reaper:
        mlq.create_reaper(args.reaper_interval, args.reaper_timeout,
                          args.reaper_retries)
    if args.server:
        thread = Thread(target=server,
                        args=[mlq, args.server_address, args.server_port])
        thread.start()
    return mlq
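
main() expects an argparse-style namespace. A sketch of driving it directly; the attribute names come from what main() reads above, while the values are assumptions:

import asyncio
from argparse import Namespace

args = Namespace(namespace='example_app', redis_host='localhost',
                 redis_port=6379, cmd='consumer', msg=None, callback=None,
                 functions=None, reaper=False, server=False)
asyncio.run(main(args))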
Example 10
def mlq():
    queue = MLQ('test_mlq_ns', 'localhost', 6379, 0)
    return queue
Example 11
def mlq():
    return MLQ('test_mlq_ns', 'localhost', 6379, 0)
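
Examples 10 and 11 read like pytest fixtures with the @pytest.fixture decorator stripped. A sketch of how such a fixture would be declared and consumed, assuming a Redis server on localhost and pytest as the runner:

import pytest

from mlq.queue import MLQ


@pytest.fixture
def mlq():
    return MLQ('test_mlq_ns', 'localhost', 6379, 0)


def test_post_returns_job_id(mlq):
    # Posting a message should yield a job id whose progress can be queried.
    job_id = mlq.post({'number': 1})
    assert job_id is not None
    assert mlq.get_progress(job_id)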