Example #1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-t", "--time", help="Time elapsed, in seconds, so far in the game", 
                        required=True, type=int)
    parser.add_argument("-s", "--spread", help="Spread, relative to visiting team", 
                        required=True, type=float)
    parser.add_argument("-f", "--favorite", help="Current point total for the favorite", 
                        required=True, type=int)
    parser.add_argument("-u", "--underdog", help="Current point total for the underdog", 
                        required=True, type=int)
    args = parser.parse_args()

    
    time_sec = args.time
    spread = args.spread
    favorite_points = args.favorite
    underdog_points = args.underdog

    assert time_sec >= 0, "Time must be a non-negative integer"
    assert favorite_points >= 0, "Favorite points must be a non-negative integer"
    assert underdog_points >= 0, "Underdog points must be a non-negative integer"

    model = inference.load_model(MODEL_PATH)
    scaler = inference.load_scaler(SCALER_PATH)

    pred = inference.run_inference(model, scaler, time_sec, spread, 
                                   favorite_points, underdog_points)

    print(pred)
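
A quick way to exercise main() above without a shell is to fake the command line before calling it; the script name and argument values below are purely illustrative:

import sys

# Hypothetical invocation, equivalent to:
#   python predict_game.py --time 1800 --spread -3.5 --favorite 21 --underdog 17
sys.argv = ["predict_game.py", "--time", "1800", "--spread", "-3.5",
            "--favorite", "21", "--underdog", "17"]
main()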
Example #2
def main(image_path, checkpoint_path):
    # Load the image and the model
    image = cv2.imread(image_path)
    model = load_model(checkpoint_path)
    client = net()

    flag = 0
    while True:
        if flag:
            word = input("input:\n      ")
            if not len(word):
                continue
            word2audio(client, word)
            frames = gen_fake(model, 'audio/audio.wav', image)
            # Play the generated speech while the video frames are shown
            pygame.mixer.init()
            pygame.mixer.music.load('audio/audio.wav')  # load the generated audio
            pygame.mixer.music.play()  # start playback
            for im in frames:
                cv2.imshow('frame', im)
                cv2.waitKey(1)
                time.sleep(1 / fps)
                # cv2.waitKey(1000//fps)
            pygame.quit()

        while True:
            frame = image
            # Display the result frame
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) == 27:  # Esc key (Enter: 13, 'q': ord('q'))
                flag = 1
                break
Example #3
    def __init__(self, recOps, people):
        self.running = False
        self.audio = pyaudio.PyAudio()
        self.stream = self.audio.open(format=recOps['Format'],
                                      channels=recOps['Channels'],
                                      rate=recOps['Rate'],
                                      input=True,
                                      frames_per_buffer=recOps['Chunk'])
        self.vad = webrtcvad.Vad()
        self.vad.set_mode(1)
        self.recording = False
        self.audioPercent = 100
        self.recOps = recOps
        self.check = False
        self.pastFrames = []
        self.lock = threading.Lock()
        self.people = people
        encoder.load_model("encoder/saved_models/pretrained.pt")
        # joinpath() needs a pathlib.Path, not a plain str.
        synthesizer = Synthesizer(Path("synthesizer/saved_models/logs-pretrained/").joinpath("taco_pretrained"),
                                  low_mem=args.low_mem)
        vocoder.load_model("vocoder/saved_models/pretrained/pretrained.pt")
Example #4
def inferencer(work_queue):
    while True:
        data = work_queue.get()
        if isinstance(data, bool):
            # A boolean on the queue is a control flag; False shuts the worker down.
            work_queue.task_done()
            if not data:
                break
            continue
        if isinstance(data, ClassifiedImageBundle):
            data.set_progress()
        # The model is rebuilt to match the shape of each incoming image.
        model = load_model(data.get_np_array().shape)
        prediction = model.predict(np.array([data.get_np_array() / 255]),
                                   batch_size=1)
        print(prediction[0])
        data.set_classification(prediction[0])
        work_queue.task_done()
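
A minimal sketch of how this worker might be wired up, assuming a standard queue.Queue and the False stop sentinel handled above (all wiring names are illustrative):

import queue
import threading

work_queue = queue.Queue()
worker = threading.Thread(target=inferencer, args=(work_queue,), daemon=True)
worker.start()

# ClassifiedImageBundle instances would be put() on the queue by a producer;
# a final False tells the worker to stop.
work_queue.put(False)
work_queue.join()   # returns once every item has been marked task_done()
worker.join()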
Example #5
@app.route('/')
def home_endpoint():
    return render_template('home.html')


@app.route('/predict', methods=['POST'])
def get_prediction():
    image = request.files['file']
    if image.filename != '':
        fn = os.path.join(app.config['UPLOAD_FOLDER'],
                          image.filename + str(datetime.now().time()))
        image.save(fn)

        image = load_image(fn)
        res, preprocessed_image = inference(image)

        preprocessed_image = Image.fromarray(np.uint8(preprocessed_image *
                                                      255)).convert('RGB')
        buffer = BytesIO()
        preprocessed_image.save(buffer, format="PNG")
        myimage = buffer.getvalue()

        return jsonify(message=res, image=base64.b64encode(myimage).decode('utf-8'))

    # No file was uploaded: return an explicit error instead of None.
    return jsonify(message='No file selected'), 400


if __name__ == '__main__':
    model = None
    load_model('model')
    app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 5000)))
Example #6
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
from tensorflow import keras
from tensorflow_core.python.keras.api._v2.keras import layers
from inference import load_model, inference
from PathfindingDll import load_PathfindingDLL
import MCTS

estimate = load_PathfindingDLL()
model = load_model()

model.fit(test_input, test_target)
model.save('path/to/location')
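
The model saved above can be restored later with Keras's own loader; the path below simply mirrors the placeholder used in the example:

from tensorflow import keras

# Reload the model written by model.save('path/to/location') above.
restored_model = keras.models.load_model('path/to/location')
restored_model.summary()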
Example #7
from flask import Flask, flash, request, redirect, url_for, render_template, jsonify
from werkzeug.utils import secure_filename

from inference import load_model, get_result

ALLOWED_EXTENSIONS = set(['png', 'jpg', 'jpeg'])

UPLOAD_FOLDER = 'static/uploads/'

app = Flask(__name__)
app.secret_key = "secret key"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER

filename = ''

netMain, metaMain = load_model()


def allowed_file(filename):
    return '.' in filename and filename.rsplit(
        '.', 1)[1].lower() in ALLOWED_EXTENSIONS


@app.route('/')
def upload_form():
    return render_template('upload.html')


@app.route('/', methods=['POST'])
def upload_image():
    if 'file' not in request.files:
Example #8
# Libs
import torch
import numpy as np
from sklearn.linear_model import LogisticRegression

# Own modules
from infer_sentence import make_ds
from dudu_utils import preprocess, process_block
from inference import load_model, infer_dataset

# Settings
# model_path = r'./models/cnw-2020-04-29_20-02-45.pth.tar'
model_path = r'./models/sswer-2020-04-29_15-47-38.pth.tar'
checkpoint = torch.load(model_path, map_location=lambda storage, loc: storage)
lut = checkpoint['lut']
model = load_model(model_path, len(lut))
window_size = 3
step_size = 3
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
BUFFER_DIR = './buffer'


def train_clf(network, dev):
    dataset, vocab_size = make_ds()
    ftrs, lbls = infer_dataset(network, dataset, dev)
    clf = LogisticRegression(max_iter=1000)
    clf.fit(ftrs, lbls)
    return clf


def pred_sentence():
Example #9
def load_model_cached(model_file):
    model = load_model(model_file)
    model.eval()  # evaluation mode: dropout off, batch norm uses running stats
    return model
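
Despite its name, load_model_cached reloads the model on every call; if memoization is the intent, one possible approach (an assumption, not necessarily the original design) is to cache by file path:

import functools


@functools.lru_cache(maxsize=None)
def load_model_cached(model_file):
    # The first call loads the model; later calls with the same path return
    # the cached, eval-mode instance.
    model = load_model(model_file)
    model.eval()
    return model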
Example #10
from MCTS import Node
from PathfindingDll import load_PathfindingDLL
from GameControlerDLL import load_GameControlerDLL
from Game import get_random_start
import numpy as np
from inference import get_move_to_index, load_model
import tensorflow as tf

model = load_model('test')
inputs = np.ones(shape=(1, 9, 9, 4), dtype=np.float32)
policy = model(inputs, training=True)
Example #11
def save_pt_model(path: str) -> None:
    """
    path: path to which .pt file will be exported 
    """
    model = load_model()
    torch.save(model.state_dict(), path) 
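
For completeness, a state dict written by save_pt_model is usually restored by rebuilding the model and loading the weights back; a minimal sketch assuming the same load_model() factory:

def restore_pt_model(path: str) -> torch.nn.Module:
    """Rebuild the architecture and load the exported weights from path."""
    model = load_model()
    model.load_state_dict(torch.load(path, map_location="cpu"))
    model.eval()
    return model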
Example #12
import cv2
import torch
from torchvision import models
import torchvision.transforms as transforms
from PIL import Image
from flask import Flask, jsonify, request
import base64
import numpy as np

import sys
sys.path.append('../ML')
from inference import prepare_image, load_model, inference
class_names = ["KIA Rio", "SKODA OCTAVIA", "Hyundai SOLARIS", "Volkswagen Polo", "Volkswagen Tiguan"]

mpath = '../eval/model.pth'
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = load_model(mpath, device)

app = Flask(__name__)

@app.route('/predict', methods=['POST'])
def predict():
    if request.method == 'POST':
        bm = request.form['content']
        bb = bm.encode('ascii')
        m = base64.b64decode(bb)
        nparr = np.frombuffer(m, np.uint8)
        image = cv2.imdecode(nparr, cv2.IMREAD_COLOR) / 255.
        data = prepare_image(image)['image'].to(device).type(torch.float)
        res = inference(model, data)[0]
        res_dict = {k: v for k, v in zip(class_names, res)}
        return jsonify({'default': res_dict})
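
A matching client for the /predict route above base64-encodes an image and posts it as the content form field; the host, port, and file name here are assumptions:

import base64

import requests

with open('car.jpg', 'rb') as f:
    payload = {'content': base64.b64encode(f.read()).decode('ascii')}

resp = requests.post('http://localhost:5000/predict', data=payload)
print(resp.json())  # e.g. {'default': {'KIA Rio': ..., ...}}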
Example #13
app.logger.setLevel(logging.DEBUG)
logHandler = handlers.RotatingFileHandler('logs/app.log',
                                          maxBytes=1000000,
                                          backupCount=5)
formatter = logging.Formatter(
    '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logHandler.setFormatter(formatter)
app.logger.addHandler(logHandler)

MODEL_PATH = 'model/model.h5'
SCALER_PATH = 'model/scaler.pkl'

# Load the model and scaler globally on startup
global graph  # Allow the graph to be accessed within the app context
graph = tf.get_default_graph()
model = inference.load_model(MODEL_PATH)
scaler = inference.load_scaler(SCALER_PATH)


@app.route("/games/<year>/<phase>/<week>")
def get_game_data_by_week(year, phase, week):
    app.logger.debug("Starting request for game data for {}:{}{}".format(
        year, phase, week))
    if phase.upper() not in {"PRE", "REG", "POST"}:
        raise BadRequest('Phase must be one of PRE, REG, POST')
    try:
        game_data = nfl_api.get_game_data(year, phase, week)
    except Exception as e:
        app.logger.error(e)
        raise HTTPException("Error collecting game data")
Example #14
        'home.html')


@app.route('/predict', methods=['POST'])
def get_prediction():
    image = request.files['file']
    if image.filename != '':
        fn = os.path.join(
            app.config['UPLOAD_FOLDER'], image.filename +
            str(datetime.now().time())
        )
        image.save(fn)

        image = load_image(fn)
        os.remove(fn)
        res_image, res = run_on_image(image)
        res_image = Image.fromarray(np.uint8(res_image)).convert('RGB')
        image_height_over_width = res_image.size[1] / res_image.size[0]
        #res_image = res_image.resize((260, int(image_height_over_width * 260)))
        buffer = BytesIO()
        res_image.save(buffer, format="PNG")
        return_image = buffer.getvalue()

        return jsonify(message=res, image=base64.b64encode(return_image).decode('utf-8'))

    # No file was uploaded: return an explicit error instead of None.
    return jsonify(message='No file selected'), 400


if __name__ == '__main__':
    model = None
    load_model('mask_detector/models/model360.pth')
    app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 5000)))