# Example #1 (score: 0)
def run():
    """Capture webcam frames, run face prediction, and display annotated video.

    Opens the default camera (index 0), converts each frame to grayscale,
    draws per-face prediction info and facial landmarks onto the frame, and
    shows it in a "Webcam" window until ESC or 'q' is pressed or the stream
    ends. Releases the camera and destroys the window on every exit path.
    """
    predictor = Predictor()
    predictor.load_model()

    capture = cv2.VideoCapture(0)
    if not capture.isOpened():
        # Camera unavailable: release the handle before bailing out so the
        # device is not left claimed (original leaked it and the window).
        capture.release()
        return

    cv2.namedWindow("Webcam")
    try:
        while True:
            rval, frame = capture.read()
            if not rval:  # stream ended or camera error
                break

            image = frame
            # BUG FIX: OpenCV VideoCapture frames are BGR, not RGB — use
            # COLOR_BGR2GRAY so the luma conversion applies the correct
            # per-channel weights.
            gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            for face_info in predictor.predict(gray_image):
                draw_face_info(image, face_info)
            draw_landmarks(image, get_all_landmarks(gray_image),
                           draw_dots=True, visualize=False)

            cv2.imshow("Webcam", image)
            key = cv2.waitKey(1)
            if key == 27 or key == ord('q'):  # exit on ESC or Q
                break
    finally:
        # Always free the window and camera, even if prediction raises.
        cv2.destroyWindow("Webcam")
        capture.release()
class Evaluator():
    """Evaluate a specific model over a validation set, saving metrics with the model."""

    def __init__(self):
        self.predictor = Predictor()

    def _load_eval_set(self, eval_file):
        # The validation set is stored as a pickled pandas DataFrame.
        self.eval_df = pd.read_pickle(eval_file)

    def evaluate(self, version):
        """Evaluate model `version` against the held-out validation set.

        - Load validation data frame
        - Map previously unseen countries to OTHER
        - Run prediction
        - Calculate confusion matrix and classification report with
          precision/recall/f-score for each country
        - Save metrics to a separate file in the model directory for future
          reference (could also save as a json for easy automatic loading
          and comparison)
        """
        self.predictor.load_model(version)
        self._load_eval_set(f"{DATA_DIR}/validation.pkl")

        # Countries absent from training data would break the label encoder,
        # so map NaNs and unseen countries to the catch-all OTHER class.
        # Plain assignment instead of fillna(..., inplace=True): in-place
        # fillna on a selected column is deprecated chained assignment in
        # pandas and may silently fail to write back to the frame.
        self.eval_df['COUNTRY.OF.ORIGIN'] = (
            self.eval_df['COUNTRY.OF.ORIGIN'].fillna('OTHER'))
        seen_countries = set(self.predictor.label_encoder.classes_)
        self.eval_df['COUNTRY.OF.ORIGIN.MAPPED'] = self.eval_df[
            'COUNTRY.OF.ORIGIN'].apply(
                lambda c: c if c in seen_countries else 'OTHER')
        gold_labels = self.predictor.label_encoder.transform(
            self.eval_df['COUNTRY.OF.ORIGIN.MAPPED'])

        predictions = self.predictor.predict(self.eval_df)

        conf_mat = confusion_matrix(gold_labels, predictions)
        logger.info("Confusion matrix", confusion_matrix=conf_mat)

        report = metrics.classification_report(
            gold_labels,
            predictions,
            target_names=self.predictor.label_encoder.classes_)
        logger.info("Classification report", report=report)

        metrics_path = os.path.join(MODEL_DIR, version, METRICS_FILE)
        # Having trouble getting full matrix to print, will do later
        with open(metrics_path, "w") as metrics_fd:
            metrics_fd.write(f"* Confusion matrix:\n{conf_mat}\n")
            metrics_fd.write(f"* Classification report:\n{report}")
# Example #3 (score: 0)
HOST_ADDRESS = '0.0.0.0'
HOST_PORT = 8000
TIMEOUT = 60

# API metadata; interactive docs are served at the root path.
app = FastAPI(
    title="Shipment Country of Origin Predictor",
    version="1.0.0",
    docs_url="/",
    openapi_url="/api/v1/openapi.json"
)

# BUG FIX: strip surrounding whitespace — a trailing newline in the file
# would otherwise end up inside the version string the predictor uses to
# locate the model directory.
with open("model_version.txt") as version_fd:
    model_version = version_fd.read().strip()

predictor = Predictor()
predictor.load_model(model_version)

class ShipmentModel(BaseModel):
    """Request payload for the shipment country-of-origin prediction endpoint.

    Field aliases are camelCase to match the JSON body sent by clients.
    NOTE(review): field metadata is declared with fastapi.Body; the canonical
    tool for pydantic model fields is pydantic.Field (Body is intended for
    endpoint parameters) — confirm and migrate when touching the imports.
    """

    arrival_date: str = Body(..., alias="arrivalDate", description="Arrival date for shipment", example="09/20/2012")
    # Optional made explicit: the default is None, and relying on a None
    # default to imply Optional is pydantic-v1-only behavior.
    weight_kg: Optional[float] = Body(None, alias="weightKg", description="Weight of shipment in kilograms", example=7660)
    us_port: Optional[str] = Body(None, alias="usPort", description="Arrival port for shipment", example="Long Beach, California")
    product_details: Optional[str] = Body(None, alias="productDetails", description="Details of shipment product",
                                          example="LA DEE DA ENDCAPLA DEE DA ENDCAPP.O.NO.:0253287160ITEM NO.550808586QTY:6300PCS1CTN63PCSPLACE OF DELIVERY:SOUTHGATE-FLOWGLN: 0078742000008DEPARTMENT NO.: 00007HTS:9503000073,9503000073PO TYPE:0043")


@app.post("/shipments")
def predict_country(shipment: ShipmentModel):

    features = {
        "ARRIVAL.DATE": [shipment.arrival_date],
        "PRODUCT.DETAILS": [shipment.product_details],
# Example #4 (score: 0)
import os
from io import BytesIO
from flask import Flask, request, render_template
import base64, json
from PIL import Image
from datetime import datetime
from predict import Predictor
from rpi_define import *

app = Flask(__name__)
# Load the model once at startup so requests don't pay the load cost.
pred = Predictor()
pred.load_model()
# os.path.join instead of `+ r"\received"`: identical on Windows, but
# portable and free of hand-built separator concatenation.
received_path = os.path.join(BUTING_PATH, "received")


@app.route("/imageSubmit", methods=["POST"])
def imageSubmit():
    """Decode the base64 image from the POST form, save it under
    received_path with a date-time-stamped name, and return the model's
    prediction for the saved file.
    """
    img = BytesIO(base64.urlsafe_b64decode(request.form['image']))
    img = Image.open(img)
    # Colons are stripped from the time component so the filename is valid
    # on Windows; os.path.join keeps the path portable.
    name = os.path.join(
        received_path,
        "{}-{}.png".format(today(), "".join(now().split(":"))))
    # exist_ok=True avoids the check-then-create race of the original
    # os.path.exists + os.makedirs pair.
    os.makedirs(received_path, exist_ok=True)
    img.save(name)
    return pred.predict_img(name)


@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the landing page for both GET and POST requests."""
    page = render_template('index.html')
    return page
# Example #5 (score: 0)
class Trainer:
    """Train a BERT-based classifier, checkpointing and evaluating periodically.

    __init__ builds the train/dev datasets, the model graph, and the
    optimizer; `train` runs the session loop and calls `evaluate` every
    `evaluate_every` global steps.
    """

    def __init__(self, bert_config_file, is_training, num_labels, train_file,
                 dev_file, vocab_file, output_dir, max_seq_length,
                 learning_rate, batch_size, epochs, warmup_proportion,
                 virtual_batch_size_ratio, evaluate_every, init_ckpt):
        # os.makedirs is portable and immune to shell injection through
        # output_dir, unlike os.system(f"mkdir {output_dir}"); exist_ok
        # makes re-runs on an existing directory a no-op.
        os.makedirs(output_dir, exist_ok=True)
        self._data_train = Dataset(train_file, num_labels, vocab_file, True,
                                   output_dir, True, max_seq_length)
        self._dev_data = Dataset(dev_file, num_labels, vocab_file, True,
                                 output_dir, False, max_seq_length)
        # Total optimizer steps over all epochs; warmup is a fraction of it.
        num_train_step = int(self._data_train.size / batch_size * epochs)
        num_warmup_step = int(num_train_step * warmup_proportion)

        self._model = Model(bert_config_file, max_seq_length, init_ckpt,
                            is_training, num_labels)

        self._train_op, self._global_step = optimization.create_optimizer(
            self._model.loss, learning_rate, num_train_step, num_warmup_step,
            False, virtual_batch_size_ratio)

        self.batch_size = batch_size
        self.epochs = epochs
        self.evaluate_every = evaluate_every
        self.output_dir = output_dir
        self._predictor = Predictor(bert_config_file, max_seq_length,
                                    num_labels)

    def evaluate(self, step):
        """Save a checkpoint for `step` and score it on the dev set."""
        print(f"saving model[{step}] ...")
        self._saver.save(self._sess,
                         os.path.join(f"{self.output_dir}/model"),
                         global_step=step)
        self._predictor.load_model(f"{self.output_dir}/model-{step}")
        self._predictor.predict_dataset(self._dev_data)

    def train(self):
        """Run the training loop, logging per-batch loss and timing.

        Creates the session and saver (used by `evaluate`), reports which
        variables were restored from the init checkpoint, then iterates
        batches from the training data reader.
        """
        self._sess = tf.Session()

        # Log which trainable variables were initialized from the checkpoint.
        tvars = tf.trainable_variables()
        initialized_variable_names = self._model.model.initialized_variable_names
        print('*** loading variables ***')  # typo fix: was "variablrs"
        for var in tvars:
            if var.name in initialized_variable_names:
                print(
                    f"name = {var.name}, shape = {var.shape}, *INIT_FROM_CKPT*"
                )
            else:
                print(f"name = {var.name}, shape = {var.shape}")

        self._sess.run(tf.global_variables_initializer())
        self._saver = tf.train.Saver(tf.global_variables(), max_to_keep=1000)

        data_reader = self._data_train.get_data_reader(self.batch_size,
                                                       self.epochs)

        for batch_id, batch_data in data_reader.get_batch_data():
            [
                input_ids_b, input_mask_b, segment_ids_b, label_ids_b,
                is_real_example_b
            ] = batch_data
            time_start = time.time()
            train_loss, global_step_train, _ = self._sess.run(
                fetches=[self._model.loss, self._global_step, self._train_op],
                feed_dict={
                    self._model.model.input_ids_p: input_ids_b,
                    self._model.model.input_mask_p: input_mask_b,
                    self._model.model.segment_ids_p: segment_ids_b,
                    self._model.labels: label_ids_b
                })
            time_duration = time.time() - time_start
            print(f"batch: {batch_id}, global_step: {global_step_train}, "
                  f"loss: {train_loss:.4f}, time: {time_duration}")
            # Evaluate on every multiple of evaluate_every, skipping step 0.
            if global_step_train != 0 and global_step_train % self.evaluate_every == 0:
                self.evaluate(global_step_train)
# Example #6 (score: 0)
# project imports
from utils import create_image_from_blob
from predict import Predictor
from config import (FLASK_TEMPLATE_DIR, FLASK_STATIC_DIR, FLASK_PORT,
                    FLASK_DEBUG, VIDEO_DEFAULT_WIDTH, VIDEO_DEFAULT_HEIGHT,
                    VIDEO_DEFAULT_FPS, VIDEO_DEFAULT_QUALITY)

# Create the Flask app with the project-configured template/static folders.
app = Flask(__name__,
            template_folder=FLASK_TEMPLATE_DIR,
            static_folder=FLASK_STATIC_DIR)
# Wrap the app for WebSocket support.
socketio = SocketIO(app)

# Create the predictor once at import time and load the model up front.
# threadsafe=True presumably guards concurrent calls from socket handlers —
# TODO confirm against Predictor's implementation.
predictor = Predictor(threadsafe=True)
predictor.load_model()


@app.route('/')
def index():
    """Render the video page, reading display settings from the query string.

    Each setting falls back to its configured default when absent from the
    request.
    """
    defaults = {
        'width': VIDEO_DEFAULT_WIDTH,
        'height': VIDEO_DEFAULT_HEIGHT,
        'fps': VIDEO_DEFAULT_FPS,
        'quality': VIDEO_DEFAULT_QUALITY,
    }
    params = {name: request.args.get(name, fallback)
              for name, fallback in defaults.items()}
    return render_template('index.html', **params)