# Example #1
def instance_predict():
    '''
    Receives a JSON with a single instance as payload. Makes prediction with model and
    returns its classification. If model_name is set, it uses the model given by the name
    (if it exists); otherwise, it uses the default_model.

    INPUT:
     - input: { model_name: string,
                data: {"var_1": value_1, ..., "var_n" : value_n} }
    OUTPUT:
    - output: message
              status (successful or not)
    '''

    input_asDict = request.get_json()

    logger.info("Received [Predict Instance Request] (0/3) ... ")

    # Loads desired Model
    try:
        # Use the requested model name when present and non-empty,
        # otherwise fall back to the default model.
        if input_asDict.get("model_name"):
            model_name = input_asDict["model_name"]
        else:
            model_name = default_model_name
        filepath = "./models/" + model_name + ".joblib.dat"
        logger.info(filepath)
        if file_exists(filepath):
            model = load_model(filepath)
        else:
            message = "Model " + model_name + " does not exist."
            logger.error(message)
            # BUG FIX: was batch_response_http — this single-instance
            # endpoint responds with custom_response_http everywhere else.
            return custom_response_http(message, 400)
    except Exception:
        message = "Internal Server Error"
        # BUG FIX: was logger.info; an error path should log as an error
        # (logger.exception also records the traceback).
        logger.exception(message)
        return custom_response_http(message, 500)
    logger.info("[Predict Instance Request] Model [%s] loaded (1/3) ... ",
                model_name)

    # Loads instance to be classified from json
    try:
        if "data" in input_asDict:
            data, ok = load_json_data(input_asDict["data"])
            if not ok:
                message = "Invalid data. Couldn't parse json."
                logger.error(message)
                return custom_response_http(message, 400)

            ok = treat_and_validate_instance_data(data)
            if not ok:
                message = "Invalid data. Wrong format."
                logger.error(message)
                return custom_response_http(message, 400)
        else:
            message = "Instance does not exist."
            logger.error(message)
            return custom_response_http(message, 400)
    except Exception:
        message = "Internal Server Error"
        logger.exception(message)
        return custom_response_http(message, 500)
    logger.info("[Predict Instance Request] Data loaded (2/3) ... ")

    # Runs Prediction
    try:
        result = model_handler.predict_instance(model, data)
    except Exception:
        # BUG FIX: a trailing comma here previously made `message` a tuple,
        # so the response body and log carried ('Could not ...',).
        message = "Could not make Prediction."
        logger.exception(message)
        return custom_response_http(message, 500)
    logger.info("[Predict Instance Request] Prediction Made (3/3) .")

    return custom_response_http(
        "Classification Success! Target: " + str(result), 200)
# Example #2
def batch_predict():
    '''
    Receives CSV with batch of instances as payload. Makes predictions with the
    model and returns its classification. If model_name is set, it uses the model
    given by the name (if it exists); otherwise, it uses the default_model.

    INPUT:
    - input: { model_name: string,
               data: base64 encoded csv file }
    OUTPUT:
    - output: message
              array with target (labeled 0 or 1)
              status (successful or not)
    '''

    input_asDict = request.get_json()

    # BUG FIX: the log previously said "[Predict Instance Request]" in the
    # batch endpoint.
    logger.info("Received [Predict Batch Request] (0/3) ... ")

    # Loads desired Model
    try:
        # Use the requested model name when present and non-empty,
        # otherwise fall back to the default model.
        if input_asDict.get("model_name"):
            model_name = input_asDict["model_name"]
        else:
            model_name = default_model_name
        filepath = "./models/" + model_name + ".joblib.dat"
        if file_exists(filepath):
            model = load_model(filepath)
        else:
            message = "Model " + model_name + " does not exist."
            logger.error(message)
            return batch_response_http(message, [], 400)
    except Exception:
        message = "Internal Server Error"
        # BUG FIX: was logger.info on an error path.
        logger.exception(message)
        return batch_response_http(message, [], 500)
    logger.info("[Predict Batch Request] Model [%s] loaded (1/3) ... ",
                model_name)

    # Decodes Prediction Data
    data_filepath = None  # set once the payload is written to disk
    try:
        if "data" in input_asDict:
            dataraw = base64.b64decode(input_asDict["data"])
            data_filepath = "./tmp/tmpcsv.csv"
            with open(data_filepath, 'wb') as fh:
                fh.write(dataraw)
            data = pd.read_csv(data_filepath)
            data, ok = treat_and_validate_batch_data(data)
            if not ok:
                remove(data_filepath)
                message = "Could not treat/validate data."
                logger.error(message)
                return batch_response_http(message, [], 400)
        else:
            message = "Data does not exist in request."
            logger.error(message)
            return batch_response_http(message, [], 400)
    except Exception:
        # BUG FIX: data_filepath was previously unbound (NameError) when the
        # failure happened before the temp file path was assigned.
        if data_filepath is not None and file_exists(data_filepath):
            remove(data_filepath)
        message = "Internal Server Error"
        logger.exception(message)
        return batch_response_http(message, [], 500)
    logger.info("[Predict Batch Request] Data loaded (2/3) ... ")

    # Predicts Data
    try:
        prediction = model.predict(data).tolist()
    except Exception:
        remove(data_filepath)
        message = "Internal Server Error"
        logger.exception(message)
        # BUG FIX: was custom_response_http — every other response from this
        # endpoint uses the batch response shape.
        return batch_response_http(message, [], 500)
    logger.info("[Predict Batch Request] Prediction Made (3/3) .")

    # BUG FIX: remove the temp file on the success path too (it was leaked).
    remove(data_filepath)
    return batch_response_http("Classification Success!", prediction, 200)
def train_model():
    '''
    Receives a base64 encoded csv file with a batch of instances as payload.
    If a file with the given name does not exist, a new model is created with the
    given model name (reusing the default model's hyper-parameters); otherwise,
    it returns an error.

    INPUT:
    - input: { model_name : string,
               data: base64 encoded csv file }

    OUTPUT:
    - output: message
            status (successful or not)
    '''

    input_asDict = request.get_json()

    logger.info("Received [Train Model Request] (0/3) ... ")

    if input_asDict.get("model_name"):
        model_name = input_asDict["model_name"]
    else:
        message = "Model Name is required."
        logger.error(message)
        return custom_response_http(message, 400)
    logger.info("[Train Model Request] Model Name Set: [" + model_name +
                "] (1/3) ... ")

    # Decodes Training Data
    data_filepath = None  # set once the payload is written to disk
    try:
        if "data" in input_asDict:
            dataraw = base64.b64decode(input_asDict["data"])
            data_filepath = "./tmp/tmpcsv.csv"
            with open(data_filepath, 'wb') as fh:
                fh.write(dataraw)
            data = pd.read_csv(data_filepath)
            X, y = split_Xy(data)
        else:
            # BUG FIX: no temp file exists in this branch; the old
            # remove(data_filepath) raised NameError.
            message = "Data does not exist in request."
            logger.error(message)
            return custom_response_http(message, 400)
    except Exception:
        if data_filepath is not None and file_exists(data_filepath):
            remove(data_filepath)
        message = "Internal Server Error"
        logger.exception(message)
        return custom_response_http(message, 500)

    logger.info("[Train Model Request] Decoded Dataset. (2/3) ... ")

    # Loads/Creates Model
    try:
        new_model_filepath = "./models/" + model_name + ".joblib.dat"
        if file_exists(new_model_filepath):
            # BUG FIX: the temp data file was leaked on this early return.
            remove(data_filepath)
            message = "Cannot overwrite model. Choose another name."
            logger.error(message)
            return custom_response_http(message, 400)
        # Clone the default model's hyper-parameters for the new model.
        default_filepath = "./models/" + default_model_name + ".joblib.dat"
        old_model = load_model(default_filepath)
        # BUG FIX: get_xgb_params() must be unpacked as keyword arguments;
        # passing it as a single `params=` kwarg left the new classifier
        # with default settings.
        model = xgb.XGBClassifier(**old_model.get_xgb_params())
        model = model.fit(X, y)
        # BUG FIX: persist under the NEW model's path — the original code
        # reassigned `filepath` to the default model's path before dumping,
        # silently overwriting the default model on disk.
        joblib.dump(model, new_model_filepath)
    except Exception:
        remove(data_filepath)
        message = "Internal Server Error"
        logger.exception(message)
        return custom_response_http(message, 500)
    logger.info("[Train Model Request] Trained Dataset. (3/3) ... ")

    # (debug logging of the response type removed)
    remove(data_filepath)
    return custom_response_http("Model Training was successful! ", 200)
# Example #4
    # load image as grayscale
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # resize image
    res = cv2.resize(gray, dsize=(w, h), interpolation=cv2.INTER_CUBIC)
    # flat image
    img_flat = res.flatten() / 255.0
    X = img_flat.reshape(1, h, w, 1).astype('float32')
    return X


def predict(model, X):
    """Classify X with the given model and return the first sample's label."""
    class_labels = model.predict_classes(X, verbose=0)
    first_label = class_labels[0]
    return first_label


if __name__ == "__main__":
    # Guard clauses: require exactly one CLI argument (the image path) and a
    # readable image; otherwise terminate with a non-zero exit code.
    if len(sys.argv) != 2:
        exit(1)
    img = cv2.imread(sys.argv[1])
    if img is None:
        exit(1)

    height, width = img.shape[:2]

    localize_model = model_handler.load_model(TRAINED_MODELS + MODEL)
    localizer = Localizer(localize_model, img)

    fields = localizer.process(debug=True)
# Example #5
 def predict_word(self, X):
     """Load the configured model, run it on X, and decode the raw
     predictions into words via labels_to_words."""
     self.model = model_handler.load_model(self.model_name,
                                           self.weight_name)
     raw_predictions = self.model.predict(X, verbose=0)
     return self.labels_to_words(raw_predictions)