Example #1
def predict_url(args):
    """
    Function to predict an url
    """
    # Check user configuration
    update_with_query_conf(args)
    conf = config.conf_dict

    merge = True
    catch_url_error(args['urls'])

    # Load model if needed
    if (loaded_ts != conf['testing']['timestamp'] or
            loaded_ckpt != conf['testing']['ckpt_name']):
        load_inference_model(timestamp=conf['testing']['timestamp'],
                             ckpt_name=conf['testing']['ckpt_name'])
        conf = config.conf_dict

    # Make the predictions
    with graph.as_default():
        pred_lab, pred_prob = test_utils.predict(
            model=model,
            X=args['urls'],
            conf=conf,
            top_K=top_K,
            filemode='url',
            merge=merge,
            use_multiprocessing=False
        )  # safer to avoid memory fragmentation in failed queries

    if merge:
        pred_lab, pred_prob = np.squeeze(pred_lab), np.squeeze(pred_prob)

    return format_prediction(pred_lab, pred_prob)
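For orientation, a minimal sketch of how this handler might be invoked. The DEEPaaS-style args dict below is an assumption, and the timestamp/checkpoint values are placeholders, not part of the original API:

args = {
    'urls': ['http://example.com/cat.jpg'],  # image(s) to classify
    'timestamp': '2019-06-20_123456',        # hypothetical model timestamp
    'ckpt_name': 'final_model.h5',           # hypothetical checkpoint file
}
response = predict_url(args)  # formatted top-K labels and probabilities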
Example #2
def predict_data(args, merge=True):
    """
    Function to predict an image in binary format
    """
    # FIXME: remove after the DEEPaaS upgrade (single files arrive unwrapped)
    if not isinstance(args['files'], list):
        args['files'] = [args['files']]

    catch_localfile_error(args['files'])

    if not loaded:
        load_inference_model()

    # Dump each uploaded image to a temporary file so the predictor
    # can read it back from disk
    filenames = []
    images = [f.read() for f in args['files']]
    for image in images:
        f = tempfile.NamedTemporaryFile(delete=False)
        f.write(image)
        f.close()
        filenames.append(f.name)

    try:
        with graph.as_default():
            pred_lab, pred_prob = predict(
                model=model,
                X=filenames,
                conf=conf,
                top_K=top_K,
                filemode='local',
                merge=merge,
                use_multiprocessing=False
            )  # safer to avoid memory fragmentation in failed queries
    finally:
        for f in filenames:
            os.remove(f)

    if merge:
        pred_lab, pred_prob = np.squeeze(pred_lab), np.squeeze(pred_prob)

    return format_prediction(pred_lab, pred_prob)
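A minimal usage sketch under the same caveat: DEEPaaS passes uploaded file objects in args['files'], and a single object (pre-upgrade behavior) is wrapped into a list by the FIXME shim above. The filename below is a placeholder:

with open('cat.jpg', 'rb') as fp:
    response = predict_data({'files': fp})  # fp is wrapped into [fp] by the shim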
Example #3
def predict_data(args):
    """
    Function to predict an image in binary format
    """
    # Check user configuration
    update_with_query_conf(args)
    conf = config.conf_dict

    merge = True
    catch_localfile_error(args['files'])

    # Load model if needed
    if (loaded_ts != conf['testing']['timestamp'] or
            loaded_ckpt != conf['testing']['ckpt_name']):
        load_inference_model(timestamp=conf['testing']['timestamp'],
                             ckpt_name=conf['testing']['ckpt_name'])
        conf = config.conf_dict

    # Create a list with the paths to the images
    filenames = [f.filename for f in args['files']]

    # Make the predictions
    try:
        with graph.as_default():
            pred_lab, pred_prob = test_utils.predict(
                model=model,
                X=filenames,
                conf=conf,
                top_K=top_K,
                filemode='local',
                merge=merge,
                use_multiprocessing=False
            )  # safer to avoid memory fragmentation in failed queries
    finally:
        for f in filenames:
            os.remove(f)

    if merge:
        pred_lab, pred_prob = np.squeeze(pred_lab), np.squeeze(pred_prob)

    return format_prediction(pred_lab, pred_prob)
Example #4
def predict_url(args, merge=True):
    """
    Function to predict an url
    """
    catch_url_error(args['urls'])

    if not loaded:
        load_inference_model()
    with graph.as_default():
        pred_lab, pred_prob = predict(
            model=model,
            X=args['urls'],
            conf=conf,
            top_K=top_K,
            filemode='url',
            merge=merge,
            use_multiprocessing=False
        )  # safer to avoid memory fragmentation in failed queries

    if merge:
        pred_lab, pred_prob = np.squeeze(pred_lab), np.squeeze(pred_prob)

    return format_prediction(pred_lab, pred_prob)
Example #5
print("++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++después")
# NOTE: what follows is based on the "predicting a datasplit txt file" step that Ignacio
# includes in notebook 3.0. This preparation is needed to compute the confusion matrix.
#
# NB: the split you now feed in to test the model given by the timestamp DOES live in
# data/dataset_files, AND IT IS WHAT YOU WANT TO TEST THE MODEL WITH.
SPLIT_NAME = input("Name of the split to evaluate on, from data/dataset_files (e.g. val, train): ")
# Load the data
X, y = load_data_splits(splits_dir=paths.get_ts_splits_dir(),
                        im_dir=conf['general']['images_directory'],
                        split_name=SPLIT_NAME)
# Predict
# Add the following if you want to skip augmentation during validation:
#
print(conf['augmentation']['val_mode'])
pred_lab, pred_prob = predict(model, X, conf, top_K=TOP_K, filemode='local')

# Save the predictions
pred_dict = {'filenames': list(X),
             'pred_lab': pred_lab.tolist(),
             'pred_prob': pred_prob.tolist()}
if y is not None:
    pred_dict['true_lab'] = y.tolist()
# We skip dumping the predictions to JSON because we are going to use them right away.
# We import this warnings filter because Ignacio suggests it.
import warnings
warnings.filterwarnings("ignore") # To ignore UndefinedMetricWarning: [Recall/Precision/F-Score] is ill-defined and being set to 0.0 in labels with no [true/predicted] samples.

# NOTE: print several relevant metrics ON THE SELECTED SPLIT
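A minimal sketch of what those metrics might look like, assuming scikit-learn is available, that y was not None (so 'true_lab' exists in pred_dict), and that pred_lab[:, 0] holds the top-1 predicted class per image:

import numpy as np
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix

true_lab = np.array(pred_dict['true_lab'])
top1 = np.array(pred_dict['pred_lab'])[:, 0]  # top-1 predicted class per image

print('Top-1 accuracy: {:.3f}'.format(accuracy_score(true_lab, top1)))
print(classification_report(true_lab, top1))
print(confusion_matrix(true_lab, top1))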