Example No. 1
def predict_cmd(model, output_format=None):
    '''
    Instantiates a Predict object to run a prediction using the given input
    file and model.

    This method must be self-contained and suitable for being called in
    cascade by models that use the output of other models as input.
    '''
    from flame.predict import Predict

    predict = Predict(model['endpoint'], model['version'], output_format)

    # check whether this model takes its input from a set of other (external) models
    ext_input, model_set = predict.get_model_set()

    if ext_input:

        success, model_res = get_external_input(predict, model_set,
                                                model['infile'])

        if not success:
            return False, model_res

        # now run the model using the data from the external sources
        success, results = predict.run(model_res)

    else:

        # run the model with the input file
        success, results = predict.run(model['infile'])

    LOG.info('Prediction completed...')

    return success, results
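A minimal usage sketch for this first variant, assuming a model dictionary that carries the 'endpoint', 'version' and 'infile' keys read above (the endpoint name and file path below are hypothetical):

model = {'endpoint': 'irritation',    # hypothetical model name
         'version': 1,
         'infile': 'compounds.sdf'}   # hypothetical input file
success, results = predict_cmd(model)
if not success:
    print('prediction failed:', results)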
Example No. 2
def predict_cmd(arguments, output_format=None):
    '''
    Instantiates a Predict object to run a prediction using the given input
    file and model.

    This method must be self-contained and suitable for being called in
    cascade by models that use the output of other models as input.
    '''
    from flame.predict import Predict

    # safety check if model exists
    repo_path = pathlib.Path(utils.model_repository_path())
    model_list = os.listdir(repo_path)

    if arguments['endpoint'] not in model_list:
        LOG.error('Endpoint name not found in model repository.')
        return False, 'Endpoint name not found in model repository.'

    # ** DEPRECATE **
    # this is a back-compatibility trick for older versions of APIs
    # not supporting the label argument
    if 'label' not in arguments:
        arguments['label'] = 'temp'

    predict = Predict(arguments['endpoint'],
                      version=arguments['version'],
                      output_format=output_format,
                      label=arguments['label'])

    ensemble = predict.get_ensemble()

    # ensemble[0]     Boolean, True for ensemble models and False otherwise
    # ensemble[1]     List of ensemble model names
    # ensemble[2]     List of ensemble model versions

    if ensemble[0]:

        if arguments['infile'] is None:
            return False, 'ensemble models always require an input file'

        success, model_res = get_ensemble_input(predict, ensemble[1],
                                                ensemble[2],
                                                arguments['infile'])

        if not success:
            return False, model_res

        # now run the model using the data from the external sources
        success, results = predict.run(model_res)

    else:

        # run the model with the input file
        success, results = predict.run(arguments['infile'])

    LOG.info('Prediction completed...')

    return success, results
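This variant reads the model from an arguments dictionary and accepts an optional 'label' entry (falling back to 'temp' when it is missing). A hedged usage sketch, with hypothetical values:

arguments = {'endpoint': 'irritation',    # hypothetical model name
             'version': 1,
             'infile': 'compounds.sdf',   # hypothetical input file
             'label': 'my_prediction'}
success, results = predict_cmd(arguments)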
Example No. 3
import json

import numpy as np
import pandas as pd

from flame.predict import Predict


def get_predictions(models_frame, name_col, version_col, file):
    """
    Get predictions for a set of compounds for a given list of models.
    The endpoint and version column names are needed. Returns a dataframe
    with the predictions for each endpoint as columns.
    """
    model_predictions = pd.DataFrame()
    for model, version in zip(models_frame[name_col],
                              models_frame[version_col]):
        pred = Predict(model, version)
        # run() returns a (success, results) tuple; here the second element
        # is parsed as a JSON string
        results = pred.run(file)
        results = json.loads(results[1])
        # 'c0' and 'c1' hold the per-compound boolean assignments to each class
        c0 = np.asarray(results['c0'])
        c1 = np.asarray(results['c1'])
        final = []
        for val, val2 in zip(c0, c1):
            if val and not val2:
                final.append(0)   # assigned only to class 0
            elif val2 and not val:
                final.append(1)   # assigned only to class 1
            else:
                final.append(2)   # assigned to both classes or neither
        model_predictions[model + '-' + str(version)] = final
    return model_predictions
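A short usage sketch for get_predictions; the dataframe column names only need to match the name_col and version_col arguments, and all concrete values below are hypothetical:

models = pd.DataFrame({'endpoint': ['modelA', 'modelB'],   # hypothetical model names
                       'version': [1, 1]})
predictions = get_predictions(models, 'endpoint', 'version', 'compounds.sdf')
print(predictions.head())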
Example No. 4
def predict_cmd(arguments, output_format=None):
    '''
    Instantiates a Predict object to run a prediction using the given input
    file and model.

    This method must be self-contained and suitable for being called in
    cascade by models that use the output of other models as input.
    '''
    from flame.predict import Predict

    # safety check if model exists
    endpoint_dir = utils.model_path(arguments['endpoint'], 0)
    if not os.path.isdir(endpoint_dir):
        return False, 'Endpoint name not found in model repository.'

    # ** DEPRECATE **
    # this is a back-compatibility trick for older versions of APIs 
    # not supporting the label argument
    if 'label' not in arguments:
        arguments['label'] = 'temp'

    if 'output_format' in arguments:
        output_format = arguments['output_format']

    predict = Predict(arguments['endpoint'],
                      version=arguments['version'],
                      output_format=output_format,
                      label=arguments['label'])

    if utils.isSingleThread():
        predict.set_single_CPU()

    ensemble = predict.get_ensemble()

    # ensemble[0]     Boolean, True for ensemble models and False otherwise
    # ensemble[1]     List of ensemble model names
    # ensemble[2]     List of ensemble model versions

    if ensemble[0]:

        if arguments['infile'] is None:
            return False, 'ensemble models always require an input file'

        emodels = ensemble[1]
        evers   = ensemble[2]

        success, model_res = get_ensemble_input(predict, emodels, evers,
                                                arguments['infile'])

        if not success:
            predict.conveyor.setError(model_res)
            LOG.error(model_res)
            # return False, model_res
            # TO-DO: keep the line above commented and run the prediction anyway,
            # so that odata can generate the error info

        # check whether any of the inner models has changed
        modelID = predict.conveyor.getMeta('modelID')
        for i in range(len(emodels)):
            success, iID = utils.getModelID(emodels[i], evers[i], 'model')
            if success and iID not in modelID:
                warning = (f'Inner model {emodels[i]}.{evers[i]} has been updated. '
                           'Rebuilding the ensemble model is recommended')
                predict.conveyor.setWarning(warning)
                LOG.warning(warning)

        # now run the model using the data from the external sources
        success, results = predict.run(model_res)

    else:

        # run the model with the input file
        success, results = predict.run(arguments['infile'])

    LOG.info('Prediction completed...')

    return success, results
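This last variant behaves like the previous one but also honours an optional 'output_format' entry in the arguments dictionary and can restrict the prediction to a single CPU. A hedged usage sketch with hypothetical values (the optional keys are simply omitted here):

arguments = {'endpoint': 'irritation',    # hypothetical model name
             'version': 1,
             'infile': 'compounds.sdf',   # hypothetical input file
             'label': 'my_prediction'}
success, results = predict_cmd(arguments)
if success:
    print(results)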