def process():

    data = request.form

    data_dictionary = data.copy()
    print(data_dictionary)
    final_models = []

    model_option = data_dictionary["model_option"]
    pretrained_models = data_dictionary["pretrained_models"].split(",")
    if model_option == 'yes':
        model_file = data_dictionary["model_file"]
        final_models = pretrained_models
        final_models.append('own_model')
    else:
        final_models = pretrained_models
    email_user = data_dictionary["email_user"]

    if model_option == 'yes':
        # Verify the uploaded model
        print('started reading model')
        model = vm.load_model_cnn(model_file)
        print('finished reading model')

        # Process the test images
        x_test = dp.load_test_images()
        y_test = dp.load_test_targets()
        test_data, test_target = dp.process_data(x_test, y_test, 8)

        # Generate metrics
        m.generate_metrics(model, test_data, test_target, y_test, x_test)

    # Generate the PDF report
    fpr_micro, tpr_micro, fpr_macro, tpr_macro = pg.load_metrics_data_roc(final_models)
    report_values = pg.load_metrics_data_report(final_models)
    ruta_macro = pg.generate_graphs_macro(final_models, fpr_macro, tpr_macro)
    ruta_micro = pg.generate_graphs_micro(final_models, fpr_micro, tpr_micro)
    pg.generate_pdf(ruta_micro, ruta_macro, report_values)

    # Send the results by email
    #ec.zip(directory_results, name_destination)
    ec.send_results(email_user)

    if data_dictionary:
        return jsonify({'success': 'Successfully processed. Please check your email.'})
    else:
        return jsonify({'error': 'There was a problem with the process. Try again.'})
    def test_get_unknown_metrics_type(self):
        '''When no metrics types are specified, an empty dictionary is returned'''
        from metrics import generate_metrics

        res = generate_metrics(bibcodes=testset, metrics_types=[])
        # An unknown metrics type should return an empty dictionary
        self.assertEqual(res, {})
Example #3
def create_model(x_train,
                 y_train,
                 x_test,
                 y_test,
                 numFolds=5,
                 c=1,
                 k='linear',
                 save=True,
                 baseName='femlpModel'):
    """
    Model providing function:

    Create Keras model with SVM as classifier, compile test and generate metrics.
    """
    ################# define SVM #################
    clf = svm.SVC(kernel=k, C=c, probability=True, random_state=1337)
    clf.fit(x_train, y_train)
    # Classify
    y = np_utils.to_categorical(y_test, 2)
    classesPredictionList = clf.predict(x_test)  # 0 or 1
    classesProbaPredictionList = clf.predict_proba(x_test)  # probability
    sensitivity, specificity, accuracy, precision, recall, F1_score, auc = metrics.generate_metrics(
        classesPredictionList, classesProbaPredictionList, y, verbose=False)

    if (save):
        joblib.dump(clf, "output/" + baseName + ".pkl")

    print("Accuracy: {:.4f}".format(accuracy))
    print("Sensitivity: {:.4f}".format(sensitivity))
    print("Specificity: {:.4f}".format(specificity))
    print("F1 Score: {:.4f}".format(F1_score))
    print("AUC: {:.4f}".format(auc))
Example #4
def init_train():
    train_traders = {}
    for j in range(0, 2):
        trader_id = str(uuid.uuid4())
        train_traders[trader_id] = [
            generate_tracks("S{}".format(i)) for i in range(0, 10)
        ]

    metrics = {}
    for k in train_traders.keys():
        tracks = pd.concat(train_traders[k], axis=1)
        print(k)

        for t in tracks.columns:
            metrics[k + '-' + t] = generate_metrics(tracks[[t]])
    #   metrics[k]={t: generate_metrics(tracks[t]) for t in tracks.columns}

    test_frame = pd.DataFrame(metrics).T

    y = test_frame[['mean']]
    X = test_frame[['expectancy', 'maxDD', 'sharpe']]
    model = sm.OLS(y, X).fit()
    predictions = model.predict(X)  # make the predictions by the model

    # Return the fitted regression coefficients
    return model.params.values.flatten().tolist()
Example #5
def generate_trader_strategy_metrics(coefficients):
    predicted_returns = []
    traders = select_all('public', 'trader_tracks').traderid.unique().tolist()
    for t in traders:
        t_tracks = select_where('public', 'trader_tracks', 'traderid', '=', t)
        strategies = t_tracks.strategyid.unique().tolist()
        for s in strategies:
            t_track = t_tracks.query('strategyid == @s', engine='python')[[
                'dateindex', 'value'
            ]].set_index('dateindex')
            metrics_test = generate_metrics(t_track)
            y = [
                metrics_test['expectancy'], metrics_test['maxDD'],
                metrics_test['sharpe']
            ]
            predicted_returns.append({
                'traderid': t,
                'strategyid': s,
                'strategy_metrics': metrics_test,
                'predicted_return': [i * j for i, j in zip(coefficients, y)][0]
            })

    return predicted_returns
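The two trading examples above are meant to chain together: init_train fits an OLS model on simulated tracks and returns its coefficients, and generate_trader_strategy_metrics applies those coefficients to every stored trader strategy. A hedged sketch of that hand-off follows; the sorting step and the printed fields are assumptions added for illustration.

# Fit the regression on simulated tracks, then score the stored strategies.
coefficients = init_train()
predicted = generate_trader_strategy_metrics(coefficients)

# Rank strategies by predicted return, highest first (illustrative only).
for entry in sorted(predicted, key=lambda e: e['predicted_return'], reverse=True):
    print(entry['traderid'], entry['strategyid'], entry['predicted_return'])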
Example #6
def create_model(x_train, y_train, x_test, y_test, numberOfClasses=2, numberOfEpochs = 10, batchSize = 30, save=True, baseName='fftModel'):
    """
    Model providing function:

    Create Keras model with MLP as classifier, compile test and generate metrics.
    """
    base_model = load_model()

    # Compile
    base_model.compile( loss='binary_crossentropy', optimizer= 'sgd', metrics=['accuracy'] )

    # Train
    base_model.fit(x_train,
                   y_train,
                   batch_size = batchSize,
                   epochs = numberOfEpochs,
                   verbose = 0,
                   validation_data=(x_test, y_test))  
  
    # Classify
    classesPredictionList = base_model.predict_classes(x_test, verbose=0) # 0 or 1
    classesProbaPredictionList = base_model.predict_proba(x_test) # probability
    sensitivity, specificity, accuracy, precision, recall, F1_score, auc = metrics.generate_metrics(classesPredictionList,classesProbaPredictionList,y_test,verbose=False)

    # Save Model
    if(save):
        save_model(base_model, baseName)
    
    print("Accuracy: {:.4f}".format(accuracy))
    print("Sensitivity: {:.4f}".format(sensitivity))
    print("Specificity: {:.4f}".format(specificity))
    print("F1 Score: {:.4f}".format(F1_score))
    print("AUC: {:.4f}".format(auc))
Example #8
def create_modelCV(x_train,
                   y_train,
                   x_test,
                   y_test,
                   numFolds=5,
                   c=1,
                   k='linear'):
    """
    Model providing function:

    Create Keras model with SVM as classifier, compile test and generate metrics.
    """
    ### Cross-validation
    skf = StratifiedKFold(n_splits=numFolds, shuffle=True, random_state=1337)
    X = x_train
    Y = y_train
    sensitivitys, specificitys, accuracys, precisions, recalls, F1_scores, aucs = [
        [], [], [], [], [], [], []
    ]
    #kpbar = tqdm(total=numFolds, desc="Kfold", leave=False)
    y = np_utils.to_categorical(Y, 2)
    Y = numpy.array(Y)
    for train_index, test_index in skf.split(X, Y):
        ################# define SVM #################
        clf = svm.SVC(kernel=k, C=c, probability=True, random_state=1337)
        clf.fit(X[train_index], Y[train_index])
        # Classify
        classesPredictionList = clf.predict(X[test_index])  # 0 or 1
        classesProbaPredictionList = clf.predict_proba(
            X[test_index])  # probability
        sensitivity, specificity, accuracy, precision, recall, F1_score, auc = metrics.generate_metrics(
            classesPredictionList,
            classesProbaPredictionList,
            y[test_index],
            verbose=False)
        sensitivitys.append(sensitivity)
        specificitys.append(specificity)
        accuracys.append(accuracy)
        precisions.append(precision)
        recalls.append(recall)
        F1_scores.append(F1_score)
        aucs.append(auc)

    sensitivitys = numpy.array(sensitivitys)
    specificitys = numpy.array(specificitys)
    accuracys = numpy.array(accuracys)
    precisions = numpy.array(precisions)
    recalls = numpy.array(recalls)
    F1_scores = numpy.array(F1_scores)
    aucs = numpy.array(aucs)
    print("Mean Accuracy: {:.4f} (+/- {:.4f})".format(accuracys.mean(),
                                                      accuracys.std()))
    print("Mean Sensitivity: {:.4f} (+/- {:.4f})".format(
        sensitivitys.mean(), sensitivitys.std()))
    print("Mean Specificity: {:.4f} (+/- {:.4f})".format(
        specificitys.mean(), specificitys.std()))
    print("Mean F1 Score: {:.4f} (+/- {:.4f})".format(F1_scores.mean(),
                                                      F1_scores.std()))
    print("Mean AUC: {:.4f} (+/- {:.4f})".format(aucs.mean(), aucs.std()))
    def test_no_identifiers_found(self):
        '''When no identifiers are found, an empty dictionary is returned'''
        from metrics import generate_metrics

        res = generate_metrics(bibcodes=testset, metrics_types=[])
        # No identifiers (i.e. no records found in database) should return
        # an empty dictionary
        self.assertEqual(res, {})
Example #11
 def post(self):
     bibcodes = []
     query = None
     try:
         include_tori = request.json['tori']
     except:
         include_tori = True
     # Force that we either have a valid metrics type or all types
     try:
         types = [t for t in request.json['types'] if t in allowed_types]
     except:
         types = []
     types = types or allowed_types
     # Same with histogram type
     try:
         histograms = request.json['histograms']
     except:
         histograms = []
     histograms = histograms or allowed_histograms
     if 'bibcodes' in request.json:
         bibcodes = list(map(str, request.json['bibcodes']))
         if len(bibcodes) > current_app.config.get('METRICS_MAX_SUBMITTED'):
             return {
                 'Error': 'Unable to get results!',
                 'Error Info': 'No results: number of submitted '
                               'bibcodes exceeds maximum number'
             }, 200
         elif len(bibcodes) == 0:
             return {
                 'Error': 'Unable to get results!',
                 'Error Info': 'No bibcodes found in POST body'
             }, 200
     elif 'query' in request.json:
         query = request.json['query']
     else:
         return {
             'Error': 'Unable to get results!',
             'Error Info': 'Nothing to calculate metrics!'
         }, 200
     results = generate_metrics(bibcodes=bibcodes,
                                query=query,
                                tori=include_tori,
                                types=types,
                                histograms=histograms)
     # If the results contain an error message something went boink
     if "Error" in results:
         return results, 500
     # otherwise we have real results or an empty dictionary
     if results:
         return results
     else:
         return {
             'Error': 'Unable to get results!',
             'Error Info': 'No data available to generate metrics'
         }, 200
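The POST handler above reads an optional tori flag plus types and histograms lists from the JSON body, and requires either a bibcodes list or a query. A hedged client-side sketch follows; the service URL and the placeholder bibcodes are assumptions, but the field names match the keys read from request.json.

import requests

# Hypothetical endpoint URL and placeholder identifiers.
payload = {
    "bibcodes": ["bibcode-1", "bibcode-2"],
    "types": ["basic", "citations"],       # filtered against allowed_types
    "histograms": ["reads", "citations"],  # filtered against allowed_histograms
    "tori": True,
}
resp = requests.post("https://metrics.example.org/metrics", json=payload)
print(resp.status_code, resp.json())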
Example #12
 def get(self, bibcode):
     results = generate_metrics(bibcodes=[bibcode], types=["basic", "histograms"], histograms=["reads", "citations"])
     # If the results contain an error message something went boink
     if "Error" in results:
         return results, 500
     # otherwise we have real results or an empty dictionary
     if results:
         return results
     else:
         return {"Error": "Unable to get results!", "Error Info": "No data available to generate metrics"}, 200
Example #13
def create_modelCV(x_train, y_train, x_test, y_test, numFolds= 5, numberOfClasses=2, numberOfEpochs = 10, batchSize = 30):
    """
    Model providing function:

    Create Keras model with SVM as classifier, compile test and generate metrics.
    """
    ### Cross-validation
    skf = StratifiedKFold(n_splits=numFolds, shuffle=True, random_state=1337)
    X = x_train
    Y = y_train
    sensitivitys, specificitys, accuracys, precisions, recalls, F1_scores, aucs = [[],[],[],[],[],[],[]]
    #kpbar = tqdm(total=numFolds, desc="Kfold", leave=False)
    y = np_utils.to_categorical(Y, 2)

    for train_index, test_index in skf.split(X, Y):
        
        base_model = load_model()

        # Compile
        base_model.compile( loss='binary_crossentropy', optimizer= 'sgd', metrics=['accuracy'] )
        
        # Train
        base_model.fit(X[train_index],
                       y[train_index],
                       batch_size = batchSize,
                       epochs = numberOfEpochs,
                       verbose = 0,
                       validation_data=(X[test_index], y[test_index]))
        # Classify
        classesPredictionList = base_model.predict_classes(X[test_index], verbose=0) # 0 or 1
        classesProbaPredictionList = base_model.predict_proba(X[test_index]) # probability
        sensitivity, specificity, accuracy, precision, recall, F1_score, auc = metrics.generate_metrics(classesPredictionList,classesProbaPredictionList,y[test_index],verbose=False)
        sensitivitys.append(sensitivity)
        specificitys.append(specificity)
        accuracys.append(accuracy)
        precisions.append(precision)
        recalls.append(recall)
        F1_scores.append(F1_score)
        aucs.append(auc)
    
    sensitivitys = numpy.array(sensitivitys)
    specificitys = numpy.array(specificitys)
    accuracys = numpy.array(accuracys)
    precisions = numpy.array(precisions)
    recalls = numpy.array(recalls)
    F1_scores = numpy.array(F1_scores)
    aucs = numpy.array(aucs)
    print("Mean Accuracy: {:.4f} (+/- {:.4f})".format(accuracys.mean(), accuracys.std()))
    print("Mean Sensitivity: {:.4f} (+/- {:.4f})".format(sensitivitys.mean(), sensitivitys.std()))
    print("Mean Specificity: {:.4f} (+/- {:.4f})".format(specificitys.mean(), specificitys.std()))
    print("Mean F1 Score: {:.4f} (+/- {:.4f})".format(F1_scores.mean(), F1_scores.std()))
    print("Mean AUC: {:.4f} (+/- {:.4f})".format(aucs.mean(), aucs.std()))
Example #14
 def get(self, bibcode):
     results = generate_metrics(bibcodes=[bibcode],
                                types=['basic', 'citations', 'histograms'],
                                histograms=['reads', 'citations'])
     # If the results contain an error message something went boink
     if "Error" in results:
         return results, 500
     # otherwise we have real results or an empty dictionary
     if results:
         return results
     else:
         return {'Error': 'Unable to get results!',
                 'Error Info': 'No data available to generate metrics'}, 200
Example #15
 def get(self, bibcode):
     results = generate_metrics(bibcodes=[bibcode],
                                types=['basic', 'histograms'],
                                histograms=['reads', 'citations'])
     # If the results contain an error message something went boink
     if "Error" in results:
         return results, 500
     # otherwise we have real results or an empty dictionary
     if results:
         return results
     else:
         return {
             'Error': 'Unable to get results!',
             'Error Info': 'No data available to generate metrics'
         }, 200
Example #16
 def post(self):
     bibcodes = []
     query = None
     try:
         include_tori = request.json["tori"]
     except:
         include_tori = True
     # Force that we either have a valid metrics type or all types
     try:
         types = [t for t in request.json["types"] if t in allowed_types]
     except:
         types = []
     types = types or allowed_types
     # Same with histogram type
     try:
         histograms = request.json["histograms"]
     except:
         histograms = []
     histograms = histograms or allowed_histograms
     if "bibcodes" in request.json:
         bibcodes = list(map(str, request.json["bibcodes"]))
         if len(bibcodes) > current_app.config.get("METRICS_MAX_SUBMITTED"):
             return (
                 {
                     "Error": "Unable to get results!",
                     "Error Info": "No results: number of submitted "
                     "bibcodes exceeds maximum number",
                 },
                 200,
             )
         elif len(bibcodes) == 0:
             return {"Error": "Unable to get results!", "Error Info": "No bibcodes found in POST body"}, 200
     elif "query" in request.json:
         query = request.json["query"]
     else:
         return {"Error": "Unable to get results!", "Error Info": "Nothing to calculate metrics!"}, 200
     results = generate_metrics(
         bibcodes=bibcodes, query=query, tori=include_tori, types=types, histograms=histograms
     )
     # If the results contain an error message something went boink
     if "Error" in results:
         return results, 500
     # otherwise we have real results or an empty dictionary
     if results:
         return results
     else:
         return {"Error": "Unable to get results!", "Error Info": "No data available to generate metrics"}, 200
Example #17
def main():
    args = parser.parse_args()
    metrics = generate_metrics(args.sma_samples)
    if args.serial_port:
        ser = connect(args.serial_port, 1 / args.polling_rate,
                      args.connect_timeout)
    while True:
        for metric, func in metrics.items():
            data = func()
            if data:
                message = "%s %s" % (metric, " ".join(map(repr, data)))
                if args.serial_port:
                    try:
                        send(message, ser)
                    except Exception as e:
                        print(e, file=sys.stderr)
                        ser.close()
                        ser = connect(args.serial_port, 1 / args.polling_rate,
                                      args.connect_timeout)
                else:
                    print(message)
        time.sleep(1 / args.polling_rate)
        monitoring.update()
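In the monitor above, generate_metrics(args.sma_samples) is expected to return a mapping from metric names to zero-argument callables, and each reading is serialized as the metric name followed by its space-separated values. A small illustration of that message layout follows; the metric name and readings are invented purely to show the format.

# Same formatting expression as in main() above, applied to made-up readings.
metric, readings = "cpu_load", (0.12, 0.08, 0.05)
message = "%s %s" % (metric, " ".join(map(repr, readings)))
print(message)  # cpu_load 0.12 0.08 0.05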
Example #18
 def post(self):
     bibcodes = []
     query = None
     stime = time.time()
     try:
         include_tori = request.json['tori']
     except:
         include_tori = True
     # Force that we either have a valid metrics type or all types
     try:
         types = [t for t in request.json['types'] if t in allowed_types]
     except:
         types = []
     types = types or allowed_types
     # If "simple" metrics are requested, more records are allowed
     if len(types) == 1 and types[0] == 'simple':
         max_records = current_app.config.get('METRICS_MAX_SIMPLE')
         types = ['basic', 'citations', 'indicators', 'histograms']
         include_tori = False
     else:
         max_records = current_app.config.get('METRICS_MAX_SUBMITTED')
     # Same with histogram type
     try:
         histograms = request.json['histograms']
     except:
         histograms = []
     histograms = histograms or allowed_histograms
     if 'bibcodes' in request.json:
         if 'query' in request.json and request.json['query']:
             current_app.logger.warning('Metrics requested, but both bibcodes and query specified!')
             return {'Error': 'Unable to get results!',
                     'Error Info': 'Cannot send both bibcodes and query'}, 403
         bibcodes = list(map(str, request.json['bibcodes']))
         current_app.logger.info('Metrics requested for %s bibcodes'%len(bibcodes))
         if len(bibcodes) > max_records:
             current_app.logger.warning('Metrics requested for %s bibcodes. Maximum is: %s!'%(len(bibcodes), max_records))
             return {'Error': 'Unable to get results!',
                     'Error Info': 'No results: number of submitted '
                                   'bibcodes exceeds maximum number'}, 403
         elif len(bibcodes) == 0:
             current_app.logger.warning('Metrics requested, but no bibcodes supplied!')
             return {'Error': 'Unable to get results!',
                     'Error Info': 'No bibcodes found in POST body'}, 403
         elif len(bibcodes) == 1:
             current_app.logger.debug('Metrics requested for single record')
             if len(types) > 0:
                 types = [t for t in types if t in ['basic', 'citations', 'histograms']]
             if len(types) == 0:
                 types=['basic', 'citations', 'histograms']
             if len(histograms) > 0:
                 histograms = [h for h in histograms if h in ['reads', 'citations']]
             if len(histograms) == 0:
                 histograms=['reads', 'citations']
     elif 'query' in request.json:
         query = request.json['query']
         current_app.logger.info('Metrics requested for query: %s'%query)
     else:
         return {'Error': 'Unable to get results!',
                 'Error Info': 'Nothing to calculate metrics!'}, 403
     results = generate_metrics(
         bibcodes=bibcodes, query=query, tori=include_tori,
         types=types, histograms=histograms)
     # If the results contain an error message something went boink
     if "Error" in results:
         current_app.logger.error('Metrics request blew up')
         return results, 500
     # otherwise we have real results or an empty dictionary
     if results:
         duration = time.time() - stime
         current_app.logger.info('Metrics request successfully completed in %s real seconds'%duration)
         return results
     else:
         current_app.logger.info('Metrics request returned empty result')
         return {'Error': 'Unable to get results!',
                 'Error Info': 'No data available to generate metrics'}, 200
Example #19
prediction = []
actual = []

print("Fetching metric data... ")
for pred in predictionList:
    if pred == 'DNA':
        prediction.append(bin(0))
    elif pred == 'RNA':
        prediction.append(bin(1))
    elif pred == 'DRNA':
        prediction.append(bin(2))
    else:
        prediction.append(bin(3))

for true in trueList:
    if true == 'DNA':
        actual.append(bin(0))
    elif true == 'RNA':
        actual.append(bin(1))
    elif true == 'DRNA':
        actual.append(bin(2))
    else:
        actual.append(bin(3))

metricList = m.generate_metrics(prediction, actual)

print("Printing...")
for metric in metricList:
    print(metric)

print("Done.")
Example #20
def create_modelCV(x_train,
                   y_train,
                   x_test,
                   y_test,
                   numFolds=5,
                   numberOfClasses=2,
                   MLP1=100,
                   MLP2=200,
                   numberOfEpochs=20,
                   batchSize=30):
    """
    Model providing function:

    Create Keras model with SVM as classifier, compile test and generate metrics.
    """
    ### Cross-validation
    skf = StratifiedKFold(n_splits=numFolds, shuffle=True, random_state=1337)
    X = x_train
    Y = y_train
    sensitivitys, specificitys, accuracys, precisions, recalls, F1_scores, aucs = [
        [], [], [], [], [], [], []
    ]
    #kpbar = tqdm(total=numFolds, desc="Kfold", leave=False)
    y = np_utils.to_categorical(Y, 2)
    Y = numpy.array(Y)

    for train_index, test_index in skf.split(X, Y):

        ################ define MLP #################
        # create my MLP
        top_model = Sequential()
        top_model.add(
            Flatten(input_shape=(8, 8, 50))
        )  # shape of the last layer of my_model. Couldn't get it automatically using my_model.output_shape
        top_model.add(Dense(MLP1))
        top_model.add(Activation(
            'relu',
            name='act_1'))  # set name, otherwise duplicate names appear
        top_model.add(Dropout(0.5))
        top_model.add(Dense(MLP2))
        top_model.add(Activation('relu', name='act_2'))
        top_model.add(Dense(numberOfClasses))
        top_model.add(Activation('softmax', name='softmax'))

        # Compile
        top_model.compile(loss='binary_crossentropy',
                          optimizer='sgd',
                          metrics=['accuracy'])
        # Train
        top_model.fit(X[train_index],
                      y[train_index],
                      batch_size=batchSize,
                      epochs=numberOfEpochs,
                      verbose=0,
                      validation_data=(X[test_index], y[test_index]))
        # Classify
        classesPredictionList = top_model.predict_classes(X[test_index],
                                                          verbose=0)  # 0 or 1
        classesProbaPredictionList = top_model.predict_proba(
            X[test_index])  # probability
        sensitivity, specificity, accuracy, precision, recall, F1_score, auc = metrics.generate_metrics(
            classesPredictionList,
            classesProbaPredictionList,
            y[test_index],
            verbose=False)
        sensitivitys.append(sensitivity)
        specificitys.append(specificity)
        accuracys.append(accuracy)
        precisions.append(precision)
        recalls.append(recall)
        F1_scores.append(F1_score)
        aucs.append(auc)

    sensitivitys = numpy.array(sensitivitys)
    specificitys = numpy.array(specificitys)
    accuracys = numpy.array(accuracys)
    precisions = numpy.array(precisions)
    recalls = numpy.array(recalls)
    F1_scores = numpy.array(F1_scores)
    aucs = numpy.array(aucs)
    print("Mean Accuracy: {:.4f} (+/- {:.4f})".format(accuracys.mean(),
                                                      accuracys.std()))
    print("Mean Sensitivity: {:.4f} (+/- {:.4f})".format(
        sensitivitys.mean(), sensitivitys.std()))
    print("Mean Specificity: {:.4f} (+/- {:.4f})".format(
        specificitys.mean(), specificitys.std()))
    print("Mean F1 Score: {:.4f} (+/- {:.4f})".format(F1_scores.mean(),
                                                      F1_scores.std()))
    print("Mean AUC: {:.4f} (+/- {:.4f})".format(aucs.mean(), aucs.std()))
Example #21
def create_model(x_train,
                 y_train,
                 x_test,
                 y_test,
                 numberOfClasses=2,
                 MLP1=100,
                 MLP2=200,
                 numberOfEpochs=20,
                 batchSize=30,
                 save=True,
                 baseName='femlpModel'):
    """
    Model providing function:

    Create Keras model with MLP as classifier, compile test and generate metrics.
    """
    ################# define MLP #################
    # create my MLP
    top_model = Sequential()
    top_model.add(
        Flatten(input_shape=(8, 8, 50))
    )  # shape of the last layer of my_model. Couldn't get it automatically using my_model.output_shape
    top_model.add(Dense(MLP1))
    top_model.add(Activation(
        'relu', name='act_1'))  # set name, otherwise duplicate names appear
    top_model.add(Dropout(0.5))
    top_model.add(Dense(MLP2))
    top_model.add(Activation('relu', name='act_2'))
    top_model.add(Dense(numberOfClasses))
    top_model.add(Activation('softmax', name='softmax'))

    # Compile
    top_model.compile(loss='binary_crossentropy',
                      optimizer='sgd',
                      metrics=['accuracy'])

    # Train
    top_model.fit(x_train,
                  y_train,
                  batch_size=batchSize,
                  epochs=numberOfEpochs,
                  verbose=0,
                  validation_data=(x_test, y_test))

    # Classify
    classesPredictionList = top_model.predict_classes(x_test,
                                                      verbose=0)  # 0 or 1
    classesProbaPredictionList = top_model.predict_proba(x_test)  # probability
    sensitivity, specificity, accuracy, precision, recall, F1_score, auc = metrics.generate_metrics(
        classesPredictionList,
        classesProbaPredictionList,
        y_test,
        verbose=False)

    # Save Model
    if (save):
        save_model(top_model, baseName)

    print("Accuracy: {:.4f}".format(accuracy))
    print("Sensitivity: {:.4f}".format(sensitivity))
    print("Specificity: {:.4f}".format(specificity))
    print("F1 Score: {:.4f}".format(F1_score))
    print("AUC: {:.4f}".format(auc))
Example #22
def evaluate(fileName):
    print('\n    ' + fileName)

    #Create a directory for the rasterized testing points
    subprocess.call('mkdir rasterized', shell=True)

    #Make a geodataframe, then create an ID column
    points = gpd.read_file('testData/testingPoints.shp')
    points = points.to_crs({'init': 'epsg:26916'})
    points['Point_ID'] = points.index

    #Open the template raster for template information
    template = rasterio.open('predictions/' + fileName)
    meta = template.meta.copy()
    meta['nodata'] = 9999

    #Create an individual file for each point
    for index, row in points.iterrows():

        #Create a new raster for writing.
        with rasterio.open('rasterized/' + str(row.Point_ID) + '.tif', 'w',
                           **meta) as out:
            out_arr = out.read(1)

            #Transform and rasterize shape data
            shapes = ((geom, value)
                      for geom, value in zip([row.geometry], [row['OM']]))
            burned = features.rasterize(shapes=shapes,
                                        fill=0,
                                        out=out_arr,
                                        transform=out.transform)

            #Write the data out as a raster
            out.write_band(1, burned)

    #Get the filenames for each individual raster
    individuals = os.listdir('rasterized')

    #Get the index and value of each individual rasterized point
    point_data = {}
    for individual in individuals:
        #Open the input raster
        raster = rasterio.open('rasterized/' + individual)
        array = raster.read(1)

        #Get the index for the min value (the datapoint)
        flat_index = np.argmin(array)
        index = np.unravel_index(flat_index, array.shape)

        #Write the data to the dictionary
        key = os.path.splitext(individual)[0]
        point_data[key] = {'index': index, 'value': array[index]}

    #Read the prediction file in as an array
    raster = rasterio.open('predictions/' + fileName)
    prediction = raster.read(1)

    #Get the actual and predicted values
    value_pairs = []
    for point in list(point_data.keys()):
        value_pairs.append([
            point_data[point]['value'],
            prediction[point_data[point]['index'][0],
                       point_data[point]['index'][1]]
        ])

    #Get performance metrics
    scores = metrics.generate_metrics(value_pairs)

    print('     R2 Score: ' + str(scores[0]))
    print('     RMSE: ' + str(scores[1]))
    print('     ME: ' + str(scores[2]))
    print('     MAE: ' + str(scores[3]))

    #Remove the individual rasters
    shutil.rmtree('rasterized/')
Example #23
def validate(point_data, topo, buffers):

    #Run the function for each filename
    #############################################################
    points = list(point_data.keys())
    value_pairs = list()
    iteration = 0
    length = len(points)
    for test_point in points:

        #Take the validation point out of the training set
        #############################################################
        training_points = points.copy()
        training_points.remove(test_point)
        training_buffers = training_points

        #Assemble the training set
        #############################################################
        training_set = list()
        for training_point in training_points:
            obs = list()
            for feature in topo:
                obs.append(feature[point_data[training_point]['index']])
            for buffer_feature in training_buffers:
                obs.append(buffers[buffer_feature][point_data[training_point]
                                                   ['index']])
            obs.append(point_data[training_point]['value'])
            training_set.append(obs)

        #Assemble the test set
        #############################################################
        testing_set = list()
        obs = list()
        for feature in topo:
            obs.append(feature[point_data[test_point]['index']])
        for buffer_feature in training_buffers:
            obs.append(
                buffers[buffer_feature][point_data[test_point]['index']])
        testing_set.append(obs)

        #Generate Prediction
        #############################################################
        prediction = train_predict(training_set, testing_set)

        value_pairs.append([point_data[test_point]['value'], prediction[0]])

        #Log Progress
        #############################################################
        iteration += 1
        #print(str(int(iteration/length*90)+5)+'%')

    scores = metrics.generate_metrics(value_pairs)

    print('     R2 Score: ' + str(scores[0]))
    print('     RMSE: ' + str(scores[1]))
    print('     ME: ' + str(scores[2]))
    print('     MAE: ' + str(scores[3]))
    return {
        'R2': scores[0],
        'RMSE': scores[1],
        'ME': scores[2],
        'MAE': scores[3]
    }
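validate above performs leave-one-out cross-validation: each point is held out in turn, train_predict is fitted on the rest, and the collected actual/predicted pairs are passed to metrics.generate_metrics, whose result is indexed as R2, RMSE, ME, and MAE. A hedged sketch of the expected input structure follows; the raster sizes, point locations, and values below are invented only to show the shapes.

import numpy as np

# Hypothetical inputs: three topographic feature rasters and one buffer raster
# per training point, all sharing the same 100x100 grid.
topo = [np.random.rand(100, 100) for _ in range(3)]
point_data = {
    'p1': {'index': (10, 20), 'value': 3.1},   # (row, col) into the rasters
    'p2': {'index': (45, 60), 'value': 2.4},
    'p3': {'index': (80, 15), 'value': 4.0},
}
buffers = {name: np.random.rand(100, 100) for name in point_data}

scores = validate(point_data, topo, buffers)
print(scores['R2'], scores['RMSE'], scores['ME'], scores['MAE'])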