Example #1
def build_new_network():

    # Gets a valid model before proceeding
    partial_model = helper.get_model(None)

    # Gets correct learn rate and hidden layers before proceeding
    learn_rate, hidden_layers = helper.get_network_inputs()

    # Builds the network
    model, criterion, optimizer = helper.build_network(learn_rate,
                                                       hidden_layers,
                                                       partial_model)
    model.criterion = criterion
    model.optimizer = optimizer

    return model
Example #2
def get_model():
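    # Load the pickled model and read the feature (symptom) columns from the CSV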
    model_file = '../data/all-files-for-ml/' + 'all_mnb' + '.pkl'
    mnb = joblib.load(model_file)
    data = pd.read_csv("../data/all-files-for-ml/all_x.csv")
    df = pd.DataFrame(data)
    cols = df.columns
    features = cols # = symptoms
    features_raw = [str(features[x]) for x in range(len(features))]
    # convert feature array into dict of symptom: index
    feature_dict = {}
    for i,f in enumerate(features):
        feature_dict[f] = i
    return mnb, features, feature_dict

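# Load the model, feature names, and feature-to-index mapping once at import time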
MODEL, LABELS, LABELS_DICT = get_model()

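# Serve files from the React build directory; unknown paths fall back to the SPA template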
@app.route('/', defaults={'path': ''})
@app.route('/<path:path>')
def serve(path):
    path_dir = os.path.abspath("../app/backend")  # path to the react build
    if path != "" and os.path.exists(os.path.join(path_dir, path)):
        return send_from_directory(path_dir, path)
    else:
        return render_template("react.html")



@app.route('/api/labels', methods=['GET'])
@limiter.limit("500 per hour")
def labels():
    # Assumed handler body: return the known symptom labels as JSON (uses flask.jsonify)
    return jsonify(list(LABELS))
Example #3
from helper import get_tensor, get_model
import json
model = get_model()

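# Class-index-to-disease mappings and per-disease remedies, loaded once at import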
with open('mappings.json', 'r') as f:
    mappings = json.load(f)

with open('remidies.json', 'r') as f:
    remidies = json.load(f)

    
def get_disease_name(image_bytes):
    # Preprocess the raw image bytes into an input tensor
    tensor = get_tensor(image_bytes)
    # Run the model and take the highest-scoring class
    outputs = model(tensor)
    _, prediction = outputs.max(1)
    category = prediction.item()
    # Map the class index to a disease name and look up its remedy
    disease = mappings[str(category)]

    return disease, remidies[disease]
Example #4
    type=int)
parser.add_argument('--epochs',
                    help="Amount of epochs the network is trained (default=5)",
                    default=5,
                    type=int)
parser.add_argument('--gpu',
                    help="Use the gpu to accelerate training",
                    action='store_true')

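# Parse the CLI arguments and derive the training/validation directories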
args = parser.parse_args()
train_dir = args.data_directory[0] + '/train'
valid_dir = args.data_directory[0] + '/valid'

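# Build the dataloaders and the class-to-index mapping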
(trainloader, validationloader,
 class_to_idx) = helper.get_dataloaders(train_dir, valid_dir)

if not torch.cuda.is_available() and args.gpu:
    raise RuntimeError(
        "No GPU available to train the network. Please remove the --gpu argument to train using the CPU"
    )
device = ('cuda' if args.gpu else 'cpu')

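# Build the model and optimizer, then train for the requested number of epochs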
(model, optimizer) = helper.get_model(args.arch, args.learning_rate,
                                      args.hidden_units)
helper.train_model(model, trainloader, validationloader, optimizer, device,
                   args.epochs)

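# Attach the class mapping and save the trained model if a save directory was given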
if args.save_dir is not None:
    model.class_to_idx = class_to_idx
    torch.save(model, args.save_dir)
Example #5
import os, time

import helper
from dataset import classes_names

from PIL import Image
import numpy as np
import pandas as pd

test_data_dir = 'test'

if __name__ == '__main__':
    model_path = helper.train_and_evaluate()
    model = helper.get_model(model_path)

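    # Load every image in the test directory as an RGB PIL image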
    img_names = []
    img_pils = []
    for img_name in os.listdir(test_data_dir):
        img = Image.open(os.path.join(test_data_dir, img_name))
        img = img.convert('RGB')
        img_names.append(img_name)
        img_pils.append(img)

    predicts = helper.predict(model, img_pils)  # predicted class indices
    predict_classes = [classes_names[cls] for cls in predicts]  # map indices to class names

    # img_names = ['1.png', '2.png']
    # predict_classes = ['hello', 'world']

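    # Collect file names and predicted species into a DataFrame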
    pd_data = {'file': img_names, 'species': predict_classes}
    df = pd.DataFrame(pd_data)