Example No. 1
File: model.py Project: pvr1/mlapp
def load():

    path = Path('.')
    global model
    global learn
    global classes
    model = CONFIG['model_name']
    # Check if we need to download Model file
    if CONFIG[model]['url'] != "":
        try:
            logging.info(f"Downloading model file from: {CONFIG[model]['url']}")
            urllib.request.urlretrieve(CONFIG[model]['url'], f"models/{model}.pth")
            logging.info(f"Downloaded model file and stored at path: models/{model}.pth")
        except HTTPError as e:
            logging.critical(f"Failed in downloading file from: {CONFIG[model]['url']}, Exception: '{e}'")
            sys.exit(4)

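    # Build a single-image DataBunch from the configured classes so the CNN Learner
    # can be constructed without any training data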
    init_data = ImageDataBunch.single_from_classes(
                                    path, CONFIG[model]['classes'], tfms=get_transforms(),
                                    size=CONFIG[model]['size']
                                ).normalize(imagenet_stats)
    classes = CONFIG[model]['classes']
    logging.info(f"Loading model: {CONFIG['model_name']}, architecture: {CONFIG[model]['arch']}, file: models/{model}.pth")
    learn = create_cnn(init_data, eval(f"models.{CONFIG[model]['arch']}"))
    learn.load(model, device=CONFIG[model]['device'])

    # Create a directory to collect feedback for this model
    Path.mkdir(Path(path_to(FEEDBACK_DIR, model)), parents=True, exist_ok=True)
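A minimal usage sketch for this loader (the test image name and the logging format are assumptions, not part of the original project):

load()  # populates the module-level model, learn and classes
img = open_image('sample.jpg')
pred_class, pred_idx, outputs = learn.predict(img)
logging.info(f"Predicted '{pred_class}' with probability {outputs[pred_idx]:.3f}")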
Example No. 2
async def setup_learner():
    await download_file(model_file_url, path/'models'/f'{model_file_name}.pth')
    data_bunch = ImageDataBunch.single_from_classes(path, classes,
        tfms=get_transforms(), size=224).normalize(imagenet_stats)
    learn = create_cnn(data_bunch, models.resnet34, pretrained=False)
    learn.load(model_file_name)
    return learn
Example No. 3
async def setup_learner():
    await download_file(model_file_url,
                        path / 'models' / f'{model_file_name}.pth')
    data_bunch = ImageDataBunch.single_from_classes(
        path, classes, tfms=get_transforms(),
        size=224).normalize(imagenet_stats)
    learn = create_cnn(data_bunch, models.resnet34, pretrained=False)
    learn.load(model_file_name)
    return learn
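In the fastai deployment template this coroutine is resolved once at startup so the web app begins with a loaded model; a sketch of that pattern, assuming the surrounding module defines model_file_url, model_file_name, path and classes as used above:

import asyncio

loop = asyncio.get_event_loop()
tasks = [asyncio.ensure_future(setup_learner())]
learn = loop.run_until_complete(asyncio.gather(*tasks))[0]  # block until the weights are downloaded and loaded
loop.close()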
Example No. 4
def main():
    args = parse_args()
    n, prefix = args['n_epochs'], args['prefix']
    bs, img_sz = args['batch_size'], args['image_size']
    prefix += '_' if prefix else ''

    bunch = create_data_bunch(bs,
                              img_sz,
                              args['train_size'],
                              args['valid_size'],
                              use_cache=args['use_cache'])
    train_sz = len(bunch.train_dl) / bunch.c
    valid_sz = len(bunch.valid_dl) / bunch.c
    learn = create_cnn(bunch, args['network'])
    learn.metrics = [accuracy, error_rate]

    if args['continue']:
        log.info('Continue training using cached data')

    log.info('Epochs: %d', args['n_epochs'])
    log.info('Model: %s', args['network_name'])
    log.info('# of classes: %d', bunch.c)
    log.info('Train size (per class): %d', train_sz)
    log.info('Valid size (per class): %d', valid_sz)

    if args['continue']:
        cbs = [SaveModelCallback(learn, name='bestmodel_continue')]

        try:
            learn.load(f'{prefix}final_224')
        except Exception as e:
            log.error('Cannot restore model')
            log.error(e)
            sys.exit(1)

        learn.unfreeze()
        learn.fit_one_cycle(n, callbacks=cbs, max_lr=slice(3e-5, 3e-5))
        learn.save(f'{prefix}continued_224')

    else:
        cbs = [SaveModelCallback(learn)]

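        # Stage 1: train only the new head for one epoch before unfreezing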
        learn.fit_one_cycle(1)
        learn.save(f'{prefix}one_224')

        learn.unfreeze()
        learn.freeze_to(-2)
        learn.fit_one_cycle(n - 2, max_lr=slice(1e-4, 1e-3))
        learn.save(f'{prefix}unfreeze_224')

        learn.unfreeze()
        learn.fit_one_cycle(1, callbacks=cbs, max_lr=slice(10e-5, 5e-5))
        learn.save(f'{prefix}final_224')

    log.info('Done!')
Example No. 5
def bearsInference():

    #Get querystring to figure out which model to load
    ic=request.args.get('imageclassifier')
    print(ic)

    classes=[]    
    path=None

    if ic=='KDEF':
        path=os.path.join(HERE, "tmp/KDEF")
        classes = ['afraid', 'angry', 'disgusted', 'happy', 'neutral', 'sad', 'surprised']
    elif ic=='teddys':    
        path=os.path.join(HERE, "tmp/bears")
        classes = ['black', 'grizzly', 'teddys']


    learn = create_cnn(ImageDataBunch.single_from_classes(path, classes, tfms=get_transforms(), size=224).normalize(imagenet_stats), models.resnet34)
    learn.load('stage-2')

    fp = request.files['file']
    #img=open_image(bearspath + '/models/00000014.jpg')

    # Read EXIF data 
    exifData={}
    imgTemp=Image.open(fp)
    exifDataRaw=imgTemp._getexif()
    angle=0
    if exifDataRaw is not None:
        for orientation in ExifTags.TAGS.keys():
            if ExifTags.TAGS[orientation]=='Orientation':
                break
        exif=dict(exifDataRaw.items())
#        print(exif[orientation])    

        if exif[orientation] == 3:
            angle=180
        elif  exif[orientation] == 6:
            angle=270
        elif  exif[orientation] == 8:
            angle=90

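    # Open the uploaded file as a fastai Image and apply the EXIF-derived rotation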
    img=open_image(fp)

    rotate(img,angle)


    pred_class,pred_idx,outputs = learn.predict(img)
    img_data = encode(img)

    body = { 'label': str(pred_class), 'image': img_data }
    
    resp= Response(response=json.dumps({"response": body}), status=200, mimetype='application/json')
    #print (str(pred_class))
    return resp
Example No. 6
    def __init__(self):
        self.x = "hello"
        path = untar_data(URLs.PETS)
        path_anno = path / 'annotations'
        path_images = path / 'images'
        fnames = get_image_files(path_images)

        np.random.seed(2)
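        # Regex that captures the breed label from each file name,
        # e.g. '.../great_pyrenees_102.jpg' -> 'great_pyrenees'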
        pat = re.compile(r'/([^/]+)_\d+.jpg$')

        bs = 6
        # create the data bunch
        self.data = ImageDataBunch.from_name_re(
            path, fnames, pat, ds_tfms=get_transforms(), size=224,
            bs=bs).normalize(imagenet_stats)

        self.learner = create_cnn(self.data,
                                  models.resnet34,
                                  metrics=error_rate)
Example No. 7
    def train(self):

        path = '/lib/Auth/RecFace/images/models/'
        try:
            root_models = [f for f in listdir(path) if isfile(join(path, f))]
            a = len(root_models)
        except:
            a = 0

        data = self.getDataBunch(val_p=0)
        data.normalize(imagenet_stats)
        print("Classes: ", data.c)

        learn = create_cnn(data, models.resnet34, path="/lib/Auth/RecFace/images/")

        learn.fit_one_cycle(8, 3e-3, wd=1e-6)
        learn.unfreeze()
        learn.fit_one_cycle(4, 6e-5, wd=1e-6)
        learn.fit_one_cycle(4, 1e-6)
        learn.save('root-' + str(a))
Example No. 8
async def classify_url(request):
    bytes = await get_bytes(request.query_params["url"])
    img = open_image(BytesIO(bytes))
    cars = Path(
        '/home/jupyter/tutorials/data/Competitions/CarClassification/car_data')
    names = cars / '../names.csv'
    classes = pd.read_csv(names, names=['cars'], header=None)
    #data = ImageDataBunch.from_folder(cars, train = 'train', valid='test', ds_tfms=get_transforms(), size=224, bs=64)
    data_eval = ImageDataBunch.single_from_classes(cars,
                                                   classes,
                                                   tfms=get_transforms(),
                                                   size=299)
    learn = create_cnn(data_eval, models.resnet50)
    learn.model.load_state_dict(
        torch.load(cars / 'models/learn50-uf-10e.pth', map_location='cpu'))
    pred_class, class_pos, losses = learn.predict(img)
    return JSONResponse({
        "predicted_class": str(pred_class),
        "Probability": float(losses[int(class_pos)])
    })
Example No. 9
def train(settings):
    model_path = settings['Logger']['args']['log_dir']
    path = settings['Dataloaders']['path']
    train_folder = f'{path}train'
    test_folder = f'{path}test'
    train_lbl = f'{path}train_labels.csv'
    ORG_SIZE = 96

    bs = 64
    num_workers = 4  # Apparently 2 CPUs per Kaggle node, so 4 threads
    sz = 96

    df_trn = pd.read_csv(train_lbl)

    tfms = fvision.get_transforms(do_flip=True, flip_vert=True, max_rotate=.0, max_zoom=.1,
                          max_lighting=0.05, max_warp=0.)

    data = fvision.ImageDataBunch.from_csv(path, csv_labels='train_labels.csv', folder='train', ds_tfms=tfms, size=sz, suffix='.tif',
                                   test=test_folder, bs=bs)

    stats = data.batch_stats()
    data.normalize(stats)

    data.show_batch(rows=5, figsize=(12, 9))

    def auc_score(y_pred, y_true, tens=True):
        score = roc_auc_score(y_true, torch.sigmoid(y_pred)[:, 1])
        if tens:
            score = fvision.tensor(score)
        return score

    learn = fvision.create_cnn(
        data,
        models.densenet201,
        path=model_path,
        metrics=[auc_score],
        ps=0.5
    )
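    # Probe a range of learning rates before the first training cycle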
    learn.lr_find()
    # learn.recorder.plot()

    lr = 1e-04

    learn.fit_one_cycle(1, lr)
    # learn.recorder.plot()
    # learn.recorder.plot_losses()

    learn.unfreeze()
    learn.lr_find()

    learn.fit_one_cycle(1, slice(1e-4, 1e-3))
    # learn.recorder.plot()
    # learn.recorder.plot_losses()

    preds, y = learn.get_preds()
    pred_score = auc_score(preds, y)
    print('Validation AUC:', pred_score)

    preds, y = learn.TTA()
    pred_score_tta = auc_score(preds, y)
    print('Validation AUC (TTA):', pred_score_tta)

    save_path = settings['Model']['save_path']

    torch.save(dict(state_dict=learn.model.state_dict()), save_path)
Example No. 10
                                         bs=64)
'''
Normalize the data
'''
data.normalize(vision.imagenet_stats)
'''
Show the first item in the training set
'''
img, label = data.train_ds[0]
print(label)
img.show()

'''
Create a learner: dataset, model architecture, evaluation metric
'''
learn = vision.create_cnn(data, vision.models.resnet18, metrics=fastai.vision.accuracy)
'''
Train for 1 epoch with a learning rate of 0.01
'''
learn.fit(1, 0.01)
'''
Prints:
epoch     train_loss  valid_loss  accuracy
1         0.062133    0.008120    0.997056
'''

'''
Print the accuracy of the trained model
'''
acc = fastai.vision.accuracy(*learn.get_preds())
print(acc)
Example No. 11
    def init_predictor(self):
        data = ImageDataBunch.single_from_classes(
            "", self.classes, size=224).normalize(imagenet_stats)
        learn = create_cnn(data, self.model).load(self.custom_model_path)
        return learn
Example No. 12
app = Starlette()

app.debug = True
app.mount('/static', StaticFiles(directory="static"))

path = Path(__file__).parents[0] / "tmp"
classes = [
    'arts_and_crafts', 'mid-century-modern', 'rustic', 'traditional',
    'transitional'
]
imagenet_stats = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
data2 = ImageDataBunch.single_from_classes(path,
                                           classes,
                                           tfms=get_transforms(),
                                           size=224).normalize(imagenet_stats)
learner = create_cnn(data2, models.resnet34)
learner.load('stage-2')

#cat_learner = create_cnn(cat_data, models.resnet34)
#cat_learner.model.load_state_dict(
#    torch.load("usa-inaturalist-cats.pth", map_location="cpu")
#)


@app.route("/random-image-predict", methods=["GET"])
async def random_image_predict(request):
    print('hello from random image predict')
    data = await request.form()
    bytes = await (data["file"].read())
    return predict_image_from_bytes(bytes)

Example No. 13
# Package this file with the models folder instead, for global
# access by multiple apps. Also, this just makes more sense.

from fastai.vision import create_cnn, ImageDataBunch, open_image, get_transforms, models
from fastai.metrics import accuracy
import torch.nn.functional as F
import torch

imageNum = 0

data = ImageDataBunch.from_folder("datamodels/",
                                  ds_tfms=get_transforms(),
                                  test='test',
                                  size=224,
                                  bs=1)
learn = create_cnn(data, models.resnet34, metrics=accuracy)
learn.load("model", device="cpu")
learn.precompute = False


def save_image(image):
    """
    Save an uploaded image for processing.

    Args:
        image: UploadedFile
            The image that is to be classified.

    Returns:
        str: image name
    """
Example No. 14
    'Aland+Islands', 'Saint+Vincent+and+the+Grenadines', 'Luxembourg',
    'Western+Sahara', 'Anguilla', 'Liechtenstein', 'Macao',
    'Saint+Pierre+and+Miquelon', 'Andorra', 'French+Southern+Territories',
    'British+Virgin+Islands', 'Montserrat', 'Nauru', 'Saint+Barthelemy',
    'Bahamas', 'South+Georgia+and+the+South+Sandwich+Islands', 'Dominica',
    'Singapore', 'Vanuatu', 'Bermuda', 'Jersey', 'Christmas+Island', 'Grenada',
    '+D.C.",United+States', 'Solomon+Islands', 'Monaco', 'Kiribati',
    'Norfolk+Island', 'Vatican', 'Saint+Lucia', 'Saint+Kitts+and+Nevis',
    'Turks+and+Caicos+Islands', 'Micronesia', 'U.S.+Virgin+Islands',
    'Antigua+and+Barbuda', 'American+Samoa', 'Saint+Martin',
    'Svalbard+and+Jan+Mayen', 'Sint+Maarten'
]

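# Placeholder DataBunch built only from the class list; it exists so the model
# graph can be instantiated before the trained weights are loaded below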
fake_data = ImageDataBunch.single_from_classes(
    path, classes, tfms=get_transforms(), size=224).normalize(imagenet_stats)
learn = create_cnn(fake_data, models.resnet50)
learn.model.load_state_dict(
    torch.load('resnet50-big-finetuned-bs64.pth', map_location='cpu'))


@app.route("/upload", methods=["POST"])
async def upload(request):
    data = await request.form()
    s = data["file"]
    return predict_image_from_string(s)


@app.route("/heatmap", methods=["POST"])
async def heatmap(request):
    data = await request.form()
    s = data["file"]
Example No. 15
async def get_bytes(url):
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            return await response.read()


app = Starlette()

path = Path("data/bears")
classes = ['black', 'grizzly', 'teddys']

data = ImageDataBunch.single_from_classes(path,
                                          classes,
                                          tfms=get_transforms(),
                                          size=224).normalize(imagenet_stats)
learn = create_cnn(data, models.resnet34).load('stage-2')


@app.route("/upload", methods=["POST"])
async def upload(request):
    data = await request.form()
    bytes = await (data["file"].read())
    return predict_image_from_bytes(bytes)


@app.route("/classify-url", methods=["GET"])
async def classify_url(request):
    bytes = await get_bytes(request.query_params["url"])
    return predict_image_from_bytes(bytes)

Example No. 16
app = Starlette()

dl_pan_images_path = Path("/tmp")
dl_pan_fnames = [
    "/{}_1.jpg".format(c) for c in ["Pan Card", "Driving Licence"]
]

dl_pan_data = ImageDataBunch.from_name_re(
    dl_pan_images_path,
    dl_pan_fnames,
    r"/([^/]+)_\d+.jpg$",
    ds_tfms=get_transforms(),
    size=224,
)
dl_pan_learner = create_cnn(dl_pan_data, models.resnet34)
dl_pan_learner.model.load_state_dict(
    torch.load("dl_or_pan.pth", map_location="cpu"))


@app.route("/upload", methods=["POST"])
async def upload(request):
    data = await request.form()
    bytes = await (data["file"].read())
    return predict_image_from_bytes(bytes)


@app.route("/classify-url", methods=["GET"])
async def classify_url(request):
    bytes = await get_bytes(request.query_params["url"])
    return predict_image_from_bytes(bytes)
Example No. 17
# The path where you have stored the model weights (models/stage-2.pth)
path = Path("data")

# Classes
classes = ["alligator", "crocodile"]

# Create a DataBunch
data = ImageDataBunch.single_from_classes(
    path,
    classes,
    ds_tfms=get_transforms(),
    size=224,
).normalize(imagenet_stats)

# Create a learner and load the weights
learn = create_cnn(data, models.resnet34)
learn.load("stage-2")


@app.route("/upload", methods=["POST"])
async def upload(request):
    data = await request.form()
    bytes = await (data["file"].read())
    return predict_image_from_bytes(bytes)


@app.route("/classify-url", methods=["GET"])
async def classify_url(request):
    bytes = await get_bytes(request.query_params["url"])
    return predict_image_from_bytes(bytes)
Example No. 18
torch.manual_seed(999)
if torch.cuda.is_available():
    torch.cuda.manual_seed_all(999)
torch.backends.cudnn.deterministic = True

path = Path(
    '/mnt/Datasets/kaggle_diabetic_retinopathy/experiments/512px_6000i_2bt_autocropXresize/'
)
labels_df = pd.read_csv(path / 'labels.csv')
print(labels_df.head())
print(labels_df.groupby('label').agg('count'))

# batch size
bs = 60

data = fv.ImageDataBunch.from_df(path=path / 'train_flat/',
                                 df=labels_df,
                                 fn_col=5,
                                 label_col=2,
                                 bs=bs)
data.show_batch(rows=3, figsize=(6, 6))
plt.show()
data.normalize()

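# Quadratic-weighted Cohen's kappa, an ordinal-aware metric for the retinopathy grades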
kappa = fv.KappaScore()
kappa.weights = "quadratic"

learn = fv.create_cnn(data, fv.models.resnet34, metrics=[fm.error_rate, kappa])

learn.fit_one_cycle(15)  # , max_lr=slice(1e-6,5e-2))
Example No. 19
app.debug = True
app.mount('/static', StaticFiles(directory='static'))
env = Environment(loader=FileSystemLoader(ROOT_DIR), trim_blocks=True)

categories = [
    f'/{name.strip()}_1.jpg' for name in Path('categories.txt').open()
]

placeholder_data = ImageDataBunch.from_name_re(Path('/tmp'),
                                               categories,
                                               pat=r'/([^/]+)_\d+.jpg$',
                                               ds_tfms=get_transforms(),
                                               device='cpu',
                                               size=224)

learn = create_cnn(placeholder_data, resnet50)
state = torch.load(f'models/{MODEL_NAME}.pth', map_location='cpu')
learn.model.load_state_dict(state)
predictor = Predictor(learn, *imagenet_stats)


@app.route('/')
def home(request):
    template = env.get_template('index.html')
    return HTMLResponse(template.render(static_url='/static'))


@app.route('/send', methods=['POST'])
async def send(request):
    data = await request.json()
    # predictions = predict_image_from_base64(data['imgBase64'])
Example No. 20
def create_network():

    empty_data = ImageDataBunch.load_empty(Path(UPLOAD_FOLDER), 'export.pkl')
    learn = create_cnn(empty_data, models.resnet18).load('stage-2')

    return learn
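For reference, the 'export.pkl' that load_empty reads is normally written at the end of training; a minimal sketch, assuming a trained Learner named learn whose data path matches UPLOAD_FOLDER:

learn.save('stage-2')            # writes <path>/models/stage-2.pth, loaded above
learn.data.export('export.pkl')  # writes <path>/export.pkl with classes, transforms and normalization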
Example No. 21
        "Canada-Lynx",
        "North-American-Mountain-Lion",
        "Eastern-Bobcat",
        "Central-American-Ocelot",
        "Ocelot",
        "Jaguar",
    ]
]
cat_data = ImageDataBunch.from_name_re(
    cat_images_path,
    cat_fnames,
    r"/([^/]+)_\d+.jpg$",
    ds_tfms=get_transforms(),
    size=224,
)
cat_learner = create_cnn(cat_data, models.resnet34)
cat_learner.model.load_state_dict(
    torch.load("usa-inaturalist-cats.pth", map_location="cpu"))


@app.route("/upload", methods=["POST"])
async def upload(request):
    data = await request.form()
    bytes = await (data["file"].read())
    return predict_image_from_bytes(bytes)


@app.route("/classify-url", methods=["GET"])
async def classify_url(request):
    bytes = await get_bytes(request.query_params["url"])
    return predict_image_from_bytes(bytes)
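Several of the handlers above delegate to a predict_image_from_bytes helper that the snippets do not show; a minimal sketch of such a helper for fastai v1, assuming a module-level learner named cat_learner as in this last example:

from io import BytesIO

from fastai.vision import open_image
from starlette.responses import JSONResponse


def predict_image_from_bytes(img_bytes):
    # Decode the uploaded bytes into a fastai Image and classify it
    img = open_image(BytesIO(img_bytes))
    pred_class, pred_idx, outputs = cat_learner.predict(img)
    return JSONResponse({
        "prediction": str(pred_class),
        "scores": sorted(
            zip(cat_learner.data.classes, map(float, outputs)),
            key=lambda pair: pair[1],
            reverse=True,
        ),
    })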