Beispiel #1
0
async def setup_learner():
    """Download the exported weights and build an inference-only resnet34 learner."""
    weights_path = path / 'models' / f'{model_file_name}.pth'
    await download_file(model_file_url, weights_path)
    # Empty DataBunch: inference only needs the class list, transforms and size.
    bunch = ImageDataBunch.single_from_classes(
        path, classes, tfms=get_transforms(), size=224
    ).normalize(imagenet_stats)
    learner = create_cnn(bunch, models.resnet34, pretrained=False)
    learner.load(model_file_name)
    return learner
def authenticate():
    """Return True when any detected face matches one of the stored face models."""
    models_dir = '/lib/Auth/RecFace/images/models/'
    stored_models = [
        name for name in listdir(models_dir) if isfile(join(models_dir, name))
    ]
    # 'tmp.pth' is a scratch file, not a registered identity model.
    if 'tmp.pth' in stored_models:
        stored_models.remove('tmp.pth')

    classes = ["Test", "Train"]
    data = ImageDataBunch.single_from_classes(
        '/lib/Auth/RecFace/images/', classes, ds_tfms=None, size=224
    )
    data.normalize(imagenet_stats)
    learn = cnn_learner(data, models.vgg16_bn)

    faces = getFaces.getFaces()
    if not faces:
        return False

    for face in faces:
        face = resize(face, (224, 224), interpolation=INTER_AREA)
        # Round-trip through disk so open_image yields fastai's Image type.
        imwrite('temp.jpeg', face)
        candidate = open_image('temp.jpeg')
        if any(compare(mod.split('.')[0], candidate, learn)
               for mod in stored_models):
            return True
    return False
Beispiel #3
0
def load():
    """Download (if configured) and load the selected model into module globals.

    Reads CONFIG['model_name'] to pick a model entry, optionally downloads its
    weight file into models/, builds an empty single-image DataBunch for
    inference, and populates the module-level ``model``, ``learn`` and
    ``classes``. Exits the process with code 4 if the download fails.
    """
    path = Path('.')
    global model
    global learn
    global classes
    model = CONFIG['model_name']
    # Check if we need to download the model file.
    if CONFIG[model]['url'] != "":
        try:
            logging.info(f"Downloading model file from: {CONFIG[model]['url']}")
            urllib.request.urlretrieve(CONFIG[model]['url'], f"models/{model}.pth")
            logging.info(f"Downloaded model file and stored at path: models/{model}.pth")
        except HTTPError as e:
            logging.critical(f"Failed in downloading file from: {CONFIG[model]['url']}, Exception: '{e}'")
            sys.exit(4)

    # Empty DataBunch: only the class list, transforms and size matter here.
    init_data = ImageDataBunch.single_from_classes(
                                    path, CONFIG[model]['classes'], tfms=get_transforms(),
                                    size=CONFIG[model]['size']
                                ).normalize(imagenet_stats)
    classes = CONFIG[model]['classes']
    logging.info(f"Loading model: {CONFIG['model_name']}, architecture: {CONFIG[model]['arch']}, file: models/{model}.pth")
    # getattr instead of eval(): resolves the architecture by name without
    # executing arbitrary config-supplied code.
    arch = getattr(models, CONFIG[model]['arch'])
    learn = create_cnn(init_data, arch)
    learn.load(model, device=CONFIG[model]['device'])

    # Create directory to collect feedback for this model.
    Path.mkdir(Path(path_to(FEEDBACK_DIR, model)), parents=True, exist_ok=True)
Beispiel #4
0
async def setup_learner():
    """Fetch the trained weights, then assemble a resnet34 learner for inference."""
    destination = path / 'models' / f'{model_file_name}.pth'
    await download_file(model_file_url, destination)
    empty_bunch = ImageDataBunch.single_from_classes(
        path, classes, tfms=get_transforms(), size=224)
    empty_bunch = empty_bunch.normalize(imagenet_stats)
    learn = create_cnn(empty_bunch, models.resnet34, pretrained=False)
    learn.load(model_file_name)
    return learn
Beispiel #5
0
def bearsInference():
    """Flask endpoint: classify an uploaded image with one of two CNN models.

    The ``imageclassifier`` query-string parameter selects the model
    ('KDEF' for facial expressions, 'teddys' for bear types). The uploaded
    file's EXIF orientation is read to compute a correction angle, the image
    is classified, and a JSON response with the label and the encoded image
    is returned.
    """
    # Query string decides which model / class list to load.
    ic = request.args.get('imageclassifier')
    print(ic)

    if ic == 'KDEF':
        path = os.path.join(HERE, "tmp/KDEF")
        classes = ['afraid', 'angry', 'disgusted', 'happy', 'neutral', 'sad', 'surprised']
    elif ic == 'teddys':
        path = os.path.join(HERE, "tmp/bears")
        classes = ['black', 'grizzly', 'teddys']
    else:
        # Previously fell through with path=None and failed deep inside
        # fastai; fail fast with a clear message instead.
        raise ValueError(f"unknown imageclassifier: {ic!r}")

    learn = create_cnn(
        ImageDataBunch.single_from_classes(
            path, classes, tfms=get_transforms(), size=224
        ).normalize(imagenet_stats),
        models.resnet34)
    learn.load('stage-2')

    fp = request.files['file']

    # Read the EXIF orientation tag so we can undo camera rotation.
    angle = 0
    exifDataRaw = Image.open(fp)._getexif()
    if exifDataRaw is not None:
        for orientation in ExifTags.TAGS.keys():
            if ExifTags.TAGS[orientation] == 'Orientation':
                break
        exif = dict(exifDataRaw.items())
        # EXIF orientation value -> rotation angle (missing tag -> 0).
        angle = {3: 180, 6: 270, 8: 90}.get(exif.get(orientation), 0)

    img = open_image(fp)

    # NOTE(review): the original discarded rotate()'s return value; if rotate
    # is not in-place this line is a no-op — kept as-is to preserve behavior.
    rotate(img, angle)

    pred_class, pred_idx, outputs = learn.predict(img)
    img_data = encode(img)

    body = {'label': str(pred_class), 'image': img_data}
    resp = Response(response=json.dumps({"response": body}), status=200, mimetype='application/json')
    return resp
Beispiel #6
0
 def __init__(self, picture_file):
     """Load the uploaded picture and a resnet50 dog-breed classifier.

     Args:
         picture_file: File name (with extension) of an image stored
             under the app's static/images/ directory.
     """
     self.classes = [
         'Afghan_hound', 'African_hunting_dog', 'Airedale',
         'American_Staffordshire_terrier', 'Appenzeller',
         'Australian_terrier', 'Bedlington_terrier', 'Bernese_mountain_dog',
         'Blenheim_spaniel', 'Border_collie', 'Border_terrier',
         'Boston_bull', 'Bouvier_des_Flandres', 'Brabancon_griffon',
         'Brittany_spaniel', 'Cardigan', 'Chesapeake_Bay_retriever',
         'Chihuahua', 'Dandie_Dinmont', 'Doberman', 'English_foxhound',
         'English_setter', 'English_springer', 'EntleBucher', 'Eskimo_dog',
         'French_bulldog', 'German_shepherd', 'German_short-haired_pointer',
         'Gordon_setter', 'Great_Dane', 'Great_Pyrenees',
         'Greater_Swiss_Mountain_dog', 'Ibizan_hound', 'Irish_setter',
         'Irish_terrier', 'Irish_water_spaniel', 'Irish_wolfhound',
         'Italian_greyhound', 'Japanese_spaniel', 'Kerry_blue_terrier',
         'Labrador_retriever', 'Lakeland_terrier', 'Leonberg', 'Lhasa',
         'Maltese_dog', 'Mexican_hairless', 'Newfoundland',
         'Norfolk_terrier', 'Norwegian_elkhound', 'Norwich_terrier',
         'Old_English_sheepdog', 'Pekinese', 'Pembroke', 'Pomeranian',
         'Rhodesian_ridgeback', 'Rottweiler', 'Saint_Bernard', 'Saluki',
         'Samoyed', 'Scotch_terrier', 'Scottish_deerhound',
         'Sealyham_terrier', 'Shetland_sheepdog', 'Shih-Tzu',
         'Siberian_husky', 'Staffordshire_bullterrier', 'Sussex_spaniel',
         'Tibetan_mastiff', 'Tibetan_terrier', 'Walker_hound', 'Weimaraner',
         'Welsh_springer_spaniel', 'West_Highland_white_terrier',
         'Yorkshire_terrier', 'affenpinscher', 'basenji', 'basset',
         'beagle', 'black-and', 'bloodhound', 'bluetick', 'borzoi', 'boxer',
         'briard', 'bull_mastiff', 'cairn', 'chow', 'clumber',
         'cocker_spaniel', 'collie', 'curly-coated_retriever', 'dhole',
         'dingo', 'flat-coated_retriever', 'giant_schnauzer',
         'golden_retriever', 'groenendael', 'keeshond', 'kelpie',
         'komondor', 'kuvasz', 'malamute', 'malinois', 'miniature_pinscher',
         'miniature_poodle', 'miniature_schnauzer', 'otterhound',
         'papillon', 'pug', 'redbone', 'schipperke', 'silky_terrier',
         'soft-coated_wheaten_terrier', 'standard_poodle',
         'standard_schnauzer', 'toy_poodle', 'toy_terrier', 'vizsla',
         'whippet', 'wire-haired_fox_terrier'
     ]
     # The original split the name with splitext() and immediately re-joined
     # the two halves — a no-op round-trip; use the file name directly.
     img_path = os.path.join(app.root_path, 'static', 'images', picture_file)
     self.img = open_image(img_path)
     # Empty DataBunch: inference only needs the class list and input size.
     self.data = ImageDataBunch.single_from_classes(
         "./", self.classes, ds_tfms=get_transforms(),
         size=224).normalize(imagenet_stats)
     self.learner = cnn_learner(self.data, models.resnet50)
     self.learner.load('stage-2-rerun')
async def classify_url(request):
    """Starlette endpoint: download the image at ?url= and classify the car.

    Builds an inference-only resnet50 learner from the car-classification
    class list, loads the saved weights, and returns the predicted class
    with its probability as JSON.
    """
    # Renamed from `bytes` — avoid shadowing the builtin.
    img_bytes = await get_bytes(request.query_params["url"])
    img = open_image(BytesIO(img_bytes))
    cars = Path(
        '/home/jupyter/tutorials/data/Competitions/CarClassification/car_data')
    names = cars / '../names.csv'
    classes = pd.read_csv(names, names=['cars'], header=None)
    # Empty DataBunch: inference needs only the class list, tfms and size.
    data_eval = ImageDataBunch.single_from_classes(cars,
                                                   classes,
                                                   tfms=get_transforms(),
                                                   size=299)
    learn = create_cnn(data_eval, models.resnet50)
    learn.model.load_state_dict(
        torch.load(cars / 'models/learn50-uf-10e.pth', map_location='cpu'))
    pred_class, class_pos, losses = learn.predict(img)
    # Bug fix: the response previously returned the hard-coded classes[153]
    # for every request instead of the model's actual prediction.
    return JSONResponse({
        "predicted_class": str(pred_class),
        "Probability": float(losses[class_pos])
    })
Beispiel #8
0
def model_to_learner(model: nn.Module,
                     im_size: int = IMAGENET_IM_SIZE) -> Learner:
    """Wrap a pyTorch ImageNet model in a fast.ai Learner for prediction.

    Args:
        model (nn.Module): Base ImageNet model. E.g. models.resnet18()
        im_size (int): Image size the model will expect to have.

    Returns:
         Learner: a model trainer for prediction
    """
    # fast.ai requires a DataBunch to construct a Learner, so we hand it an
    # empty one carrying only the ImageNet class list and the input size.
    # single_from_classes is deprecated, but remains the easiest workaround.
    # Related thread: https://forums.fast.ai/t/solved-get-prediction-from-the-imagenet-model-without-creating-a-databunch/36197/5
    labels = imagenet_labels()
    empty_data = ImageDataBunch.single_from_classes(
        "", classes=labels, size=im_size)
    empty_data = empty_data.normalize(imagenet_stats)
    return Learner(empty_data, model)
Beispiel #9
0
    pil_img.save(buff, format="JPEG")
    return base64.b64encode(buff.getvalue()).decode("utf-8")


async def get_bytes(url):
    """Fetch *url* asynchronously and return the raw response body as bytes."""
    async with aiohttp.ClientSession() as http:
        async with http.get(url) as resp:
            body = await resp.read()
    return body


# --- Application bootstrap: model is built once at import time ---
app = Starlette()

path = Path('data/')

# Class order must match the order used when the model was trained.
classes = ['landscape', 'people-close-up', 'people-landscape']
# Empty DataBunch (no training data) — only used so fastai can build a learner.
data2 = ImageDataBunch.single_from_classes(path, classes,
                                           size=224).normalize(imagenet_stats)
learn = cnn_learner(data2, models.resnet34, pretrained=False)
# Load the saved weights named 'model' (fastai resolves the .pth location).
learn.load('model')
# learn = load_learner(path, 'model.pth')

index_html = """
<!doctype html>
<html lang="en">
  <head>
    <meta charset="utf-8">
    <meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
    <meta name="description" content="">
    <meta name="author" content="">

    <title>Tour pic classifier</title>
Beispiel #10
0
import aiohttp
import asyncio

# FastAPI application instance; route handlers below register on it.
app = FastAPI()


class Prediction(BaseModel):
    """Response payload describing a classifier prediction."""

    # Annotated so pydantic registers it as a model field — an un-annotated
    # assignment on a BaseModel is a plain class attribute, not a field.
    name: str = "StatueLearner"
    # Timestamp of the prediction; None until populated.
    pred_ts: Optional[datetime] = None
    # Prediction tuples (shape set by the caller). The mutable [] default is
    # safe here: pydantic copies field defaults per instance.
    predictions: List[tuple] = []


# --- Inference setup, executed once at import time ---
path = '.'
# Class order must match the order used when the model was trained.
classes = ['chinese', 'egyptian', 'greek']
# Empty DataBunch: carries only the class list, transforms and input size.
data = ImageDataBunch.single_from_classes(path,
                                          classes,
                                          ds_tfms=get_transforms(),
                                          size=224).normalize(imagenet_stats)
learner = cnn_learner(data, models.resnet50)
# Load the saved weights 'stage-1-resnet50-er19' (fastai resolves the .pth path).
learner.load('stage-1-resnet50-er19')


async def get_bytes(url):
    """Return the body of an HTTP GET request to *url* as bytes."""
    async with aiohttp.ClientSession() as client:
        async with client.get(url) as reply:
            payload = await reply.read()
            return payload


@app.get("/")
async def root():
    """Health-check endpoint: responds with a fixed greeting."""
    greeting = {"message": "Hello World"}
    return greeting
 def init_predictor(self):
     """Build a learner over an empty DataBunch and load the custom weights."""
     empty_bunch = ImageDataBunch.single_from_classes(
         "", self.classes, size=224)
     empty_bunch = empty_bunch.normalize(imagenet_stats)
     return create_cnn(empty_bunch, self.model).load(self.custom_model_path)