Example #1
def upload_image():
    if request.method == "POST":
        if request.files:
            image = request.files["image"]
            if allowed_file(image.filename):
                pred, pred_idx, probs = learn_inf.predict(
                    PILImage.create(image))
                pred = pred.replace('_', ' ').title()
                if probs[pred_idx] > MIN_STANDARD:
                    return render_template(
                        "public/upload_image.html",
                        messages=
                        f"I think it is a {pred} flag; Probability: {probs[pred_idx]:.04f}"
                    )
                else:
                    return render_template(
                        "public/upload_image.html",
                        messages=f"I can't recognise this one." +
                        f" But if had to guess, " +
                        f"I would say it was the flag of {pred}; Probability: {probs[pred_idx]:.04f}"
                    )
            else:
                return render_template(
                    "public/upload_image.html",
                    messages=
                    f"Sorry, invalid image type: Must be a: {ALLOWED_EXTENSIONS}"
                )

    return render_template("public/upload_image.html", messages="")
Example #2
def fix_rotation(file_data):
    # Check the EXIF data to see if the image has iOS rotation metadata. If so, fix it.
    try:
        image = PILImage.create(file_data)
        for orientation in ExifTags.TAGS.keys():
            if ExifTags.TAGS[orientation] == 'Orientation':
                break

        exif = dict(image.getexif().items())

        rot = 0
        if exif[orientation] == 3:
            rot = 180
        elif exif[orientation] == 6:
            rot = 270
        elif exif[orientation] == 8:
            rot = 90

        if rot != 0:
            st.write(
                f"Rotating image {rot} degrees (you're probably on iOS)...")
            image = image.rotate(rot, expand=True)
            # This step is necessary because image.rotate returns a PIL.Image, not PILImage, the fastai derived class.
            image.__class__ = PILImage

    except (AttributeError, KeyError, IndexError):
        pass  # image didn't have EXIF data

    return image
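A possible call site, assuming a Streamlit uploader and a loaded fastai learner named `learn_inf` (both names are assumptions based on the other snippets here):

uploaded = st.file_uploader("Upload Image")
if uploaded is not None:
    img = fix_rotation(uploaded)  # returns a PILImage with orientation normalised
    pred, pred_idx, probs = learn_inf.predict(img)
    st.write(f"Prediction: {pred} ({probs[pred_idx]:.4f})")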
Example #3
def create_inference_model(checkpoint: str = None, model='resnet34', path='.'):
    if model == 'resnet34':
        model = resnet34
    elif model == 'resnet18':
        model = resnet18
    elif model == 'mobilenet_v2':
        model = mobilenet_v2

    # Create an inference model instance and load the requested checkpoint
    inf_db = DataBlock(blocks=[ImageBlock, CategoryBlock],
                       get_x=ItemGetter(0),
                       get_y=ItemGetter(1))

    dummy_img = PILImage.create(np.zeros((415, 415, 3), dtype=np.uint8))
    source = [(dummy_img, False), (dummy_img, True)]

    inf_dls = inf_db.dataloaders(source)

    if model == mobilenet_v2:
        learner = cnn_learner(inf_dls,
                              model,
                              cut=-1,
                              splitter=_mobilenetv2_split,
                              pretrained=False)
    else:
        learner = cnn_learner(inf_dls, model, pretrained=False)
    learner.path = Path(path)

    if checkpoint is not None:
        learner.load(checkpoint, with_opt=False, device='cpu')

    return learner
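A sketch of how this factory might be used for single-image inference; the checkpoint name and image path are placeholders, not taken from the original code:

learn = create_inference_model(checkpoint='export_resnet18', model='resnet18', path='.')
img = PILImage.create('page_scan.jpg')
pred, pred_idx, probs = learn.predict(img)
print(pred, f'{probs[pred_idx]:.4f}')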
Example #4
def split_documents(pdf_documents: list, dpi: int = 50):
    # Split each page into square tiles with side length equal to
    # A4 width
    a4_width = 8.3  # inches
    tile_size = int(a4_width * dpi)
    info = []
    tiles = []
    errors = []

    for i, pdf in enumerate(pdf_documents):
        try:
            pages = pdf2image.convert_from_bytes(pdf['bytes'], dpi=dpi)
            for j, page in enumerate(pages):
                page = expand_image(page, tile_size, tile_size)
                for k, tile in enumerate(get_tiles(page, tile_size)):
                    tile = [
                        tile['x_start'], tile['y_start'], tile['x_stop'],
                        tile['y_stop']
                    ]
                    info.append({
                        'document': pdf['filename'],
                        'page': j + 1,
                        'tile': k + 1,
                        'tile_extent': tile
                    })
                    tiles.append(PILImage.create(np.array(page.crop(tile))))
        except Exception as e:
            errors.append({'document': pdf['filename'], 'error': e})
            print('Unable to open', pdf['filename'], 'because', e)

    return tiles, pd.DataFrame(info), pd.DataFrame(errors)
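The function relies on `expand_image` and `get_tiles` helpers that are not shown. A minimal sketch of what `get_tiles` might look like, assuming it simply walks the padded page in non-overlapping square steps (the real helper may differ):

def get_tiles(page, tile_size):
    # Hypothetical helper: yield coordinate dicts covering the page in
    # non-overlapping tile_size x tile_size squares.
    width, height = page.size
    for y in range(0, height, tile_size):
        for x in range(0, width, tile_size):
            yield {'x_start': x, 'y_start': y,
                   'x_stop': x + tile_size, 'y_stop': y + tile_size}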
Example #5
def evaluate_classification(images, animal_type):
    num_total, num_correct = 0, 0
    for image_file in images:
        image = PILImage.create(image_file)
        predicted_animal_type, _ = classify(image, loaded_learner)
        num_total += 1
        if predicted_animal_type == animal_type:
            num_correct += 1
    return num_total, num_correct
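A hypothetical call site, assuming fastai's `get_image_files`, a folder of images of one class, and the `classify`/`loaded_learner` globals the function expects:

cat_files = get_image_files('data/valid/cat')
total, correct = evaluate_classification(cat_files, 'cat')
print(f"Per-class accuracy: {correct / total:.2%}")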
Example #6
def bayes_build_inference_dfdlpreds(self: Learner, path, dataset, item_count=100, n_sample=10):
    items = get_image_files(path).shuffle()[:item_count]
    dl = self.dls.test_dl(items.map(lambda o: PILImage.create(o)), num_workers=0)
    res = self.bayes_get_preds(dl=dl, n_sample=n_sample)
    ents = res[2]
    preds = res[0]
    unc = uncertainty_best_probability(preds)
    bald = BALD(preds)
    df = pd.DataFrame(pd.Series(items, name='image_files'))
    df['entropy'] = pd.Series(ents, name='entropy')
    df['best_prob_uncertainty'] = pd.Series(unc, name='best_prob_uncertainty')
    df['bald'] = pd.Series(bald, name='bald')
    df['dataset'] = dataset
    return (df, dl, preds)
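A hypothetical usage, assuming the method is patched onto `Learner` and the goal is to surface the most uncertain images for labelling (the path and column choices are placeholders):

df, dl, preds = learn.bayes_build_inference_dfdlpreds('data/unlabelled', dataset='unlabelled')
to_label = df.sort_values('bald', ascending=False).head(20)
print(to_label[['image_files', 'bald', 'entropy']])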
Example #7
def get_dog_details(self, img):
    """Use the pre-trained model to get dog details."""
    if os.path.exists(model_path):
        logger.info("Invoking Model:" + model_path)
        learn_inference = load_learner(model_path)
        logger.info("Running Predictor...")
        img = PILImage.create(img)
        pred, pred_idx, probs = learn_inference.predict(img)
        logger.info('Predicted Result:' + str(pred))
        return {
            'success': True,
            'pred': str(pred),
            # 'pred_idx': pred_idx,
            # 'probs': probs
        }
    else:
        logger.debug("Model not found:" + model_path)
        return {'success': False, 'message': 'Apologies. Model not found'}
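A hypothetical caller, assuming the method lives on some service object (`dog_service` is made up here) and that `model_path` and `logger` are module-level globals:

result = dog_service.get_dog_details('uploads/puppy.jpg')
if result['success']:
    print('Predicted breed:', result['pred'])
else:
    print(result['message'])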
Example #8
def upload_image():
    if request.method == "POST":
        if request.files:
            image = request.files["image"]
            if allowed_file(image.filename):
                pred, pred_idx, probs = learn_inf.predict(
                    PILImage.create(image))
                return render_template(
                    "public/upload_image.html",
                    messages=
                    f"Prediction: {pred}; Probability: {probs[pred_idx]:.04f}")
            else:
                return render_template(
                    "public/upload_image.html",
                    messages=
                    f"Sorry, invalid image type: Must be a: {ALLOWED_EXTENSIONS}"
                )

    return render_template("public/upload_image.html", messages="")
Example #9
import streamlit as st
from fastai.learner import load_learner
from fastai.vision.core import PILImage

'## German Character Recogniser'
"Here's the [GitHub](https://github.com/jacKlinc/german_char_recogniser) repo"
'Upload a picture of a vowel'

learner_inf = load_learner('./res/AEIOU_model.pkl')

# Upload
pic = st.file_uploader("Upload Image")

'Click Classify to find which vowel it is'

probs = []
pred_idx = 1
pred = 'n/a'

# Display image
if pic is not None:
    img = load_image(pic)
    st.image(img)

    # Parse image
    pil_img = PILImage.create(pic)

    # Predict category
    pred, pred_idx, probs = predict_img(pil_img)

# Classify (only if an image has been uploaded)
if st.button('Classify') and pic is not None:
    'Prediction: ', pred
    'Probability: ', str(round(probs[pred_idx].item(), 5))
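The script calls `load_image` and `predict_img` helpers that are not included. A minimal sketch of what they might look like, assuming `load_image` just opens the upload for display and `predict_img` wraps the learner's predict call:

from PIL import Image

def load_image(file):
    # Hypothetical helper: open the uploaded file so st.image can display it.
    return Image.open(file)

def predict_img(img):
    # Hypothetical helper: run the fastai learner and return (pred, pred_idx, probs).
    return learner_inf.predict(img)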
Example #10
def encodes(self, img: PILImage):
    if self.idx == 0:
        aug_img = self.train_aug(image=np.array(img))['image']
    else:
        aug_img = self.valid_aug(image=np.array(img))['image']
    return PILImage.create(aug_img)
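This `encodes` method is usually part of a wrapper transform that applies different albumentations pipelines to the training and validation splits. A sketch of the surrounding class it appears to come from, following the pattern in the fastai albumentations tutorial (the class name and `before_call` hook are assumptions here):

import numpy as np
from fastai.vision.all import RandTransform, PILImage, store_attr

class AlbumentationsTransform(RandTransform):
    # Apply separate albumentations pipelines to the train and valid splits.
    split_idx, order = None, 2

    def __init__(self, train_aug, valid_aug):
        store_attr()

    def before_call(self, b, split_idx):
        # Remember which split is being processed (0 = train, otherwise valid).
        self.idx = split_idx

    def encodes(self, img: PILImage):
        if self.idx == 0:
            aug_img = self.train_aug(image=np.array(img))['image']
        else:
            aug_img = self.valid_aug(image=np.array(img))['image']
        return PILImage.create(aug_img)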
Example #11
from pathlib import Path

import streamlit as st
from PIL import Image as pImage
from fastai.learner import load_learner
from fastai.vision.core import PILImage

current_folder = Path(__file__).parent

st.set_option('deprecation.showfileUploaderEncoding', False)
st.title('Instrument classifier')
st.markdown(f'currently supported instruments: `acoustic_guitar`,'
            f'`bass_guitar`, `drums`, `flute`, `gramophone`, `harp`, `piano`,'
            f' `saxophone`, `tabla`, `violen`')
uploader_image = st.file_uploader("upload your image to be classified",
                                  key="input_image_loader")


@st.cache(allow_output_mutation=True)
def load_model():
    return load_learner(current_folder / "instrument_classifier.pkl")


instrument_model = load_model()

if uploader_image is not None:
    image = PILImage.create(uploader_image)

    prediction, prediction_index, probability = instrument_model.predict(image)
    st.text(
        f"predicted: {prediction}, probability: {probability[prediction_index]}"
    )
    st.image(image)