Example #1
def _predict_single(fp):
    img = open_image(fp)
    pred = img.predict(_learn)
    idx = pred.argmax()
    label = _learn.data.classes[idx]
    img_data = encode(img)
    return {'label': label, 'name': fp.filename, 'image': img_data}
Example #2
def predict(img_path, class_add_thresh, message_id):
    learn, data = load_model(inference=True)

    img = open_image(img_path)
    pred_class, pred_idx, outputs = learn.predict(img)

    confidence = float(outputs[pred_idx])
    zipped = zip((round(n, 3) for n in map(float, outputs)), data.classes)
    zipped = sorted(zipped, key=lambda tup: tup[0], reverse=True)

    print(f'Predicted Class: {pred_class}')
    print(f'Probs: {zipped}')

    if class_add_thresh and confidence > class_add_thresh:
        now_str = str(datetime.now()).replace(":", "_")
        filename = f'auto_add_{now_str}_{confidence}'
        filetype = img_path[img_path.rfind('.'):]
        path = PATH / 'train' / str(pred_class) / f'{filename}{filetype}'

        Path(path).write_bytes(
            Path(img_path).read_bytes())  # save the image to the training set
        print(AUTO_ADD_MESSAGE.format(conf=confidence,
                                      thresh=class_add_thresh))

        r.set(message_id, str(path))

    r.set(f'{message_id}_added',
          int(bool(class_add_thresh and confidence >
                   class_add_thresh)))  # redis wants an int, bytes, or string
Example #3
def predict():
    try:
        f = request.files['img']
        f.save(
            os.path.join(app.config['UPLOAD_FOLDER'],
                         secure_filename(f.filename)))
    except Exception as e:
        print('cannot upload')
        print(str(e))
        # without a saved upload there is nothing to predict on
        return render_template('predict.html', predict_value=None, path='')

    pred_class = None
    try:
        img = open_image('./static/image/' + secure_filename(f.filename))
        print(f.filename)

        pred_class, pred_idx, outputs = learn.predict(img)
        print(pred_class)
    except Exception as e:
        print(str(e))
    return render_template('predict.html',
                           predict_value=pred_class,
                           path=f.filename)
Example #4
async def analyze(request):
    img_data = await request.form()
    img_bytes = await (img_data['file'].read())
    img = open_image(BytesIO(img_bytes))
    # run inference once and reuse the outputs for both the label and the probabilities
    prediction, pred_idx, outputs = learn.predict(img)
    prob = sorted(zip(classes, list(outputs.numpy())), key=lambda x: -x[1])
    return JSONResponse({'result': str(prediction), 'prob': str(prob)})
Example #5
def _predict_single(fp):
    img = open_image(fp)
    pred_class, pred_idx, outputs = _learn.predict(img)
    img_data = encode(img)
    return {'label': str(pred_class), 'name': fp.filename, 'image': img_data}
Example #6
def predictFile(file):
    if os.path.isfile(file):
        try:
            img = open_image(file, convert_mode="L")  # open as grayscale
            r = learn.predict(img)
            return str(r[0]).capitalize(), str(float(torch.max(r[2]) * 100))
        except Exception:
            pass
    return None
Example #7
def happysad_eval(pic):
    trained_model = model_fetch()
    classes = ['happy', 'sad']
    bytes_pic = open_image(BytesIO(pic))
    pred_class, pred_idx, outputs = trained_model.predict(bytes_pic)
    os.remove(Path.cwd() / 'export.pkl')
    return JSONResponse({
        "pred_class": str(pred_class),
        "results": dict(zip(classes, outputs.tolist()))
    })
Example #8
def predict_image_from_bytes(img_bytes):
    img = open_image(BytesIO(img_bytes))
    pred_class, _pred_idx, outputs = LEARN.predict(img)
    return JSONResponse({
        "predictions": sorted(
            zip(LEARN.data.classes, map(float, outputs)),
            key=lambda p: p[1],
            reverse=True
        )
    })
Example #9
    def form_valid(self, form):

        _, _, outputs = LEARN.predict(
            open_image(BytesIO(form.files['pic'].read())))
        predictions = zip(LEARN.data.classes, outputs)

        return render(self.request, self.template_name, {
            'form': form,
            'predictions': predictions
        })
Example #10
def bearsInference():

    # Get the querystring to figure out which model to load
    ic = request.args.get('imageclassifier')
    print(ic)

    classes = []
    path = None

    if ic == 'KDEF':
        path = os.path.join(HERE, "tmp/KDEF")
        classes = ['afraid', 'angry', 'disgusted', 'happy', 'neutral', 'sad', 'surprised']
    elif ic == 'teddys':
        path = os.path.join(HERE, "tmp/bears")
        classes = ['black', 'grizzly', 'teddys']

    learn = create_cnn(
        ImageDataBunch.single_from_classes(
            path, classes, tfms=get_transforms(), size=224).normalize(imagenet_stats),
        models.resnet34)
    learn.load('stage-2')

    fp = request.files['file']

    # Read the EXIF orientation tag so the image can be rotated upright
    imgTemp = Image.open(fp)
    exifDataRaw = imgTemp._getexif()
    angle = 0
    if exifDataRaw is not None:
        for orientation in ExifTags.TAGS.keys():
            if ExifTags.TAGS[orientation] == 'Orientation':
                break
        exif = dict(exifDataRaw.items())

        if exif[orientation] == 3:
            angle = 180
        elif exif[orientation] == 6:
            angle = 270
        elif exif[orientation] == 8:
            angle = 90

    img = open_image(fp)

    rotate(img, angle)

    pred_class, pred_idx, outputs = learn.predict(img)
    img_data = encode(img)

    body = {'label': str(pred_class), 'image': img_data}

    resp = Response(response=json.dumps({"response": body}), status=200,
                    mimetype='application/json')
    return resp
Example #11
def augment_image(path, size=(224, 224), x=10):
    img = image.open_image(path)
    augmented = [
        img.apply_tfms(tfms=tfms[0], size=size, padding_mode='border')
        for i in range(x)
    ]
    orig = load_img(path, target_size=size)
    return [orig] + [
        array_to_img(aug_img.data, data_format='channels_first')
        for aug_img in augmented
    ]
Example #12
def infer_classes(png_fname):
    """
    Predict the class (background audio type or speech) for a spectrogram image.

    Class index 1 corresponds to speech.
    """
    from fastai.vision.image import open_image
    prediction = model_classes.predict(open_image(png_fname))

    return prediction
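For reference, a minimal usage sketch. The `model_classes` learner and the spectrogram filename are assumptions here; the snippet above does not show how the model is loaded.

pred_class, pred_idx, probs = infer_classes("clip_0001.png")
print(str(pred_class), float(probs[pred_idx]))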
Example #13
    def predictscore(self, filename):
        img = open_image(filename)
        pred_class, pred_idx, outputs = self.learn.predict(img)

        if self.verbose:
            print("pred")
            print(pred_class)
            print("pred_idx")
            print(pred_idx)
            print(outputs)

        return (pred_class.obj, outputs[pred_idx].item())
Example #14
def predict(url):
    response = requests.get(url)
    img = Image.open(BytesIO(response.content)).resize((512, 384),
                                                       Image.ANTIALIAS)
    filename = "./ml-model/data/img.jpg"
    img.save(filename)

    learn = load_learner("./ml-model/")

    # run inference once and reuse both the predicted class and the probability tensor
    pred_class, _, tensor_probs = learn.predict(open_image(filename))
    category = str(pred_class)
    pred_probs = {
        "cardboard": round(float(tensor_probs[0]), 2),
        "glass": round(float(tensor_probs[1]), 2),
        "metal": round(float(tensor_probs[2]), 2),
        "paper": round(float(tensor_probs[3]), 2),
        "plastic": round(float(tensor_probs[4]), 2),
        "trash": round(float(tensor_probs[5]), 2)
    }
    ans = {"url": url, "category": category, "pred_probs": pred_probs}

    return ans
Example #15
def predictImage(request):
    fileObj = getFile(request)
    filePath, fileName, fileUrl = handled_uploaded_file(fileObj)
    print(filePath)
    _,_,outputs = LEARN.predict(open_image(filePath))
    outputs = outputs.numpy()
    # print(fileObj)
    # print(np.around(outputs*100, 3))
    # print(outputs[np.argmax(outputs)])
    # print(LEARN.data.classes[np.argmax(outputs)])
    # print(fileUrl)

    context = {'scores': list(np.around(outputs*100, 3)),
               'prediction_score': np.around(outputs[np.argmax(outputs)]*100, 3),
               'labels': LEARN.data.classes,
               'prediction_label': LEARN.data.classes[np.argmax(outputs)],
               'image': fileUrl}

    return render(request, 'index.html', context)
Example #16
def run_test(model, path, show_images=False, test_folder='test'):
    count = 0
    correct = 0
    testpath = os.path.join(path, test_folder)
    for folder in os.listdir(testpath):
        folderpath = os.path.join(testpath, folder)
        if os.path.isdir(folderpath):
            for fname in os.listdir(folderpath):
                imgpath = os.path.join(folderpath, fname)
                img = image.open_image(imgpath)
                p = model.predict(img)
                if show_images:
                    img.show(title=p)
                prediction = str(p[0])
                if prediction == folder:
                    correct += 1
                count += 1
    print(f"Result for test: {correct}/{count} ({correct / count})")
Example #17
    def predict(self, img_path):
        img = open_image(img_path)
        return self.learn.predict(img)
Example #18
def get_prediction(img_bytes):
    img = open_image(BytesIO(img_bytes))
    pred = learner.predict(img)
    return pred
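For reference, a minimal usage sketch. It assumes `learner` has already been loaded elsewhere (for example with `load_learner`) and that "cat.jpg" is a hypothetical image file on disk.

with open("cat.jpg", "rb") as f:
    pred_class, pred_idx, probs = get_prediction(f.read())
print(str(pred_class), float(probs[pred_idx]))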
Example #19
    def predict(self, filename):
        img = open_image(filename)
        pred_class, pred_idx, outputs = self.learn.predict(img)
        return pred_class
Example #20
# In[ ]:


learn = load_learner("/content/drive/My Drive/data/models/")


# In[ ]:


learn.load("/content/drive/My Drive/data/models/model3")


# In[49]:


learn.predict(open_image("stack-of-paper.jpg"))


# In[ ]:


from PIL import Image

img = Image.open("stack-of-paper.jpg").resize((512,384), Image.ANTIALIAS)
img.save("stack-of-paper.jpg")


# In[46]:


get_ipython().system('wget "http://tmib.com/wp-content/uploads/2014/08/stack-of-paper.jpg"')
Example #21
def make_prediction(img_bytes):
    img = open_image(BytesIO(img_bytes))
    pred_class, pred_idx, outputs = learn.predict(img)
    prob = outputs[pred_idx].item() * 100.0
    return (str(pred_class), str(round(prob, 1)))


def predict_recylable_from_image_url():
    """
    Get recycling item class prediction and probability from a `fastai` model.
    This endpoint accepts an image `url` as a query argument. It returns the recycling image
    `url`, `class`, and `probability` (probability of being that class) based on the image.
    To access the API Docs, visit host:port/apidocs (e.g.: 0.0.0.0:80/apidocs)
    ---
    tags:
      - Probability of recycling class from image URL
    parameters:
      - name: url
        in: query
        type: string
        required: true
        description: url for recycling image to analyze
    responses:
      400:
        description: Bad Request
      200:
        description: OK
        properties:
          url:
            type: string
            example: google.com/images/plastic_bottle.jpg
          class:
            type: string
            example: plastics
          probability:
            type: float
            example: 0.992

    """
    url = _get_image_url_from_request(request)
    g.log = g.log.bind(url=url)

    try:
        response = requests.get(url)
        image_from_interwebs = im.open_image(BytesIO(response.content))
        g.log = g.log.bind(image_opened=True)
    except Exception as e:
        print(e)
        abort(
            400,
            'Unable to open image specified in `url`. Check API docs for usage.'
        )

    try:
        pred_class, _, pred_probs = LEARNER.predict(image_from_interwebs)
        g.log = g.log.bind(prediction_made=True)
    except Exception as e:
        print(e)
        abort(
            400,
            'Issue getting model prediction for image. Check API docs for usage.'
        )

    try:
        g.log = g.log.bind(pred_class=pred_class.obj,
                           pred_probability=max(pred_probs.tolist()),
                           pred_probability_list=pred_probs.tolist())
        output = {
            'url': url,
            'class': pred_class.obj,
            'probability': max(pred_probs.tolist()),
        }
        return jsonify(output)
    except Exception as e:
        print(e)
        abort(400, 'Issue JSON-ifying output. Check API docs for usage.')
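For context, a minimal client-side sketch of calling this endpoint. The host, port, and route name are assumptions, since the snippet does not show how the view is registered with Flask; the `url` query parameter and the response fields come from the docstring above.

import requests

resp = requests.get(
    "http://0.0.0.0:80/predict",  # hypothetical route for this view
    params={"url": "https://example.com/images/plastic_bottle.jpg"},
)
print(resp.status_code)
print(resp.json())  # expected keys: "url", "class", "probability"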
Example #23
    def open(self, fn, **kwargs):
        return open_image(fn, **kwargs)


def addmask(img, i):
    mask = image.open_image('{:s}/{:06d}.jpg'.format("masks", i),
                            convert_mode="L")
    img.px = torch.max(img.px, mask.px)
    return img


def addgrain(img, i, alpha):
    grain = image.open_image('{:s}/{:06d}.jpg'.format("grains", i),
                             convert_mode="L")
    img.px = torch.mul(img.px, grain.px * alpha + (1 - alpha))
    return img
    contrast = np.clip(rnd[0] * 0.5 + 1, a_min=0.4, a_max=3)
    brightness = np.clip(rnd[1] * 0.25 + 0.5, a_min=0.2, a_max=0.8)
    grainalpha = np.clip(rnd[2] * 0.25 + 0.5, a_min=0, a_max=1) * 0.2
    return deterioration(img, index, contrast, brightness, grainalpha)


def loguniform(low=0, high=1, size=None):
    return np.exp(np.random.uniform(np.log(low), np.log(high), size))


def expuniform(low=0, high=1, size=None):
    return np.log(np.random.uniform(np.exp(low), np.exp(high), size))


data = []
for filename in glob.glob('color/*.jpg'):
    im = image.open_image(filename, convert_mode='L')

    contrast = np.random.uniform(0.5, 3)
    brightness = np.random.uniform(0.2, 0.8)
    #    contrast = np.clip(rnd[0]*0.5+1, a_min=0.4, a_max=3)
    #    brightness = np.clip(rnd[1]*0.25+0.5, a_min=0.2, a_max=0.8)
    data.append([path.basename(filename), contrast, brightness])
    im = deterioration(im, 0, contrast, brightness)
    im.save("deteriorated/" + path.basename(filename))

df = pd.DataFrame(columns=["image", "contrasts", "brightness"], data=data)
df.head()

df.set_index("image").to_csv("deteriorated/data.csv")
Example #27
    def _categorize_feature_class(
        self,
        feature_class,
        raster,
        class_value_field,
        class_name_field,
        confidence_field,
        cell_size,
        coordinate_system,
        predict_function,
        batch_size,
        overwrite
    ):
        import arcpy
        arcpy.env.overwriteOutput = overwrite

        if batch_size is None:
            batch_size  = self._data.batch_size

        if predict_function is None:
            predict_function = _prediction_function
        
        norm_mean = torch.tensor(imagenet_stats[0])
        norm_std = torch.tensor(imagenet_stats[1])
        
        fcdesc = arcpy.Describe(feature_class)
        oid_field = fcdesc.OIDFieldName
        if not (fcdesc.dataType == 'FeatureClass' and fcdesc.shapeType == 'Polygon'):
            e = Exception(f"The specified FeatureClass at '{feature_class}' is not valid, it should be Polygon FeatureClass")
            raise(e)
        fields = arcpy.ListFields(feature_class)
        field_names = [f.name for f in fields]
        if class_value_field in field_names:
            if not overwrite:
                e = Exception(f"The specified class_value_field '{class_value_field}' already exists in the target FeatureClass, please specify a different name or set `overwrite=True`")
                raise(e)
        arcpy.DeleteField_management(feature_class, 
                                [ class_value_field ])
        arcpy.AddField_management(feature_class, class_value_field, "LONG")
            
        if class_name_field in field_names:
            if not overwrite:
                e = Exception(f"The specified class_name_field '{class_name_field}' already exists in the target FeatureClass, please specify a different name or set `overwrite=True`")
                raise(e)
        arcpy.DeleteField_management(feature_class, 
                                [ class_name_field ])
        arcpy.AddField_management(feature_class, class_name_field, "TEXT")

        if confidence_field is not None:
            if confidence_field in field_names:
                if not overwrite:
                    e = Exception(f"The specified confidence_field '{confidence_field}' already exists in the target FeatureClass, please specify a different name or set `overwrite=True`")
                    raise(e)
            arcpy.DeleteField_management(feature_class, 
                                    [ confidence_field ])
            arcpy.AddField_management(feature_class, confidence_field, "DOUBLE")

        if raster is not None:
            #Arcpy Environment to export data
            arcpy.env.cellSize = cell_size
            arcpy.env.outputCoordinateSystem = coordinate_system
            arcpy.env.cartographicCoordinateSystem = coordinate_system

            tempid_field = _tempid_field = 'f_fcuid'
            i = 1
            while tempid_field in field_names:
                tempid_field = _tempid_field + str(i)
                i+=1
            arcpy.AddField_management(feature_class, tempid_field, "LONG")
            arcpy.CalculateField_management(feature_class, tempid_field, f"!{oid_field}!")

            temp_folder = arcpy.env.scratchFolder
            temp_datafldr = os.path.join(temp_folder, 'categorize_features_'+str(int(time.time())))
            result = arcpy.ia.ExportTrainingDataForDeepLearning(
                in_raster=raster,
                out_folder=temp_datafldr,
                in_class_data=feature_class,
                image_chip_format="TIFF",
                tile_size_x=self._data.chip_size,
                tile_size_y=self._data.chip_size,
                stride_x=0,
                stride_y=0,
                output_nofeature_tiles="ALL_TILES",
                metadata_format="Labeled_Tiles",
                start_index=0,
                class_value_field=tempid_field,
                buffer_radius=0,
                in_mask_polygons=None,
                rotation_angle=0
            )
            # cleanup
            arcpy.DeleteField_management(feature_class, [ tempid_field ])
            image_list = ImageList.from_folder(os.path.join(temp_datafldr, 'images'))
            def get_id(imagepath):
                with open(os.path.join(temp_datafldr, 'labels', os.path.basename(imagepath)[:-3]+'xml')) as f:
                    return(int(f.read().split('<name>')[1].split('<')[0]))

            for i in range(0, len(image_list), batch_size):
                # Get Temporary Ids
                tempids =[ get_id(f) for f in image_list.items[i:i+batch_size] ]
                
                # Get Image batch
                image_batch = torch.stack([ im.data for im in image_list[i:i+batch_size] ])
                image_batch = normalize(image_batch, mean=norm_mean, std=norm_std)
                
                # Get Predictions
                predicted_classes, predictions_conf = self._predict_batch(image_batch)
                
                # Update Feature Class
                where_clause = f"{oid_field} IN ({','.join(str(e) for e in tempids)})"
                update_cursor = arcpy.UpdateCursor(
                    feature_class,
                    where_clause=where_clause,
                    sort_fields=f"{oid_field} A"
                )
                for row in update_cursor:
                    row_tempid = row.getValue(oid_field)
                    ui = tempids.index(row_tempid)
                    classvalue = self._data.classes[predicted_classes[ui]]
                    row.setValue(class_value_field, classvalue)
                    row.setValue(class_name_field, self._data.class_mapping[classvalue])
                    if confidence_field is not None:
                        row.setValue(confidence_field, predictions_conf[ui])
                    update_cursor.updateRow(row)

                # Remove Locks
                del row
                del update_cursor

            # Cleanup
            arcpy.Delete_management(temp_datafldr)
            shutil.rmtree(temp_datafldr, ignore_errors=True)

        else:
            feature_class_attach = feature_class+'__ATTACH'
            nrows = arcpy.GetCount_management(feature_class_attach)[0]
            store={}
            for i in range(0, int(nrows), batch_size):
                attachment_ids = []
                rel_objectids = []
                image_batch = []
                
                # Get Image Batch
                with arcpy.da.SearchCursor(feature_class_attach, [ 'ATTACHMENTID', 'REL_OBJECTID', 'DATA' ]) as search_cursor:
                    for c, item in enumerate(search_cursor):
                        if c >= i and c < i+batch_size :
                            attachment_ids.append(item[0])
                            rel_objectids.append(item[1])
                            attachment = item[-1]
                            im = open_image(io.BytesIO(attachment.tobytes())) # Read Bytes
                            im = im.resize(self._data.chip_size) # Resize
                            image_batch.append(im.data) # Convert to tensor
                            del item
                            del attachment
                            #del im
                image_batch = torch.stack(image_batch)
                image_batch = normalize(image_batch, mean=norm_mean, std=norm_std)
                
                # Get Predictions and save to store
                predicted_classes, predictions_conf = self._predict_batch(image_batch)
                for ai in range(len(attachment_ids)):
                    if store.get(rel_objectids[ai]) is None:
                        store[rel_objectids[ai]] = []
                    store[rel_objectids[ai]].append([predicted_classes[ai], predictions_conf[ai]])
                
            # Update Feature Class
            update_cursor = arcpy.UpdateCursor(feature_class)
            for row in update_cursor:
                row_oid = row.getValue(oid_field)
                max_prediction_class, max_prediction_value = predict_function(store[row_oid])
                if max_prediction_class is not None:
                    classvalue = self._data.classes[max_prediction_class]
                    classname = self._data.class_mapping[classvalue]
                else:
                    classvalue = None
                    classname = None
                row.setValue(class_value_field, classvalue)
                row.setValue(class_name_field, classname)
                if confidence_field is not None:
                    row.setValue(confidence_field, max_prediction_value)
                update_cursor.updateRow(row)

            # Remove Locks
            del row
            del update_cursor
        return True
Example #28
    def _categorize_feature_layer(
        self,
        feature_layer,
        raster,
        class_value_field,
        class_name_field,
        confidence_field,
        cell_size,
        coordinate_system,
        predict_function,
        batch_size,
        overwrite
    ):  
        #
        norm_mean = torch.tensor(imagenet_stats[0])
        norm_std = torch.tensor(imagenet_stats[1])

        # Check and create Fields 
        class_name_field_template = {
            "name": class_name_field.lower(),
            "type": "esriFieldTypeString",
            "alias": class_name_field,
            "sqlType": "sqlTypeOther",
            "length": 256,
            "nullable": True,
            "editable": True,
            "visible": True,
            "domain": None,
            "defaultValue": ''
        }

        class_value_field_template = {
            "name": class_value_field.lower(),
            "type": "esriFieldTypeInteger",
            "alias": class_value_field,
            "sqlType": "sqlTypeOther",
            "nullable": True,
            "editable": True,
            "visible": True,
            "domain": None,
            "defaultValue": -999
        }

        to_delete = []
        to_create = []

        feature_layer_fields = { f['name'].lower():f for f in feature_layer.properties["fields"] }
        oid_field = feature_layer.properties['objectIdField']

        if class_value_field_template['name'] in feature_layer_fields:
            if overwrite:
                to_delete.append(feature_layer_fields[class_value_field_template['name']])
            else:
                e = Exception(f"The specified class_value_field '{class_value_field}' already exists, please specify a different name or set `overwrite=True`")
                raise(e)
        to_create.append(class_value_field_template)

        if class_name_field_template['name'] in feature_layer_fields:
            if overwrite:
                to_delete.append(feature_layer_fields[class_name_field_template['name']])
            else:
                e = Exception(f"The specified class_name_field '{class_name_field}' already exists, please specify a different name or set `overwrite=True`")
                raise(e)
        to_create.append(class_name_field_template)
        
        if confidence_field is not None:
            confidence_field_template = {
                "name": confidence_field.lower(),
                "type": "esriFieldTypeDouble",
                "alias": confidence_field,
                "sqlType": "sqlTypeDouble",
                "nullable": True,
                "editable": True,
                "visible": True,
                "domain": None,
                "defaultValue": -999
            }
            if confidence_field_template['name'] in feature_layer_fields:
                if overwrite:
                    to_delete.append(feature_layer_fields[confidence_field_template['name']])
                else:
                    e = Exception(f"The specified confidence_field '{confidence_field}' already exists, please specify a different name or set `overwrite=True`")
                    raise(e)
            to_create.append(confidence_field_template)

        feature_layer.manager.delete_from_definition({'fields': to_delete})
        feature_layer.manager.add_to_definition({'fields': to_create})

        # Get the features to update
        fields_to_update = [oid_field, class_value_field, class_name_field]
        if confidence_field is not None:
            fields_to_update.append(confidence_field)
        feature_layer_features = feature_layer.query(out_fields=",".join(fields_to_update), return_geometry=False).features
        update_store = {}

        if raster is not None:
            import arcpy

            #Arcpy Environment to export data
            arcpy.env.cellSize = cell_size
            arcpy.env.outputCoordinateSystem = coordinate_system
            arcpy.env.cartographicCoordinateSystem = coordinate_system

            feature_layer_url = feature_layer.url

            if feature_layer._token is not None:
                feature_layer_url = feature_layer_url + f"?token={feature_layer._token}"

            
            
            # Create Temporary ID field
            tempid_field = _tempid_field = 'f_fcuid'
            i = 1
            while tempid_field in feature_layer_fields:
                tempid_field = _tempid_field + str(i)
                i+=1
            arcpy.AddField_management(feature_layer_url, tempid_field, "LONG")
            #feature_layer.manager.add_to_definition({'fields': [tempid_field_template]})
            arcpy.CalculateField_management(feature_layer_url, tempid_field, f"{oid_field}", "SQL")

            temp_folder = arcpy.env.scratchFolder
            temp_datafldr = os.path.join(temp_folder, 'categorize_features_'+str(int(time.time())))
            result = arcpy.ia.ExportTrainingDataForDeepLearning(
                in_raster=raster,
                out_folder=temp_datafldr,
                in_class_data=feature_layer_url,
                image_chip_format="TIFF",
                tile_size_x=self._data.chip_size,
                tile_size_y=self._data.chip_size,
                stride_x=0,
                stride_y=0,
                output_nofeature_tiles="ALL_TILES",
                metadata_format="Labeled_Tiles",
                start_index=0,
                class_value_field=tempid_field,
                buffer_radius=0,
                in_mask_polygons=None,
                rotation_angle=0
            )
            # cleanup
            arcpy.DeleteField_management(feature_layer_url, [ tempid_field ])

            image_list = ImageList.from_folder(os.path.join(temp_datafldr, 'images'))
            def get_id(imagepath):
                with open(os.path.join(temp_datafldr, 'labels', os.path.basename(imagepath)[:-3]+'xml')) as f:
                    return(int(f.read().split('<name>')[1].split('<')[0]))

            for i in range(0, len(image_list), batch_size):
                # Get Temporary Ids
                tempids = [ get_id(f) for f in image_list.items[i:i+batch_size] ]
                
                # Get Image batch
                image_batch = torch.stack([ im.data for im in image_list[i:i+batch_size] ])
                image_batch = normalize(image_batch, mean=norm_mean, std=norm_std)
                
                # Get Predictions
                predicted_classes, predictions_conf = self._predict_batch(image_batch)
                
                # push prediction to store
                for ui, oid in enumerate(tempids):
                    classvalue = self._data.classes[predicted_classes[ui]]
                    update_store[oid] = {
                        oid_field: oid,
                        class_value_field: classvalue,
                        class_name_field:  self._data.class_mapping[classvalue]
                    } 
                    if confidence_field is not None:
                        update_store[oid][confidence_field] = predictions_conf[ui]

            # Cleanup
            arcpy.Delete_management(temp_datafldr)
            shutil.rmtree(temp_datafldr, ignore_errors=True)

        else:
            out_folder = tempfile.TemporaryDirectory().name
            os.mkdir(out_folder)
            feature_layer.export_attachments(out_folder)
            with open(os.path.join(out_folder, 'mapping.txt')) as file:
                feature_attachments_mapping = json.load(file)
                images_store = []
                for oid in feature_attachments_mapping:
                    for im in feature_attachments_mapping[oid]:
                        images_store.append({
                            'oid': oid,
                            'im': os.path.join(out_folder,im)
                        })
            update_store_scratch = {}
            for i in range(0, len(images_store), batch_size):
                rel_objectids = []
                image_batch = []
                for r in images_store[i:i+batch_size]:
                    im = open_image(r['im']) # Read Bytes
                    im = im.resize(self._data.chip_size) # Resize
                    image_batch.append(im.data) # Convert to tensor
                    rel_objectids.append(int(r['oid']))
                image_batch = torch.stack(image_batch)
                image_batch = normalize(image_batch, mean=norm_mean, std=norm_std)
                # Get Predictions and save to scratch
                predicted_classes, predictions_conf = self._predict_batch(image_batch)
                for ai, oid in enumerate(rel_objectids):
                    if update_store_scratch.get(oid) is None:
                        update_store_scratch[oid] = []
                    update_store_scratch[oid].append([predicted_classes[ai], predictions_conf[ai]])
            # Prepare final updated features
            for oid in update_store_scratch:
                max_prediction_class, max_prediction_value = predict_function(update_store_scratch[oid])
                if max_prediction_class is not None:
                    classvalue = self._data.classes[max_prediction_class]
                    classname = self._data.class_mapping[classvalue]
                else:
                    classvalue = None
                    classname = None                
                update_store[oid] = {
                    oid_field: oid,
                    class_value_field: classvalue,
                    class_name_field:  classname
                } 
                if confidence_field is not None:
                    update_store[oid][confidence_field] = max_prediction_value

        # Update Features
        features_to_update = []
        for feat in feature_layer_features:
            if update_store.get(feat.attributes[oid_field]) is not None:
                updated_attributes = update_store[feat.attributes[oid_field]]
                for f in fields_to_update:
                    feat.attributes[f] = updated_attributes[f]
                features_to_update.append(feat)
        step = 100
        for si in range(0, len(features_to_update), step):
            feature_batch = features_to_update[si:si+step]
            response = feature_layer.edit_features(updates=feature_batch)
            for resp in response.get('updateResults', []):
                if resp.get('success', False):
                    continue
                warnings.warn(f"Something went wrong for data {resp}")
            time.sleep(2)
Example #29
            img = img_to_array(img) / 255
            if batchImages.shape == (0, ):
                batchImages = np.array([img])
                continue
            batchImages = np.r_[batchImages, np.array([img])]
        features = model.predict(batchImages, batch_size=batchImages.shape[0])
        features = features.reshape(
            (features.shape[0], np.prod(features.shape[1:])))
        if test_x.shape == (0, ):
            test_x = features
            continue
        test_x = np.r_[test_x, features]

    # deserialize model of classifier
    model = pickle.load(open(MODEL_PATH, 'rb'))

    # evaluate the model
    preds = model.predict(test_x)
    df = pd.DataFrame(np.array([[str(i).zfill(4), LABELS[int(p)]]
                                for i, p in enumerate(preds)]),
                      columns=('id', 'label'))
    df.to_csv(BASE_CSV_PATH + "/test_prediction.csv", index=False)
    return df


df = test(10)
df.describe()
for i, label in df.values[:10]:
    img_paths = list(paths.list_images(TEST_DIR))
    img = image.open_image(TEST_DIR + i + '.jpg')
    img.show(title=label)
Example #30
def predict(filename):
    app_root = os.path.dirname(os.path.abspath(__file__))
    app_root = os.path.join(app_root, 'Skincancermodel')
    p = ['Actinic keratoses',
         'Basal cell carcinoma',
         'Benign keratosis',
         'Dermatofibroma',
         'Melanocytic nevi',
         'Melanoma',
         'Vascular lesions']
    img_fastai = open_image(os.path.join(app_root, filename))  # fastai Image for the classifier
    img = cv2.imread(os.path.join(app_root, filename))  # OpenCV BGR array for skin detection
    # convert from BGR to HSV color space
    img_HSV = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # skin color range in HSV
    HSV_mask = cv2.inRange(img_HSV, (0, 15, 0), (17, 170, 255))
    HSV_mask = cv2.morphologyEx(HSV_mask, cv2.MORPH_OPEN, np.ones((3,3), np.uint8))

    # convert from BGR to YCrCb color space
    img_YCrCb = cv2.cvtColor(img, cv2.COLOR_BGR2YCrCb)
    # skin color range in YCrCb
    YCrCb_mask = cv2.inRange(img_YCrCb, (0, 135, 85), (255, 180, 135))
    YCrCb_mask = cv2.morphologyEx(YCrCb_mask, cv2.MORPH_OPEN, np.ones((3,3), np.uint8))

    #merge skin detection (YCbCr and hsv)
    global_mask=cv2.bitwise_and(YCrCb_mask,HSV_mask)
    global_mask=cv2.medianBlur(global_mask,3)
    global_mask = cv2.morphologyEx(global_mask, cv2.MORPH_OPEN, np.ones((4,4), np.uint8))


    HSV_result = cv2.bitwise_not(HSV_mask)
    YCrCb_result = cv2.bitwise_not(YCrCb_mask)
    global_result=cv2.bitwise_not(global_mask)

    #print(global_result.size)
    #show results
    # cv2.imshow("1_HSV.jpg",HSV_result)
    # cv2.imshow("2_YCbCr.jpg",YCrCb_result)
    # cv2.imshow("3_global_result.jpg",global_result)
    # cv2.imshow("Image.jpg",img)
    #cv2.imwrite("1_HSV.jpg",HSV_result)
    #cv2.imwrite("2_YCbCr.jpg",YCrCb_result)
    #cv2.imwrite("3_global_result.jpg",global_result)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()  
    # ii / jj is the fraction of pixels the masks classify as non-skin
    ii = int(global_result.sum())
    jj = 255 * global_result.size
    print(ii)
    print(jj)
    print(ii / jj)
    if ii / jj > 0.5:
        return {
            "answer": "no skin"
        }
    #os.remove(os.path.join(app_root,filename))
    learn = load_learner(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'Skincancermodel'),
                         "export.pkl")
    # predict once on the fastai Image (not the OpenCV array) and reuse the result
    bb = learn.predict(img_fastai)
    aa = p[bb[1]]
    predict_names = {
        "answer": aa,
        "v1": str(bb[0]),
        "v2": str(bb[1]),
        "v3": str(bb[2])
    }
    print("predict______predict")
    print(predict_names)
    return predict_names