Example #1
import math
import os

from skimage import io, util
from skimage.transform import resize

# cfg and utils are project-local modules; computeOverlap is a project
# helper (a sketch of it follows this example).


def save_windows(boxes, imagePath):
    image_color = io.imread(imagePath, as_gray=False)  # as_grey is the deprecated spelling
    image_color = util.img_as_ubyte(image_color)
    imageFilename = os.path.basename(imagePath)  # Get the filename
    imageBasename = os.path.splitext(imageFilename)[0]  # Take out the extension
    annotationsFilePath = cfg.annotationsFolderPath + 'gt.' + imageBasename + '.txt'
    annotatedBoxes = utils.readINRIAAnnotations(annotationsFilePath)
    signalTypes = utils.readINRIAAnnotationsDetection(annotationsFilePath)
    signalTypes = list(reversed(signalTypes))
    count = 0
    for box in boxes:
        # Skip windows that fall outside the image bounds.
        if box[0] < 0 or box[1] < 0:
            continue
        if box[2] >= image_color.shape[1] or box[3] >= image_color.shape[0]:
            continue
        # Label the window with the annotated signal type when it overlaps
        # an annotated box by more than 50%; otherwise mark it NONSIGNAL.
        annotated = 'NONSIGNAL'
        for idx, aBox in enumerate(annotatedBoxes):
            currentRatio = computeOverlap(box, aBox)
            currentRatio = math.ceil(currentRatio * 10) / 10
            if currentRatio > 0.5:
                annotated = signalTypes[idx]
                break
        crop = image_color[box[1]:box[3], box[0]:box[2]]
        # Build the crop name from the base name extracted above.
        filename = imageBasename + '.' + str(count) + '.' + annotated + '.jpg'
        crop = resize(crop, (32, 32))
        crop = util.img_as_ubyte(crop)  # resize returns floats; convert back before saving
        os.makedirs('Crops', exist_ok=True)  # Make sure the output folder exists
        io.imsave('Crops/' + filename, crop)  # Save the crop
        print('Crop saved')
        count += 1
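computeOverlap is a project-local helper that the example above calls but does not show. A minimal sketch, assuming boxes are (x1, y1, x2, y2) sequences and the ratio is intersection-over-union; the project's actual definition may differ:

def computeOverlap(boxA, boxB):
    # Intersection rectangle of the two boxes
    xA = max(boxA[0], boxB[0])
    yA = max(boxA[1], boxB[1])
    xB = min(boxA[2], boxB[2])
    yB = min(boxA[3], boxB[3])
    interArea = max(0, xB - xA) * max(0, yB - yA)
    if interArea == 0:
        return 0.0
    areaA = (boxA[2] - boxA[0]) * (boxA[3] - boxA[1])
    areaB = (boxB[2] - boxB[0]) * (boxB[3] - boxB[1])
    # Union = sum of both areas minus the intersection counted twice
    return interArea / float(areaA + areaB - interArea)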
import os
import pickle

import numpy as np

# cfg, utils, nms_comp and eval are project-local modules.
# detection_thresholds is assumed to be a module-level array; run()
# below shows how it is built from cfg.


def f(resultsFile, model):

    totalTP = np.zeros(len(detection_thresholds))
    totalFN = np.zeros(len(detection_thresholds))
    totalFP = np.zeros(len(detection_thresholds))

    resultsFilePath = cfg.resultsFolder + '/' + resultsFile

    # Pickle files must be opened in binary mode.
    with open(resultsFilePath, 'rb') as resultsFileHandle:
        imageResults = pickle.load(resultsFileHandle)

    #Retrieve the data for this result
    detectedBoxes = imageResults['bboxes']
    detectedScores = imageResults['scores']
    imagePath = imageResults['imagepath']
    modelIndexes = imageResults['model']

    imageFilename = os.path.basename(imagePath)  # Get the filename
    imageBasename = os.path.splitext(imageFilename)[0]  # Take out the extension

    #Find the annotations for this image.
    annotationsFilePath = cfg.annotationsFolderPath + 'gt.' + imageBasename + '.txt'
    annotatedBoxes = utils.readINRIAAnnotations(annotationsFilePath)

    for curThreshIDX, thresh in enumerate(detection_thresholds):
        #Select only the bounding boxes that passed the current detection
        #threshold. Filter from the full detections each time so that one
        #threshold's NMS result does not leak into the next threshold.
        idx, = np.where(detectedScores > thresh)

        if len(idx) > 0:
            threshBoxes = detectedBoxes[idx]
            threshScores = detectedScores[idx]
            threshModels = modelIndexes[idx]
            #Apply NMS on the selected bounding boxes
            threshBoxes, threshScores, threshModels = nms_comp.non_max_suppression_fast(
                threshBoxes,
                threshScores,
                threshModels,
                overlapthresh=cfg.nmsOverlapThresh)
        else:
            threshBoxes = []
            threshScores = []
            threshModels = []

        #Compute the statistics for the current detected boxes
        TP, FP, FN = eval.evaluateImage(
            annotatedBoxes, threshBoxes, threshModels, model,
            threshScores)  #For model compensation

        totalTP[curThreshIDX] += TP
        totalFP[curThreshIDX] += FP
        totalFN[curThreshIDX] += FN

    return [totalTP, totalFP, totalFN]
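f reads one .result file and accumulates per-threshold TP/FP/FN counts. A hypothetical driver, mirroring what run() below does inline; it assumes it lives in the same module as f (so detection_thresholds is shared) and that model and the cfg fields come from the surrounding script:

detection_thresholds = np.arange(cfg.decision_threshold_min,
                                 cfg.decision_threshold_max,
                                 cfg.decision_threshold_step)

resultsFileList = [el for el in os.listdir(cfg.resultsFolder) if '.result' in el]

totalTP = np.zeros(len(detection_thresholds))
totalFP = np.zeros(len(detection_thresholds))
totalFN = np.zeros(len(detection_thresholds))
for resultsFile in resultsFileList:
    TP, FP, FN = f(resultsFile, model)  # model: assumed to be the loaded detector
    totalTP += TP
    totalFP += FP
    totalFN += FN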
import os
import pickle

import matplotlib.pyplot as plt
import numpy as np

# cfg, utils, nms and eval are project-local modules.


def run():

    print('Start evaluating results')
    fileList = os.listdir(cfg.resultsFolder)
    resultsFileList = [element for element in fileList if '.result' in element]

    detection_thresholds = np.arange(cfg.decision_threshold_min,
                                     cfg.decision_threshold_max,
                                     cfg.decision_threshold_step)

    totalTP = np.zeros(len(detection_thresholds))
    totalFN = np.zeros(len(detection_thresholds))
    totalFP = np.zeros(len(detection_thresholds))

    for resultsFile in resultsFileList:
        resultsFilePath = cfg.resultsFolder + '/' + resultsFile

        # Pickle files must be opened in binary mode.
        with open(resultsFilePath, 'rb') as resultsFileHandle:
            imageResults = pickle.load(resultsFileHandle)

        #Retrieve the data for this result
        detectedBoxes = imageResults['bboxes']
        detectedScores = imageResults['scores']
        imagePath = imageResults['imagepath']

        imageFilename = os.path.basename(imagePath)  # Get the filename
        imageBasename = os.path.splitext(imageFilename)[0]  # Take out the extension

        #Find the annotations for this image.
        annotationsFilePath = cfg.annotationsFolderPath + '/' + imageBasename + '.txt'
        annotatedBoxes = utils.readINRIAAnnotations(annotationsFilePath)

        for curThreshIDX, thresh in enumerate(detection_thresholds):
            #Select only the bounding boxes that passed the current detection
            #threshold. Filter from the full detections each time so that one
            #threshold's NMS result does not leak into the next threshold.
            idx, = np.where(detectedScores > thresh)

            if len(idx) > 0:
                threshBoxes = detectedBoxes[idx]
                threshScores = detectedScores[idx]
                #Apply NMS on the selected bounding boxes
                threshBoxes, threshScores = nms.non_max_suppression_fast(
                    threshBoxes, threshScores, overlapthresh=cfg.nmsOverlapThresh)
            else:
                threshBoxes = []
                threshScores = []

            #Compute the statistics for the current detected boxes
            TP, FP, FN = eval.evaluateImage(annotatedBoxes, threshBoxes, threshScores)

            totalTP[curThreshIDX] += TP
            totalFP[curThreshIDX] += FP
            totalFN[curThreshIDX] += FN

    #Compute metrics
    print(totalTP + totalFP)
    detection_rate = totalTP / (totalTP + totalFN)  # Detection rate
    miss_rate = 1 - detection_rate  # Miss rate
    fppi = totalFP / len(resultsFileList)  # FPPI (false positives per image)

    #Plot the results
    plt.figure()
    plt.plot(fppi, miss_rate, 'r', label='Miss-Rate vs FPPI')

    plt.xlabel('FPPI')
    plt.ylabel('Miss rate')

    plt.title(cfg.model + ' ' + cfg.modelFeatures)
    plt.legend()
    plt.show()
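The nms.non_max_suppression_fast call above keeps, within each cluster of overlapping detections, only the highest-scoring box. A minimal sketch in the style of the widely used Malisiewicz implementation; the project's actual version may differ (the nms_comp variant in the earlier example additionally carries model indexes through):

import numpy as np

def non_max_suppression_fast(boxes, scores, overlapthresh):
    # boxes: (N, 4) array of (x1, y1, x2, y2) rows; scores: (N,) array
    if len(boxes) == 0:
        return np.array([]), np.array([])
    boxes = boxes.astype('float')
    x1, y1, x2, y2 = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3]
    area = (x2 - x1 + 1) * (y2 - y1 + 1)
    order = np.argsort(scores)  # ascending: the best box is last
    keep = []
    while len(order) > 0:
        last = len(order) - 1
        i = order[last]
        keep.append(i)
        # Intersection of the best box with all remaining boxes
        xx1 = np.maximum(x1[i], x1[order[:last]])
        yy1 = np.maximum(y1[i], y1[order[:last]])
        xx2 = np.minimum(x2[i], x2[order[:last]])
        yy2 = np.minimum(y2[i], y2[order[:last]])
        w = np.maximum(0, xx2 - xx1 + 1)
        h = np.maximum(0, yy2 - yy1 + 1)
        # Overlap measured relative to each remaining box's own area
        overlap = (w * h) / area[order[:last]]
        # Drop the best box and everything that overlaps it too much
        order = np.delete(order, np.concatenate(
            ([last], np.where(overlap > overlapthresh)[0])))
    return boxes[keep].astype('int'), scores[keep]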