Example #1
def testImage(imagePath):

    fileList = os.listdir(cfg.modelRootPath)

    # Filter all model files
    modelsList = filter(lambda element: '.model' in element, fileList)

    # Filter our specific feature method
    currentModel = cfg.modelFeatures
    currentModelsList = filter(lambda element: currentModel in element,
                               modelsList)

    models = []

    for modelname in currentModelsList:

        file = open(cfg.modelRootPath + modelname, 'r')
        svc = pickle.load(file)
        models.append(svc)

        file.close()

    image = io.imread(imagePath, as_grey=True)
    image = util.img_as_ubyte(image)  #Convert the image to 8-bit unsigned integers (pixel values 0-255)

    feats = feature_extractor.extractFeatures(image, imagePath)
    max_score = -2
    counter = 0
    model_index = 14  #Background class index

    #Obtain prediction score for each model
    for model in models:

        decision_func = model.decision_function(feats)
        score = decision_func[0]
        if score > max_score:
            max_score = score
            modelname = currentModelsList[counter]
            model_index = modelname.split('_')
            model_index = model_index[2]
            model_index = model_index[0:len(model_index) -
                                      6]  #Parse class index from model name
        counter += 1

    print(model_index)
    #Condition by intuition: if the score is too low, is it background?
    # if max_score < cfg.min_score:
    #     model_index = cfg.index_background   #Assign background index

    return model_index
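The class-index parsing above appears to assume model filenames of the form <model>_<features>_<classIndex>.model, where the third underscore-separated field holds the class index followed by the 6-character '.model' suffix. A minimal sketch of that parsing on a hypothetical filename (the name itself is only an illustration, not taken from the project):

# Hypothetical filename; real names are listed from cfg.modelRootPath.
modelname = 'SVM_HOG_7.model'

# Same steps as in testImage: split on '_', take the third field,
# then drop the 6-character '.model' suffix to recover the class index.
parts = modelname.split('_')
model_index = parts[2]
model_index = model_index[0:len(model_index) - 6]   # leaves '7'
print(model_index)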
Example #2
def testImage(imagePath):

    fileList = os.listdir(cfg.modelRootPath)

    # Filter all model files
    modelsList = filter(lambda element: '.model' in element, fileList)

    # Filter our specific feature method
    currentModel = cfg.modelFeatures
    currentModelsList = filter(lambda element: currentModel in element, modelsList)

    models = []

    for modelname in currentModelsList:

        file = open(cfg.modelRootPath + modelname, 'r')
        svc = pickle.load(file)
        models.append(svc)

        file.close()

    image = io.imread(imagePath, as_grey=True)
    image = util.img_as_ubyte(image) #Convert the image to 8-bit unsigned integers (pixel values 0-255)

    feats = feature_extractor.extractFeatures(image, imagePath)
    max_score = -2
    counter = 0
    model_index = 14 #Background class index

    #Obtain prediction score for each model
    for model in models:

        decision_func = model.decision_function(feats)
        score = decision_func[0]
        if score > max_score:
            max_score = score
            modelname = currentModelsList[counter]
            model_index = modelname.split('_')
            model_index = model_index[2]
            model_index = model_index[0:len(model_index)-6]     #Parse class index from model name
        counter += 1

    print(model_index)
    #Condition by intuition: if the score is too low, is it background?
    # if max_score < cfg.min_score:
    #     model_index = cfg.index_background   #Assign background index

    return model_index
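Both examples above are written for Python 2: print is used as a statement, the pickled models are opened in text mode, and the result of filter() is indexed directly. A sketch of the same model-loading step written so it also runs under Python 3, assuming the project's cfg object exposes modelRootPath and modelFeatures as above (the loadModels helper name is only for illustration):

import os
import pickle

def loadModels(cfg):
    # List model files and keep those matching the configured feature method,
    # as in testImage, but with explicit lists instead of filter objects.
    fileList = os.listdir(cfg.modelRootPath)
    modelsList = [name for name in fileList if '.model' in name]
    currentModelsList = [name for name in modelsList if cfg.modelFeatures in name]

    models = []
    for modelname in currentModelsList:
        # Pickled classifiers must be opened in binary mode under Python 3.
        with open(os.path.join(cfg.modelRootPath, modelname), 'rb') as modelFile:
            models.append(pickle.load(modelFile))
    return models, currentModelsList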
Example #3
def testImage(imagePath, decisionThreshold = cfg.decision_threshold, applyNMS=True):

    fileList = os.listdir(cfg.modelRootPath)

    # Filter all model files
    modelsList = filter(lambda element: '.model' in element, fileList)

    # Filter our specific feature method
    currentModel = cfg.model+'_'+cfg.modelFeatures
    currentModelsList = filter(lambda element: currentModel in element, modelsList)

    models = []
    subImages = [] #To save background crops

    for modelname in currentModelsList:

        file = open(cfg.modelRootPath + modelname, 'r')
        svc = pickle.load(file)
        models.append(svc)

        file.close()

    image = io.imread(imagePath, as_grey=True)
    image = util.img_as_ubyte(image) #Convert the image to 8-bit unsigned integers (pixel values 0-255)

    rows, cols = image.shape
    pyramid = tuple(pyramid_gaussian(image, downscale=cfg.downScaleFactor))

    scale = 0
    boxes = None
    scores = None


    for p in pyramid[0:]:
        #We now have the subsampled image in p
        window_shape = (32,32)

        #Add padding to the image, using reflection to avoid border effects
        if cfg.padding > 0:
            p = pad(p,cfg.padding,'reflect')

        try:
            views = view_as_windows(p, window_shape, step=cfg.window_step)
        except ValueError:
            #block shape is bigger than image
            break

        num_rows, num_cols, width, height = views.shape

        for row in range(0, num_rows):
            for col in range(0, num_cols):
                #Get current window
                subImage = views[row, col]
                # subImages.append(subImage)   #To save background crops: Accumulate them in an array
                #Extract features
                feats = feature_extractor.extractFeatures(subImage)

                #Obtain prediction score for each model
                for model in models:

                    decision_func = model.decision_function(feats)

                    # if decision_func > decisionThreshold:
                    if decision_func > 0.2:  #For bootstrapping
                        # Signal found!
                        h, w = window_shape
                        scaleMult = math.pow(cfg.downScaleFactor, scale)

                        x1 = int(scaleMult * (col*cfg.window_step - cfg.padding + cfg.window_margin))
                        y1 = int(scaleMult * (row*cfg.window_step - cfg.padding + cfg.window_margin))
                        x2 = int(x1 + scaleMult*(w - 2*cfg.window_margin))
                        y2 = int(y1 + scaleMult*(h - 2*cfg.window_margin))

                        #bootstrapping: Save image (if positive)
                        subImages.append(subImage)

                        bbox = (x1, y1, x2, y2)
                        score = decision_func[0]

                        if boxes is not None:
                            boxes = np.vstack((bbox, boxes))
                            scores = np.hstack((score, scores))
                        else:
                            boxes = np.array([bbox])
                            scores = np.array([score])
                        break

        scale += 1


    # To save background crops
    # numSubImages = len(subImages)
    # for x in range(0,10): #Save 10 crops for each background image
    #     randomIndex = random.randint(1,numSubImages-1) #Get a random window index
    #     imageName = imagePath.split('/')  #Working on the crop name...
    #     imageName = imageName[len(imageName)-1]
    #     filename = (imageName[:-4]+'-'+str(x)+'.jpg')
    #     io.imsave('Results/'+filename, subImages[randomIndex])  #Save the crop
    #end To save backgorund crops

    # To save bootstrapping windows
    numSubImages = len(subImages)
    length = min(10, len(subImages))
    for x in range(0, length): #Save windows with detections (max 10)
        if numSubImages == 1:
            randomIndex = 0
        else:
            randomIndex = random.randint(1, numSubImages-1) #Get a random window index
        imageName = imagePath.split('/')  #Working on the crop name...
        imageName = imageName[len(imageName)-1]
        filename = (imageName[:-4]+'-'+str(x)+'.jpg')
        io.imsave('Bootstrapping/'+filename, subImages[randomIndex])  #Save the crop
    #end To save bootstrapping windows


    if applyNMS:
        #From all the bounding boxes that are overlapping, take those with maximum score.
        boxes, scores = nms.non_max_suppression_fast(boxes, scores, cfg.nmsOverlapThresh)

    return boxes, scores
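The mapping from a window position at a given pyramid level back to coordinates in the original image is the same in every detection variant shown here; a sketch of it pulled out into a small helper, with the cfg parameters (downScaleFactor, window_step, padding, window_margin) passed in explicitly (the helper name is just for illustration):

import math

def windowToBBox(row, col, scale, window_shape,
                 downScaleFactor, window_step, padding, window_margin):
    # Undo the pyramid downscaling and window stepping to recover
    # original-image coordinates, exactly as done inside testImage.
    h, w = window_shape
    scaleMult = math.pow(downScaleFactor, scale)
    x1 = int(scaleMult * (col * window_step - padding + window_margin))
    y1 = int(scaleMult * (row * window_step - padding + window_margin))
    x2 = int(x1 + scaleMult * (w - 2 * window_margin))
    y2 = int(y1 + scaleMult * (h - 2 * window_margin))
    return (x1, y1, x2, y2)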
Example #4
def testImage(imagePath,
              decisionThreshold=cfg.decision_threshold,
              applyNMS=True):

    file = open(cfg.modelPath)
    svc = pickle.load(file)

    image = io.imread(imagePath, as_grey=True)
    image = util.img_as_ubyte(image)  #Convert the image to 8-bit unsigned integers (pixel values 0-255)

    rows, cols = image.shape
    pyramid = tuple(pyramid_gaussian(image, downscale=cfg.downScaleFactor))

    scale = 0
    boxes = None
    scores = None

    for p in pyramid[0:]:
        #We now have the subsampled image in p

        #Add padding to the image, using reflection to avoid border effects
        if cfg.padding > 0:
            p = pad(p, cfg.padding, 'reflect')

        try:
            views = view_as_windows(p, cfg.window_shape, step=cfg.window_step)
        except ValueError:
            #block shape is bigger than image
            break

        num_rows, num_cols, width, height = views.shape
        for row in range(0, num_rows):
            for col in range(0, num_cols):

                #Get current window
                subImage = views[row, col]
                #Extract features
                feats = feature_extractor.extractFeatures(subImage)
                #Obtain prediction score
                decision_func = svc.decision_function(
                    np.array(feats).reshape(1, -1))

                if decision_func > decisionThreshold:
                    # Pedestrian found!
                    h, w = cfg.window_shape
                    scaleMult = math.pow(cfg.downScaleFactor, scale)

                    x1 = int(scaleMult * (col * cfg.window_step - cfg.padding +
                                          cfg.window_margin))
                    y1 = int(scaleMult * (row * cfg.window_step - cfg.padding +
                                          cfg.window_margin))
                    x2 = int(x1 + scaleMult * (w - 2 * cfg.window_margin))
                    y2 = int(y1 + scaleMult * (h - 2 * cfg.window_margin))

                    bbox = (x1, y1, x2, y2)
                    score = decision_func[0]

                    if boxes is not None:
                        boxes = np.vstack((bbox, boxes))
                        scores = np.hstack((score, scores))
                    else:
                        boxes = np.array([bbox])
                        scores = np.array([score])
        scale += 1

    if applyNMS:
        #From all the bounding boxes that are overlapping, take those with maximum score.
        boxes, scores = nms.non_max_suppression_fast(boxes, scores,
                                                     cfg.nmsOverlapThresh)

    return boxes, scores
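Note that this copy wraps the feature vector with np.array(feats).reshape(1, -1) before calling decision_function, which newer scikit-learn releases require for a single sample; the last variant in this example passes feats directly, which only works with older releases. A sketch of the reshape step on its own (the feature length is an arbitrary placeholder):

import numpy as np

feats = np.zeros(3780)                      # placeholder feature vector of arbitrary length
sample = np.array(feats).reshape(1, -1)     # shape (1, n_features), as decision_function expects
# score = svc.decision_function(sample)[0]  # svc would be the unpickled classifier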
def testImage(imagePath, decisionThreshold = cfg.decision_threshold, applyNMS=True):

    fileList = os.listdir(cfg.modelRootPath)

    # Filter all model files
    modelsList = filter(lambda element: '.model' in element, fileList)

    # Filter our specific feature method
    currentModel = cfg.model+'_'+cfg.modelFeatures
    currentModelsList = filter(lambda element: currentModel in element, modelsList)


    models = []
    rectangleModel = []
    subImages = [] #To save background crops

    for modelname in currentModelsList:

        file = open(cfg.modelRootPath + modelname, 'r')
        svc = pickle.load(file)

        if 'Rect' in modelname:
            rectangleModel.append(svc)
        else:
            models.append(svc)

        file.close()

    image = io.imread(imagePath, as_grey=True)
    image = util.img_as_ubyte(image) #Convert the image to 8-bit unsigned integers (pixel values 0-255)

    rows, cols = image.shape
    pyramid = tuple(pyramid_gaussian(image, downscale=cfg.downScaleFactor))

    scale = 0
    boxes = None
    scores = None

    #
    # for p in pyramid[0:]:
    #     #We now have the subsampled image in p
    #     window_shape = (64,64)
    #
    #     #Add padding to the image, using reflection to avoid border effects
    #     if cfg.padding > 0:
    #         p = pad(p,cfg.padding,'reflect')
    #
    #     try:
    #         views = view_as_windows(p, window_shape, step=cfg.window_step)
    #     except ValueError:
    #         #block shape is bigger than image
    #         break
    #
    #     num_rows, num_cols, width, height = views.shape
    #
    #     for row in range(0, num_rows):
    #         for col in range(0, num_cols):
    #             #Get current window
    #             subImage = views[row, col]
    #             # subImages.append(subImage)   #To save background crops: Accumulate them in an array
    #             #Extract features
    #             feats = feature_extractor.extractFeatures(subImage)
    #
    #             #Obtain prediction score for each model
    #             for model in models:
    #
    #                 decision_func = model.decision_function(feats)
    #
    #                 if decision_func > 0.4:
    #                     # Signal found!
    #                     h, w = window_shape
    #                     scaleMult = math.pow(cfg.downScaleFactor, scale)
    #
    #                     x1 = int(scaleMult * (col*cfg.window_step - cfg.padding + cfg.window_margin))
    #                     y1 = int(scaleMult * (row*cfg.window_step - cfg.padding + cfg.window_margin))
    #                     x2 = int(x1 + scaleMult*(w - 2*cfg.window_margin))
    #                     y2 = int(y1 + scaleMult*(h - 2*cfg.window_margin))
    #
    #                     #bootstrapping: Save image (if positive)
    #                     #subImages.append(subImage)
    #
    #                     bbox = (x1, y1, x2, y2)
    #                     score = decision_func[0]
    #
    #                     if boxes is not None:
    #                         boxes = np.vstack((bbox, boxes))
    #                         scores = np.hstack((score, scores))
    #                     else:
    #                         boxes = np.array([bbox])
    #                         scores = np.array([score])
    #                     break
    #
    #     scale += 1

    scale = 0
    for pR in pyramid[0:]:
        #We now have the subsampled image in p
        window_shape = (96,48)

        #Add padding to the image, using reflection to avoid border effects
        if cfg.padding > 0:
            pR = pad(pR,cfg.padding,'reflect')

        try:
            views = view_as_windows(pR, window_shape, step=cfg.window_step)
        except ValueError:
            #block shape is bigger than image
            break

        num_rows, num_cols, width, height = views.shape

        for row in range(0, num_rows):
            for col in range(0, num_cols):
                #Get current window
                subImage = views[row, col]
                # subImages.append(subImage)   #To save background crops: Accumulate them in an array

                #Extract features
                feats = feature_extractor.extractFeatures(subImage)

                #Obtain prediction score for each model
                for model in rectangleModel:
                    decision_func = model.decision_function(feats)

                    if decision_func > 0.3:
                        # Signal found!
                        h, w = window_shape
                        scaleMult = math.pow(cfg.downScaleFactor, scale)

                        x1 = int(scaleMult * (col*cfg.window_step - cfg.padding + cfg.window_margin))
                        y1 = int(scaleMult * (row*cfg.window_step - cfg.padding + cfg.window_margin))
                        x2 = int(x1 + scaleMult*(w - 2*cfg.window_margin))
                        y2 = int(y1 + scaleMult*(h - 2*cfg.window_margin))

                        bbox = (x1, y1, x2, y2)
                        score = decision_func[0]

                        #bootstrapping: Save image (if positive)
                        subImages.append(subImage)

                        if boxes is not None:
                            boxes = np.vstack((bbox, boxes))
                            scores = np.hstack((score, scores))
                        else:
                            boxes = np.array([bbox])
                            scores = np.array([score])
                        break

        scale += 1

    # To save background crops
    # numSubImages = len(subImages)
    # for x in range(0,10): #Save 10 crops for each background image
    #     randomIndex = random.randint(1,numSubImages-1) #Get a random window index
    #     imageName = imagePath.split('/')  #Working on the crop name...
    #     imageName = imageName[len(imageName)-1]
    #     filename = (imageName[:-4]+'-'+str(x)+'.jpg')
    #     io.imsave('Results/'+filename, subImages[randomIndex])  #Save the crop
    #end To save backgorund crops

    # To save bootstrapping windows
    numSubImages = len(subImages)
    length = min(10, len(subImages))
    for x in range(0, length): #Save windows with detections (max 10)
        if numSubImages == 1:
            randomIndex = 0
        else:
            randomIndex = random.randint(1, numSubImages-1) #Get a random window index
        imageName = imagePath.split('/')  #Working on the crop name...
        imageName = imageName[len(imageName)-1]
        filename = (imageName[:-4]+'-'+str(x)+'_bootstrapping'+'.jpg')
        io.imsave('Bootstrapping/'+filename, subImages[randomIndex])  #Save the crop
    #end To save bootstrapping windows


    if applyNMS:
        #From all the bounding boxes that are overlapping, take those with maximum score.
        boxes, scores = nms.non_max_suppression_fast(boxes, scores, cfg.nmsOverlapThresh)

    return boxes, scores
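The scanning loop shared by these detection variants (a Gaussian pyramid plus dense window views at each level) can be sketched in isolation with skimage; the image, downscale factor and step below are arbitrary stand-ins, not the project's configuration:

import numpy as np
from skimage.transform import pyramid_gaussian
from skimage.util import view_as_windows

image = np.random.rand(256, 256)     # stand-in for the grayscale input image
window_shape = (96, 48)              # same rectangular window as above

for scale, level in enumerate(pyramid_gaussian(image, downscale=1.5)):
    try:
        views = view_as_windows(level, window_shape, step=8)
    except ValueError:
        break                        # this pyramid level is smaller than the window
    num_rows, num_cols = views.shape[:2]
    print('scale %d: %d x %d windows of shape %s' % (scale, num_rows, num_cols, window_shape))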
def testImage(imagePath, decisionThreshold = cfg.decision_threshold, applyNMS=True):

    file = open(cfg.modelPath, 'r')
    svc = pickle.load(file)

    image = io.imread(imagePath, as_grey=True)
    image = util.img_as_ubyte(image) #Convert the image to 8-bit unsigned integers (pixel values 0-255)

    rows, cols = image.shape
    pyramid = tuple(pyramid_gaussian(image, downscale=cfg.downScaleFactor))

    scale = 0
    boxes = None
    scores = None

    for p in pyramid[0:]:
        #We now have the subsampled image in p

        #Add padding to the image, using reflection to avoid border effects
        if cfg.padding > 0:
            p = pad(p,cfg.padding,'reflect')

        try:
            views = view_as_windows(p, cfg.window_shape, step=cfg.window_step)
        except ValueError:
            #block shape is bigger than image
            break

        num_rows, num_cols, width, height = views.shape
        for row in range(0, num_rows):
            for col in range(0, num_cols):

                #Get current window
                subImage = views[row, col]
                #Extract features
                feats = feature_extractor.extractFeatures(subImage)
                #Obtain prediction score
                decision_func = svc.decision_function(feats)

                if decision_func > decisionThreshold:
                    # Pedestrian found!
                    h, w = cfg.window_shape
                    scaleMult = math.pow(cfg.downScaleFactor, scale)

                    x1 = int(scaleMult * (col*cfg.window_step - cfg.padding + cfg.window_margin))
                    y1 = int(scaleMult * (row*cfg.window_step - cfg.padding + cfg.window_margin))
                    x2 = int(x1 + scaleMult*(w - 2*cfg.window_margin))
                    y2 = int(y1 + scaleMult*(h - 2*cfg.window_margin))

                    bbox = (x1, y1, x2, y2)
                    score = decision_func[0]

                    if boxes is not None:
                        boxes = np.vstack((bbox, boxes))
                        scores = np.hstack((score, scores))
                    else:
                        boxes = np.array([bbox])
                        scores = np.array([score])
        scale += 1

    if applyNMS:
        #From all the bounding boxes that are overlapping, take those with maximum score.
        boxes, scores = nms.non_max_suppression_fast(boxes, scores, cfg.nmsOverlapThresh)

    return boxes, scores
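Finally, a minimal sketch of how the detection variants might be driven, assuming cfg is configured as in the examples above and that an input image exists at the (placeholder) path below:

imagePath = 'images/test.jpg'   # placeholder path, not from the project

boxes, scores = testImage(imagePath, decisionThreshold=cfg.decision_threshold, applyNMS=True)

if boxes is None:
    print('no detections above the threshold')
else:
    for (x1, y1, x2, y2), score in zip(boxes, scores):
        print('detection at (%d, %d)-(%d, %d), score %.3f' % (x1, y1, x2, y2, score))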