def main(video_file='soccer.avi', roi=((140, 100), (500, 600))):
    """Run saliency-based multiple-object tracking on a video file.

    Parameters
    ----------
    video_file : str
        Path to the input video.
    roi : ((top, left), (bottom, right)) or None
        Region of interest in pixel coordinates; if falsy, the full
        frame is processed.

    Raises
    ------
    SystemExit
        If ``video_file`` does not exist.
    """
    # open video file (guard clause: bail out early on a missing file)
    if not path.isfile(video_file):
        # parenthesized print works under both Python 2 and Python 3
        print('File "' + video_file + '" does not exist.')
        raise SystemExit

    video = cv2.VideoCapture(video_file)

    # initialize tracker
    mot = MultipleObjectsTracker()

    while True:
        # grab next frame
        success, img = video.read()
        if not success:
            break

        if roi:
            # original video is too big: grab some meaningful ROI
            img = img[roi[0][0]:roi[1][0], roi[0][1]:roi[1][1]]

        # generate saliency map
        sal = Saliency(img, use_numpy_fft=False, gauss_kernel=(3, 3))

        # compute the proto-objects map once per frame instead of twice
        proto_objects = sal.get_proto_objects_map(use_otsu=False)

        cv2.imshow('original', img)
        cv2.imshow('saliency', sal.get_saliency_map())
        cv2.imshow('objects', proto_objects)
        cv2.imshow('tracker', mot.advance_frame(img, proto_objects))

        if cv2.waitKey(100) & 0xFF == ord('q'):
            break

    # release the capture so the file handle is not leaked
    video.release()
Ejemplo n.º 2
0
def main(video_file='soccer.avi', roi=((140, 100), (500, 600))):
    """Run saliency-based multiple-object tracking on a video file.

    Parameters
    ----------
    video_file : str
        Path to the input video.
    roi : ((top, left), (bottom, right)) or None
        Region of interest in pixel coordinates; if falsy, the full
        frame is processed.

    Raises
    ------
    SystemExit
        If ``video_file`` does not exist.
    """
    # open video file (guard clause: bail out early on a missing file)
    if not path.isfile(video_file):
        # parenthesized print works under both Python 2 and Python 3
        print('File "' + video_file + '" does not exist.')
        raise SystemExit

    video = cv2.VideoCapture(video_file)

    # initialize tracker
    mot = MultipleObjectsTracker()

    while True:
        # grab next frame
        success, img = video.read()
        if not success:
            break

        if roi:
            # original video is too big: grab some meaningful ROI
            img = img[roi[0][0]:roi[1][0], roi[0][1]:roi[1][1]]

        # generate saliency map
        sal = Saliency(img, use_numpy_fft=False, gauss_kernel=(3, 3))

        # compute the proto-objects map once per frame instead of twice
        proto_objects = sal.get_proto_objects_map(use_otsu=False)

        cv2.imshow('original', img)
        cv2.imshow('saliency', sal.get_saliency_map())
        cv2.imshow('objects', proto_objects)
        cv2.imshow('tracker', mot.advance_frame(img, proto_objects))

        if cv2.waitKey(100) & 0xFF == ord('q'):
            break

    # release the capture so the file handle is not leaked
    video.release()
Ejemplo n.º 3
0
def main(video_file='jet.mp4', region=((0, 0), (350, 450))):
    """Run saliency-based multiple-object tracking on a video file.

    Parameters
    ----------
    video_file : str
        Path to the input video.
    region : ((top, left), (bottom, right)) or None
        Region of interest in pixel coordinates; if falsy, the full
        frame is processed.

    Raises
    ------
    SystemExit
        If ``video_file`` does not exist.
    """
    # open video file (guard clause: bail out early on a missing file)
    if not path.isfile(video_file):
        # parenthesized print works under both Python 2 and Python 3
        print('File "' + video_file + '" does not exist.')
        raise SystemExit

    video = cv2.VideoCapture(video_file)

    # initialize tracker
    mot = MultipleObjectsTracker()

    while True:
        # grab next frame
        success, img = video.read()
        if not success:
            break

        if region:
            # original video is too big: grab some meaningful region of interest
            img = img[region[0][0]:region[1][0], region[0][1]:region[1][1]]

        # generate saliency map
        sal = Saliency(img)

        # compute the proto-objects map once per frame instead of twice
        proto_objects = sal.get_proto_objects_map()

        cv2.imshow('original', img)
        cv2.imshow('saliency', sal.get_saliency_map())
        cv2.imshow('objects', proto_objects)
        cv2.imshow('tracker', mot.advance_frame(img, proto_objects))

        if cv2.waitKey(100) & 0xFF == ord('q'):
            break

    # release the capture so the file handle is not leaked
    video.release()
Ejemplo n.º 4
0
def get_saliency_map(arr):
    """Compute saliency-based feature statistics for an image array.

    Parameters
    ----------
    arr : numpy.ndarray
        Input image; only its first two dimensions (height, width)
        are used for normalization.

    Returns
    -------
    list
        The statistics from ``getstats`` followed by, for each of the
        thresholds > 0.80, > 0.9, < 0.1 and < 0.2: the pixel count and
        the count as a fraction of the image size.
    """
    # float so the cnt / size ratios below are true fractions; under
    # Python 2 an int/int division would truncate every ratio to 0
    size = float(arr.shape[0] * arr.shape[1])
    sali = Saliency(arr)
    m = sali.get_saliency_map()
    sali_stats = getstats(m)
    # counts of strongly / weakly salient pixels at fixed thresholds
    cnt = (m > 0.80).sum()
    cnt2 = (m > 0.9).sum()
    cnt3 = (m < 0.1).sum()
    cnt4 = (m < 0.2).sum()
    return sali_stats + [
        cnt, cnt / size, cnt2, cnt2 / size, cnt3, cnt3 / size, cnt4,
        cnt4 / size
    ]
Ejemplo n.º 5
0
def sal_image(frame_queue, sal_queue, queue_lock):
  """Worker loop: pull frames from ``frame_queue``, render their
  saliency maps as BGR images and push them onto ``sal_queue``.

  Parameters
  ----------
  frame_queue : queue of input frames (BGR images)
  sal_queue : queue receiving the rendered saliency images
  queue_lock : lock guarding access to both queues

  Runs forever; intended to be executed in its own process/thread.
  """
  # parenthesized print works under both Python 2 and Python 3
  print('sal_image called')
  frame = 0
  while True:
    if frame_queue.empty():
      # brief pause instead of a tight busy-wait while idle
      time.sleep(0.01)
      continue

    queue_lock.acquire()
    try:
      img = frame_queue.get()
    finally:
      # always release, even if get() raises
      queue_lock.release()

    sal = Saliency(img)
    sal_map = sal.get_saliency_map()
    # scale the [0, 1] float map to an 8-bit grayscale image
    sal_img = (sal_map * 255).round().astype(np.uint8)
    img = cv2.cvtColor(sal_img, cv2.COLOR_GRAY2BGR)

    queue_lock.acquire()
    try:
      queued = sal_queue.qsize() < 3
      if queued:
        sal_queue.put(img)
    finally:
      queue_lock.release()
    if not queued:
      # output queue full: back off OUTSIDE the lock so the
      # consumer is not blocked for the whole second
      time.sleep(1)

    frame = frame + 1
    print('SAL _ FRAME : %d' % frame)
Ejemplo n.º 6
0
import time

# NOTE(review): usage of BUFFER is not visible in this chunk -- presumably
# a padding/margin in pixels used further down; confirm against the full file
BUFFER = 40

# read an image
img_original = cv2.imread("test_image.jpg")
img_grayscale = cv2.cvtColor(img_original, cv2.COLOR_BGR2GRAY)
# img_sized = cv2.resize(img_original,(1920,1080))
img_sized = cv2.resize(img_original, (1280, 720))
# working copies so later drawing/filtering does not touch img_sized
img_size_exclusion = img_sized.copy()
img_hist = img_sized.copy()

# time how long the saliency computation takes
start = time.time()
# find the saliency map
sal = Saliency(img_sized)
sal_map = sal.get_saliency_map()

end = time.time()

test_duration = end - start

# convert to the correct type: float map scaled to 8-bit grayscale
sal_conv = (sal_map * 255).round().astype(np.uint8)

# find the contours (binarize the saliency image at a fixed threshold first)
ret, thresh = cv2.threshold(sal_conv, 100, 255, 0)
# NOTE(review): 3-value unpacking is the OpenCV 3.x findContours signature;
# OpenCV 2.x and 4.x return 2 values -- confirm the installed version
_, contours, _ = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

# channel identifiers, presumably for per-channel histogram plotting below
# (the consuming loop is outside this chunk)
color = ("b", "r", "g")
for cnt in contours: