Code example #1
File: bbox.py Project: perizen1/CarND-Capstone
def lable_img(img, data):
	stop = False
	r = cv2.selectROI("Image", img, fromCenter=False)
	r = [ int(x) for x in r ]
	print("press 1 - for red, 2 - for yellow, 3 - for green")
	a=[]
	key = cv2.waitKey(0)
	if key == 49: # 1 is pressed
		a = [0]
	elif key == 50: # 2
		a = [1]
	elif key == 51: # 3
		a = [2]
	a.extend(r)
	data.append({"yolo": a})
	# User input if we need more bounding boxes
	print("Press \"n\" for new bounding box or \"r\" to relable or \"x\" to exit or ENTER to go to the next image")
	key = cv2.waitKey(0)
	if key == 110: # n - Add new boundin box on the image
		lable_img(img, data)
	if key == 114: # r - Relabele the image again
		print("Redraw labels for the frame")
		data = []
		lable_img(img, data)
	if key == 120: # x - stop the process as is and dump the json
		stop = True
	return data, stop
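A minimal driver for `lable_img` might look like the sketch below; the input glob, the window teardown, and the output file name "labels.json" are illustrative assumptions, not part of the original script.

import glob
import json

import cv2

if __name__ == "__main__":
    all_labels = {}
    for path in sorted(glob.glob("frames/*.jpg")):  # hypothetical input folder
        img = cv2.imread(path)
        labels, stop = lable_img(img, [])
        all_labels[path] = labels
        if stop:  # user pressed "x"
            break
    cv2.destroyAllWindows()
    with open("labels.json", "w") as f:  # hypothetical output path
        json.dump(all_labels, f, indent=2)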
Code example #2
File: screen.py Project: ipanivko/europilot
    def select_screen_area():
        """Use opencv to select game window from entire screen and return
        `box` object corresponding to game window.
        This method uses `selectROI` method in opencv tracking api.
        See http://docs.opencv.org/master/d7/dfc/group__highgui.html
        NOTE that opencv 3.0 (or above) is required.

        """
        try:
            import cv2
            cv2.selectROI
        except ImportError:
            raise ScreenException('opencv is not found')
        except AttributeError:
            raise ScreenException('`selectROI` not found.' +
                ' Try reinstalling opencv with `--with-contrib` option')

        # 1. Capture entire screen in primary monitor.
        monitors = ScreenUtils.get_local_monitors()
        # Use primary monitor to create box
        box = Box.from_monitor(monitors[0])
        local_grab = LocalScreenGrab(box)
        entire_screen = local_grab.grab()
        entire_screen = entire_screen.reshape(box.numpy_shape)

        # 2. Select game window from entire screen.
        window_name = 'select_screen_area'
        region = cv2.selectROI(window_name, entire_screen)

        # `region` is an (x, y, w, h) tuple, i.e. (x1, y1, x2 - x1, y2 - y1)
        # in the `Box` coordinate system.
        return Box(
            region[0], region[1],
            region[0] + region[2], region[1] + region[3]
        )
Code example #3
import sys

import cv2


def initializeTracker(frame):
    # tracker = cv2.TrackerBoosting_create()
    # tracker = cv2.TrackerMIL_create()
    # tracker = cv2.TrackerKCF_create()
    # tracker = cv2.TrackerTLD_create()
    # tracker = cv2.TrackerMedianFlow_create()
    tracker = cv2.TrackerCSRT_create()
    # tracker = cv2.TrackerMOSSE_create()
    
    bbox = cv2.selectROI(frame, False)
    ok = tracker.init(frame, bbox)
    
    if not ok:
        print('Error: Unable to initialize tracker!')
        sys.exit()
        
    return tracker
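One portability note: in OpenCV 4.5.1+ several contrib trackers used above (BOOSTING, TLD, MedianFlow, MOSSE) moved into the `cv2.legacy` namespace, while CSRT, KCF and MIL remain at the top level. A version-tolerant factory, as a sketch:

import cv2

def make_tracker(name="CSRT"):
    # Prefer the top-level constructor; fall back to cv2.legacy on
    # OpenCV >= 4.5.1, where the contrib trackers were relocated.
    ctor = getattr(cv2, "Tracker{}_create".format(name), None)
    if ctor is None:
        ctor = getattr(cv2.legacy, "Tracker{}_create".format(name))
    return ctor()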
Code example #4
File: track.py Project: markmliu/20XX-Stats-Pack
def main(argv = sys.argv):
    # is 1400, for falconDitto
    file_name, frames_to_start = get_args(argv)
    (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

    cap = cv2.VideoCapture(file_name)
    fps = get_fps(cap, major_ver)
    for i in range(frames_to_start):
        cap.grab()
    ret, frame = cap.read()
    initial_bbox = cv2.selectROI(frame, False)
    cap.release()

    tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW']
    # tracker_types = ['KCF', 'TLD', 'MEDIANFLOW']
    for tracker_type in tracker_types:
        #  get a fresh cap.
        cap = cv2.VideoCapture(file_name)
        for i in range(frames_to_start):
            cap.grab()
        ret, frame = cap.read()

        w = int(cap.get(3))
        h = int(cap.get(4))
        print "w: ", w
        print "h: ", h
        print "tracker type: ", tracker_type
        tracker = get_tracker(tracker_type, minor_ver)
        print "created tracker!"
        out = cv2.VideoWriter(tracker_type + '.avi',
                      cv2.VideoWriter_fourcc(*'XVID'), 30.0, (w, h))

        # Initialize tracker with first frame and bounding box
        ok = tracker.init(frame, initial_bbox)
        # bbox is of the form (xmin, ymin, width, height);
        # cv2.rectangle takes the top_left and bottom_right corners

        top_left = (int(initial_bbox[0]), int(initial_bbox[1]))
        bottom_right = (int(initial_bbox[0] + initial_bbox[2]),
                        int(initial_bbox[1] + initial_bbox[3]))

        cv2.rectangle(frame, top_left, bottom_right, 255, 2)

        cv2.imshow('frame', frame)
        cv2.waitKey(0)

        frames_elapsed = 0
        # capture up to 5 seconds
        while(cap.isOpened() and frames_elapsed < (fps * 5)):

            ret, frame = cap.read()
            if not ret:
                break

            ok, bbox = tracker.update(frame)
            # Draw bounding box
            if ok:
                # Tracking success
                p1 = (int(bbox[0]), int(bbox[1]))
                p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
                cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
            else :
                # Tracking failure
                cv2.putText(frame, "Tracking failure detected", (100,80),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)
            # Display tracker type on frame
            cv2.putText(frame, tracker_type + " Tracker", (100, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            cv2.imshow('frame', frame)
            out.write(frame)
            if cv2.waitKey(25) & 0xFF == ord('q'):
                break
            frames_elapsed += 1
        out.release()
        cv2.destroyAllWindows()
        cap.release()
Code example #5
out_filepath = "out_" + filename + ".avi"
video = cv2.VideoCapture(filename+".mp4")

# Read first frame
success, frame = video.read()
if not success:
    print("Failed to read video")
    sys.exit(1)
video_size = (frame.shape[1], frame.shape[0])
# Define ROI - Region of Interest
bboxes = []
colors = []
while True:
    k = cv2.waitKey(0) & 0xFF
    if (k == 113): # q is pressed
        break
    bbox = cv2.selectROI("MultiTracker", frame)
    bboxes.append(bbox)
    colors.append((randint(0, 255), randint(0, 255), randint(0, 255)))
cv2.destroyAllWindows()    
print("Selected bounding boxes {}".format(bboxes))

multi_tracker = cv2.MultiTracker_create()
tracking_boxes = []
for bbox in bboxes:
    multi_tracker.add(create_tracker_by_name("CSRT"), frame, bbox)
out_frames = []
start_tracking_time = time.time()
while video.isOpened():
    success, frame = video.read()
    if not success:
        break
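The helper `create_tracker_by_name` is referenced above but not shown; a plausible implementation (a sketch, not the original author's code) maps the name to an OpenCV tracker constructor. Note that `cv2.MultiTracker_create` itself lives under `cv2.legacy` on OpenCV 4.5.1+.

def create_tracker_by_name(name):
    # Sketch of the missing helper: resolve "CSRT", "KCF", etc. to the
    # matching OpenCV tracker constructor, trying cv2.legacy as a fallback.
    ctor = getattr(cv2, "Tracker{}_create".format(name), None)
    if ctor is None:
        ctor = getattr(cv2.legacy, "Tracker{}_create".format(name), None)
    if ctor is None:
        raise ValueError("Unknown tracker name: {}".format(name))
    return ctor()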
Code example #6
    # Read first frame
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()
    height, width, channels = frame.shape

    frame = cv2.resize(frame, (1024, 1024))

    # Define an initial bounding box
#    bbox = (1035,543,100,100)
    #bbox = (544,483,100,120)
    #bbox = (904,302,970-904,365-302)

    # Select the initial bounding box interactively
    bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)
    success = True
    count = 0

    while success:

        # Read a new frame
        success, frame = video.read()
        if not success:
            break

        frame = cv2.resize(frame, (1024, 1024))
Code example #7
    def openVideo(self, source):
        self.frameCounter = 0
        if source == 'arquivo':
            ftypes = [('mp4 videos', '*.mp4'), ('All files', '*')]
            path = tk.filedialog.askopenfilename(filetypes=ftypes)
            if path == '':
                return
            print('Chosen file:', path)
            print("Starting, please wait...")
            try:
                self.vc.release()
                self.isUpdating = False
                time.sleep(1)
                del self.vc
            except Exception:
                print('Existing data source was discarded!')
            self.vc = visao.VideoCamera('video', path)
            # show the video's FPS
            fps = self.vc.video.get(cv.CAP_PROP_FPS)
            self.delay = int(
                1000 // fps
            )  # screen refresh interval, i.e. how often a new frame is grabbed
            print(
                "Video FPS from video.get(cv.CAP_PROP_FPS): {0}".format(fps))
            print("self.delay:", self.delay)
            self.frameCount = int(self.vc.video.get(cv.CAP_PROP_FRAME_COUNT))
            print("self.frameCount of the video:", self.frameCount)
        elif source == 'camera':
            try:
                self.vc.release()
                self.isUpdating = False
                time.sleep(1)
                del self.vc
            except Exception:
                print('Existing data source was discarded!')
            self.vc = visao.VideoCamera('camera', 0)
            self.delay = 15  # for a camera a delay of 15 should be enough
            self.frameCount = 0
        self.engine = visao.Engine()
        success, frame = self.vc.get_frame()
        if success:
            # Set the ROI (Region of Interest)
            messagebox.showinfo(
                "Attention!",
                "Select the area of interest for processing and press the [Enter] key."
            )
            roi = cv.selectROI(
                frame[::2, ::2]
            )  # downscale the image to pick the region of interest
            # scale the selection back up to the full-resolution frame
            self.roi = (roi[0] * 2, roi[1] * 2, roi[2] * 2, roi[3] * 2)
            # roi = (851, 402, 750, 470)  # used for testing only
            cv.destroyAllWindows()
            # Create the canvas
            frame = frame[int(self.roi[1]):int(self.roi[1] + self.roi[3]),
                          int(self.roi[0]):int(self.roi[0] + self.roi[2])]
            img_width, img_height = frame.shape[1], frame.shape[0]
            print('Shape:', img_width, img_height)

            try:
                self.canvas.delete("all")
                self.canvas.update()
                del self.canvas
            except Exception:
                print('Old canvas removed!')
            self.canvas = tk.Canvas(self.master,
                                    width=img_width * 2,
                                    height=img_height * 2,
                                    bg='white')
            self.canvas['borderwidth'] = 0
            self.canvas.grid(column=3, row=1, rowspan=6, padx=1, pady=1)
            self.image_on_canvas = self.canvas.create_image(0, 0, anchor=tk.NW)
            # call update() to keep refreshing frames on screen
            self.isUpdating = True
            self.update()
        else:
            messagebox.showinfo(
                "Error accessing data source!",
                "Error accessing data source: for a video file, check that the file is not corrupted; for a camera, check that it is properly connected to the computer."
            )
            print('!! Error accessing data source')
Code example #8
import cv2

img1 = cv2.imread("image.jpg", 1)

img2 = img1.copy()

rect = cv2.selectROI("ROI Selector", img2)
Code example #9
import cv2
import imutils

img = cv2.imread("image.jpg")
img = imutils.resize(img, width=500)

roi = cv2.selectROI(windowName="roi",
                    img=img,
                    showCrosshair=True,
                    fromCenter=False)
x, y, w, h = roi

cv2.rectangle(img=img,
              pt1=(x, y),
              pt2=(x + w, y + h),
              color=(0, 0, 255),
              thickness=2)
cv2.imshow("roi", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
Code example #10
#width = len(image[0])
#length = len(image)
height, width = image.shape[:2]
cy = int(height / 2)
cx = int(width / 2)
hx = 600
hy = 300
image = image[cy - hy:cy + hy, cx - hx:cx + hx]
n_height, n_width = image.shape[:2]
n_cy = int(n_height / 2)
n_cx = int(n_width / 2)

if not ok:
    print('Failed to read video')
    exit()
bbox = cv2.selectROI("tracking", image)
#tracker = cv2.TrackerMIL_create()
#tracker = cv2.TrackerBoosting_create()
tracker = cv2.TrackerCSRT_create()

init_once = False

#cx = int(width / 2)
#cy = int(length / 2)
dx = 0
dy = 0
telROIsize = int(telROIsize)
cv2.rectangle(image, (n_cx - telROIsize, n_cy - telROIsize),
              (n_cx + telROIsize, n_cy + telROIsize), (0, 0, 0), 5)

Code example #11
    def start_tracking(self):
        # get the image of the first frame... (read as gray scale image...)
        init_img = cv2.imread(self.frame_lists[0])
        init_frame = cv2.cvtColor(init_img, cv2.COLOR_BGR2GRAY)
        init_frame = init_frame.astype(np.float32)
        # get the init ground truth.. [x, y, width, height]
        init_gt = cv2.selectROI('demo', init_img, False, False)
        init_gt = np.array(init_gt).astype(np.int64)
        # start to draw the gaussian response...
        response_map = self._get_gauss_response(init_frame, init_gt)
        # start to create the training set ...
        # get the goal..
        g = response_map[init_gt[1]:init_gt[1]+init_gt[3], init_gt[0]:init_gt[0]+init_gt[2]]
        fi = init_frame[init_gt[1]:init_gt[1]+init_gt[3], init_gt[0]:init_gt[0]+init_gt[2]]
        G = np.fft.fft2(g)
        # start to do the pre-training...
        Ai, Bi = self._pre_training(fi, G)
        # start the tracking...
        for idx in range(len(self.frame_lists)):
            current_frame = cv2.imread(self.frame_lists[idx])
            frame_gray = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)
            frame_gray = frame_gray.astype(np.float32)
            if idx == 0:
                Ai = self.args.lr * Ai
                Bi = self.args.lr * Bi
                pos = init_gt.copy()
                clip_pos = np.array([pos[0], pos[1], pos[0]+pos[2], pos[1]+pos[3]]).astype(np.int64)
            else:
                Hi = Ai / Bi
                fi = frame_gray[clip_pos[1]:clip_pos[3], clip_pos[0]:clip_pos[2]]
                fi = pre_process(cv2.resize(fi, (init_gt[2], init_gt[3])))
                Gi = Hi * np.fft.fft2(fi)
                gi = linear_mapping(np.fft.ifft2(Gi))
                # find the max pos...
                max_value = np.max(gi)
                max_pos = np.where(gi == max_value)
                dy = int(np.mean(max_pos[0]) - gi.shape[0] / 2)
                dx = int(np.mean(max_pos[1]) - gi.shape[1] / 2)
                
                # update the position...
                pos[0] = pos[0] + dx
                pos[1] = pos[1] + dy

                # trying to get the clipped position [xmin, ymin, xmax, ymax]
                clip_pos[0] = np.clip(pos[0], 0, current_frame.shape[1])
                clip_pos[1] = np.clip(pos[1], 0, current_frame.shape[0])
                clip_pos[2] = np.clip(pos[0]+pos[2], 0, current_frame.shape[1])
                clip_pos[3] = np.clip(pos[1]+pos[3], 0, current_frame.shape[0])
                clip_pos = clip_pos.astype(np.int64)

                # get the current fi..
                fi = frame_gray[clip_pos[1]:clip_pos[3], clip_pos[0]:clip_pos[2]]
                fi = pre_process(cv2.resize(fi, (init_gt[2], init_gt[3])))
                # online update...
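                # MOSSE (Bolme et al., 2010) keeps running numerator/denominator
                # sums so the filter H = A / B can be refreshed with learning
                # rate lr:  A_i = lr * (G * conj(F_i)) + (1 - lr) * A_{i-1}
                #           B_i = lr * (F_i * conj(F_i)) + (1 - lr) * B_{i-1}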
                Ai = self.args.lr * (G * np.conjugate(np.fft.fft2(fi))) + (1 - self.args.lr) * Ai
                Bi = self.args.lr * (np.fft.fft2(fi) * np.conjugate(np.fft.fft2(fi))) + (1 - self.args.lr) * Bi
            
            # visualize the tracking process...
            cv2.rectangle(current_frame, (pos[0], pos[1]), (pos[0]+pos[2], pos[1]+pos[3]), (255, 0, 0), 2)
            cv2.imshow('demo', current_frame)
            cv2.waitKey(100)
            # if record... save the frames..
            if self.args.record:
                frame_path = 'record_frames/' + self.img_path.split('/')[1] + '/'
                if not os.path.exists(frame_path):
                    os.mkdir(frame_path)
                cv2.imwrite(frame_path + str(idx).zfill(5) + '.png', current_frame)
Code example #12
# -*- coding: utf-8 -*-
import cv2
import numpy as np

print(cv2.__version__)
img = cv2.imread('datasets/figura.jpg', 0)

#while (1):
bbox = cv2.selectROI(img, False)

p1 = (int(bbox[0]), int(bbox[1]))
p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
cv2.rectangle(img, p1, p2, (0, 0, 255))
x1 = int(bbox[0])
y1 = int(bbox[1])
x2 = int(bbox[0]) + int(bbox[2])
y2 = int(bbox[1]) + int(bbox[3])
print(img.shape)
print(bbox)
img = img[y1:y2, x1:x2]
cv2.imshow('image', img)
cv2.waitKey(0)

cv2.destroyAllWindows()
Code example #13
File: let_it_rip.py Project: meistalampe/rPPG
def main():

    dataset, timestamps = load_thermal_file(
        _filename='ThermalData_18_06_2020_13_19_36.h5',
        _folder='E:\\GitHub\\CovPySourceFile')

    # region Control Variables
    is_writing = False
    is_drawing = False
    # endregion

    # region Data Pre-Processing

    # region Timestamps to Sampling Rate

    # # convert timestamps into datetime objects
    # dt_obj = [datetime.fromtimestamp(ts / 1000).time() for ts in timestamps]
    # # convert datetime objects into time strings
    # time_strings = [dt.strftime("%M:%S:%f") for dt in dt_obj]
    # # finally convert time strings into seconds
    # timestamp_in_seconds = []
    # for s in time_strings:
    #     date_time = datetime.strptime(s, "%M:%S:%f")
    #     a_timedelta = date_time - datetime(1900, 1, 1)
    #     in_seconds = a_timedelta.total_seconds()
    #     timestamp_in_seconds.append(in_seconds)
    #
    # # calculate the mean interval between samples from seconds
    # ts_mean = np.mean(np.diff(timestamp_in_seconds))
    # # finally calculate the mean sampling rate of the signal
    # fs = int(1 / ts_mean)
    # endregion

    # region Get Raw Thermal Data

    # get data set attributes
    n_frames, height, width, total_time_ms = [
        dataset.attrs[i] for i in list(dataset.attrs)
    ]
    # extract thermal frames from the hdf5 dataset
    thermal_frames = []
    # convert raw data into temperature values [deg Celsius]
    # temp_frames = []
    # normalize raw data for further processing steps [0 - 255]
    norm_frames = []
    for n in range(0, n_frames):
        raw_frame = load_frame_from_dataset(dataset, height, n)
        thermal_frames.append(raw_frame)
        # temp_frames.append(raw_frame * 0.1 - 273.15)
        norm_frames.append(
            cv2.normalize(raw_frame,
                          None,
                          alpha=0,
                          beta=255,
                          norm_type=cv2.NORM_MINMAX,
                          dtype=cv2.CV_8U))

    # get unsharpened img for edge detection later on
    unsharp_frames = []
    # for n, n_frame in enumerate(norm_frames):
    #     u_frame = unsharp_mask(image=n_frame, radius=3, amount=2)
    #     unsharp_frames.append(u_frame)
    #
    #     if is_writing:
    #         cv2.imwrite('E:\\GitHub\\CovPySourceFile\\UnsharpenedMask\\UM_{}.png'.format(n), u_frame)
    #
    #     if is_drawing:
    #         fig, ax = plt.subplots(nrows=1, ncols=2, figsize=(10, 3.5))
    #
    #         # Plotting the original image.
    #         ax[0].imshow(norm_frames[n])
    #         ax[0].set_title('Thermal Data - Normalized')
    #
    #         # ax[1].imshow(temp_frames[n])
    #         # ax[1].set_title('Temp Frame [C]')
    #
    #         ax[1].imshow(unsharp_frames[n])
    #         ax[1].set_title('Unsharpened Image')
    #
    #         # ax[1].imshow(norm_frames[n])
    #         # ax[1].set_title('Thermal Data - Normalized [0-255]')
    #
    #         plt.subplots_adjust()
    #         plt.show()
    #
    # if is_drawing:
    #     plt.close('all')

    # endregion

    # endregion

    # region Feature Extraction Algorithm

    # region Automatic ROI Detection

    # face segmentation using multi-level Otsu
    otsu_masks = multi_level_otsu(images=norm_frames,
                                  n_regions=4,
                                  target_region=3,
                                  method=OtsuMethods.BINARY,
                                  write=is_writing,
                                  draw=is_drawing)

    # to proceed the masks need to be converted into 3d array
    empty_array = np.zeros((height, width))
    _3d_otsu_masks = [
        np.dstack((mask, empty_array, empty_array)) for mask in otsu_masks
    ]

    # use binary otsu mask to detect the face
    (major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')

    # Set up tracker
    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[4]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    # video = cv2.VideoCapture('E:\\GitHub\\CovPySourceFile\\Video\\OtsuMask.avi')
    #
    # # Exit if video not opened.
    # if not video.isOpened():
    #     print("Could not open video file!")
    #     sys.exit()
    #
    # # Read first frame
    # ok, frame = video.read()
    # if not ok:
    #     print("Could not read video file!")
    #     sys.exit()

    tracked_frame = _3d_otsu_masks[0]
    # Define initial bounding box from roi
    bbox = cv2.selectROI(tracked_frame, showCrosshair=True, fromCenter=False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(tracked_frame, bbox)

    # roi points
    roi_points = []
    tracked_frames = []
    # while True:
    # # Read a new frame
    # ok, frame = video.read()
    # if not ok:
    #     break
    for mask in _3d_otsu_masks:
        tracked_frame = mask
        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(tracked_frame)
        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1]))
            p3 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            p4 = (int(bbox[0]), int(bbox[1] + bbox[3]))
            cv2.rectangle(tracked_frame, p1, p3, (255, 0, 0), 2, 1)
            points = [p1, p2, p3, p4]
            # roi_values = get_values_from_roi(points, t_frame)
            roi_points.append(points)
        else:
            # Tracking failure
            cv2.putText(tracked_frame, "Tracking failure detected", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            roi_points.append([])

        # Display tracker type on frame
        cv2.putText(tracked_frame, tracker_type + " Tracker", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(tracked_frame, "FPS : " + str(int(fps)), (100, 50),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        tracked_frames.append(tracked_frame)
        # Display result
        cv2.imshow("Tracking", tracked_frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break

    is_writing = True
    if is_writing:
        for n, img in enumerate(tracked_frames):
            cv2.imwrite(
                'E:\\GitHub\\CovPySourceFile\\TrackedFrames\\TF_{}.png'.format(
                    n), img)

    norm_face_rois = []
    for n in range(0, len(roi_points)):
        # get values inside of roi
        norm_roi_values = get_values_from_roi(roi_points[n], norm_frames[n])
        # my_roi = np.zeros((roi_shapes[n][2], roi_shapes[n][3]))
        x1 = roi_points[n][0][0]
        x2 = roi_points[n][2][0]
        y1 = roi_points[n][0][1]
        y2 = roi_points[n][2][1]

        norm_face_roi = norm_roi_values[y1:y2, x1:x2]

        if is_drawing:
            cv2.imshow("ROI", norm_face_roi)

            # Exit if ESC pressed
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                break

        norm_face_rois.append(norm_face_roi)

    if is_writing:
        for n, img in enumerate(norm_face_rois):
            cv2.imwrite(
                'E:\\GitHub\\CovPySourceFile\\FaceROI\\TF_{}.png'.format(n),
                img)
    # endregion

    # endregion

    print('Bye Bye')
Code example #14
while True:
    timer = cv2.getTickCount()
    img = np.zeros((512, 512, 3), np.uint8)

    imgL, imgR = feed(cap1, cap2)
    dispmap = new(imgL, imgR)

    try:
        firstFrame
    except NameError:
        firstFrame = dispmap
    try:
        roi
    except NameError:
        roi = cv2.selectROI(dispmap)

    frameDeltaROI, dispmapROI = diff(firstFrame, dispmap, roi)

    while savedMeanDelta_danger == 0:
        imgL, imgR = feed(cap1, cap2)
        dispmap = new(imgL, imgR)
        frameDeltaROI, dispmapROI = diff(firstFrame, dispmap, roi)
        cv2.imshow("dispmap-calibrate", dispmapROI)
        cv2.putText(dispmapROI, "Kalibrace DANGER", (100, 20),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        savedMeanDelta_danger, savedMeanDispmap_danger = calibrate2safedistance(
            frameDeltaROI, dispmapROI)

    while savedMeanDelta_stop == 0:
Code example #15
File: run.py Project: abbasi-crypton/kcf-tracker
import argparse
import cv2
from kcf import Tracker

if __name__ == '__main__':
    cap = cv2.VideoCapture(0)
    tracker = Tracker()
    ok, frame = cap.read()
    if not ok:
        print("error reading video")
        exit(-1)
    roi = cv2.selectROI("tracking", frame, False, False)
    #roi = (218, 302, 148, 108)
    tracker.init(frame, roi)
    while cap.isOpened():
        ok, frame = cap.read()
        if not ok:
            break
        x, y, w, h = tracker.update(frame)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 1)
        cv2.imshow('tracking', frame)
        c = cv2.waitKey(1) & 0xFF
        if c == 27 or c == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
Code example #16
import cv2
import numpy as np

if __name__ == '__main__' :
    
    #image file path
    image="image.jpeg"
    
    # Read image
    im = cv2.imread(image)

    # Select ROI
    r = cv2.selectROI(im)

    # Crop image
    imCrop = im[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]

    # Display cropped image
    cv2.imshow("Image", imCrop)
    cv2.waitKey(0)
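One caveat this minimal example skips: `selectROI` blocks until the selection is confirmed with ENTER or SPACE, and canceling (the c key) returns an all-zero rectangle, so a guard along these lines (variable names as above) avoids a zero-size crop:

r = cv2.selectROI(im)
if r == (0, 0, 0, 0):
    # selection was canceled; nothing to crop
    print("No ROI selected")
else:
    imCrop = im[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]
    cv2.imshow("Image", imCrop)
    cv2.waitKey(0)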
Code example #17
File: tracker.py Project: AutomanHan/opencv_contrib
import numpy as np
import cv2 as cv
import sys

if len(sys.argv) != 2:
    print('Input video name is missing')
    exit()

cv.namedWindow("tracking")
camera = cv.VideoCapture(sys.argv[1])
ok, image = camera.read()
if not ok:
    print('Failed to read video')
    exit()
bbox = cv.selectROI("tracking", image)
tracker = cv.TrackerMIL_create()
init_once = False

while camera.isOpened():
    ok, image = camera.read()
    if not ok:
        print('no image to read')
        break

    if not init_once:
        ok = tracker.init(image, bbox)
        init_once = True

    ok, newbox = tracker.update(image)
    print(ok, newbox)
Code example #18
"""
Video analysis: drawing an object's movement trajectory
"""

import cv2 as cv
import numpy as np

cap = cv.VideoCapture('images/balltest.mp4')
if not cap.isOpened():
    print("could not read video")
    exit(0)

# read the first frame
ret, frame = cap.read()

# select the ROI region
x, y, w, h = cv.selectROI("CAM Demo", frame, True, False)
track_window = (x, y, w, h)

# compute the ROI histogram
roi = frame[y:y + h, x:x + w]
hsv_roi = cv.cvtColor(roi, cv.COLOR_BGR2HSV)
mask = cv.inRange(hsv_roi, (26, 43, 46), (34, 255, 255))
roi_hist = cv.calcHist([hsv_roi], [0], mask, [180], [0, 180])
cv.normalize(roi_hist, roi_hist, 0, 255, cv.NORM_MINMAX)

# search and track
tracking_path = []
term_crit = (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1)
while True:
    ret, frame = cap.read()
    if not ret:
Code example #19
def main():

    parser = argparse.ArgumentParser(description="Label images YOLO style")
    parser.add_argument("inputFolder",
                        help="Path of folder containing images to label",
                        type=str)
    parser.add_argument(
        "outputFolder",
        help="Path of folder where labeled images will be sent and "
        "label files will be saved",
        type=str)

    args = parser.parse_args()

    data_folder = Path(args.inputFolder)
    final_folder = Path(args.outputFolder)

    if not data_folder.is_dir():
        print("{} is not a directory".format(data_folder))
        sys.exit(1)

    if not final_folder.is_dir():
        print("{} is not a directory".format(final_folder))
        sys.exit(1)

    # Recognize jpg or jpeg images
    images = list(data_folder.glob('*.jpg'))
    images.extend(list(data_folder.glob('*.jpeg')))
    random.shuffle(images)

    for file in images:
        print('Processing image {}'.format(file))

        img = cv.imread(str(file))

        boolDone = False

        fid = open(final_folder.joinpath(file.stem + '.txt'), 'w')

        while not boolDone:
            cv.imshow('image', img)
            roi = cv.selectROI('image', img, False)

            if np.max(roi) > 0:
                print(roi)
                x = np.round(
                    (float(roi[0]) + (float(roi[2])) / 2) / img.shape[1], 2)
                y = np.round(
                    (float(roi[1]) + (float(roi[3]) / 2)) / img.shape[0], 2)
                width = np.round(float(roi[2]) / img.shape[1], 2)
                height = np.round(float(roi[3]) / img.shape[0], 2)

                annClass = easygui.integerbox('What class? class 0 to quit',
                                              'Class box',
                                              lowerbound=0,
                                              upperbound=100)

                if annClass == 0:
                    boolDone = True
                else:
                    fid.write(
                        str(annClass - 1) + ' ' + str(x) + ' ' + str(y) + ' ' +
                        str(width) + ' ' + str(height) + '\n')
                    if annClass == 1:
                        cv.rectangle(img, (roi[0], roi[1]),
                                     (roi[0] + roi[2], roi[1] + roi[3]),
                                     (0, 255, 0), 2)
                    elif annClass == 2:
                        cv.rectangle(img, (roi[0], roi[1]),
                                     (roi[0] + roi[2], roi[1] + roi[3]),
                                     (0, 255, 255), 2)
                    elif annClass == 3:
                        cv.rectangle(img, (roi[0], roi[1]),
                                     (roi[0] + roi[2], roi[1] + roi[3]),
                                     (0, 0, 255), 2)
                    elif annClass == 4:
                        cv.rectangle(img, (roi[0], roi[1]),
                                     (roi[0] + roi[2], roi[1] + roi[3]),
                                     (255, 255, 0), 2)
            else:
                boolDone = True

        fid.close()
        shutil.move(file,
                    final_folder.joinpath(file.stem + '.jpg'))
Code example #20
vidcap = cv2.VideoCapture("Souf.m4v")
success, frame = vidcap.read()


targets = [
    Target((353, 123, 51, 22), frame),
    Target((509, 108, 104, 73), frame)
]

phones = [
    Phone("Red joycon", [161, 155, 84], [179, 255, 255], [79, 280, 66, 70]),
    Phone("Blue joycon", [94, 80, 2], [126, 255, 255], [203, 225, 59, 90])
]

if len(sys.argv) > 1:
    print(cv2.selectROI("_", frame))
    cv2.destroyWindow("_")
    quit(0)

frameCount = 0
while True:
    if cv2.waitKey(1) == ord('q') or not success:
        break
    for t in targets:
        t.update(frame)


    # Only check for joycons every 30 frames
    if frameCount % 30 == 0:
        DrawRect(frame, 0, 0, 10, 10, (0, 0, 200))

Code example #21
    print('Input video name is missing')
    exit()

print('Select 3 tracking targets')

cv.namedWindow("tracking")
camera = cv.VideoCapture(sys.argv[1])
tracker = cv.legacy.MultiTracker_create()
init_once = False

ok, image = camera.read()
if not ok:
    print('Failed to read video')
    exit()

bbox1 = cv.selectROI('tracking', image)
bbox2 = cv.selectROI('tracking', image)
bbox3 = cv.selectROI('tracking', image)

while camera.isOpened():
    ok, image = camera.read()
    if not ok:
        print('no image to read')
        break

    if not init_once:
        ok = tracker.add(cv.legacy.TrackerMIL_create(), image, bbox1)
        ok = tracker.add(cv.legacy.TrackerMIL_create(), image, bbox2)
        ok = tracker.add(cv.legacy.TrackerMIL_create(), image, bbox3)
        init_once = True
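Instead of calling `selectROI` three times, OpenCV also offers `cv2.selectROIs` (plural), which lets the user draw any number of boxes in one session; a sketch using the same window and tracker as above:

boxes = cv.selectROIs('tracking', image, showCrosshair=True, fromCenter=False)
for bbox in boxes:
    # selectROIs returns an array of (x, y, w, h) rows
    tracker.add(cv.legacy.TrackerMIL_create(), image, tuple(bbox))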
Code example #22
def main():

    tracker_types = [
        'BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE',
        'CSRT'
    ]
    tracker_type = tracker_types[2]

    (major_ver, minor_ver, subminor_ver) = cv2.__version__.split('.')
    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()
        if tracker_type == 'MOSSE':
            tracker = cv2.TrackerMOSSE_create()
        if tracker_type == "CSRT":
            tracker = cv2.TrackerCSRT_create()

    x = 200
    y = 200
    w = 224
    h = 224
    track_window = (x, y, w, h)
    # Reference Distance
    L0 = 100
    S0 = 50176  # 224 x 224

    # Base Distance
    LB = 100
    # Define an initial bounding box
    bbox = (x, y, w, h)  #(287, 23, 86, 320)
    #CX=int(bbox[0]+0.5*bbox[2]+3) #adding
    #CY=int(bbox[1]+0.5*bbox[3]+3) #adding

    drone = tellopy.Tello()

    try:
        drone.connect()
        drone.wait_for_connection(60.0)

        retry = 3
        container = None
        while container is None and 0 < retry:
            retry -= 1
            try:
                container = av.open(drone.get_video_stream())
            except av.AVError as ave:
                print(ave)
                print('retry...')

        #drone.takeoff()
        #sleep(5)
        #drone.land()

        # skip first 300 frames
        frame_skip = 300
        while True:
            #------------------------------------------for start
            for frame in container.decode(video=0):

                if 0 < frame_skip:
                    frame_skip = frame_skip - 1
                    continue

                image = cv2.cvtColor(numpy.array(frame.to_image()),
                                     cv2.COLOR_RGB2BGR)

                # Start timer
                timer = cv2.getTickCount()

                #start_time = time.time()

                #cv2.imshow('Canny', cv2.Canny(image, 100, 200))
                #cv2.waitKey(1)

                #if frame.time_base < 1.0/60:
                #    time_base = 1.0/60
                #else:
                #    time_base = frame.time_base
                #frame_skip = int((time.time() - start_time)/time_base)

                # Update tracker
                ok, bbox = tracker.update(image)

                # Calculate Frames per second (FPS)
                fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

                # Draw bounding box
                if ok:
                    #print('Tracking ok')
                    (x, y, w, h) = (int(bbox[0]), int(bbox[1]), int(bbox[2]),
                                    int(bbox[3]))
                    CX = int(bbox[0] + 0.5 * bbox[2])  #Center of X
                    CY = int(bbox[1] + 0.5 * bbox[3])
                    S0 = bbox[2] * bbox[3]
                    print("CX,CY,S0,x,y=", CX, CY, S0, x, y)
                    # Tracking success
                    p1 = (x, y)
                    p2 = (x + w, y + h)
                    cv2.rectangle(image, p1, p2, (255, 0, 0), 2, 1)

                else:
                    # Tracking failure
                    #print('Tracking failure')
                    cv2.putText(image, "Tracking failure detected", (100, 80),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

                cv2.imshow('Original', image)

                key = cv2.waitKey(1) & 0xff
                if key == ord('q'):
                    print('Q!')
                    break

                if key == ord('r'):
                    bbox = cv2.selectROI(image, False)
                    print(bbox)
                    (x, y, w, h) = (int(bbox[0]), int(bbox[1]), int(bbox[2]),
                                    int(bbox[3]))
                    # Initialize tracker with first frame and bounding box
                    ok = tracker.init(image, bbox)


#-------------------------------------------------for end
            break
        print('stop fly')

    except Exception as ex:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback)
        print(ex)
    finally:
        drone.land()
        drone.quit()
        cv2.destroyAllWindows()
Code example #23
from collections import deque

import cv2
import numpy as np

tracker = cv2.TrackerKCF_create()  # inicializa tracker
boundingBox = None  # inicializa ROI vazia
vs = cv2.VideoCapture(
    "../src/services/uploads/biceps.mp4")  # incializa a leitura do vídeo
pts = deque(maxlen=200)  # inicializa lista de pontos a serem renderizados

grayTreatment = True

while True:
    # read the frame first so it exists before any key handling below
    frame = vs.read()
    frame = frame[1]
    if frame is None:
        break

    (H, W) = frame.shape[:2]

    key = cv2.waitKey(1) & 0xFF  # poll for a key press
    if key == ord("s"):  # if the "s" key was pressed
        boundingBox = cv2.selectROI(
            "Frame", frame, fromCenter=False,
            showCrosshair=True)  # start selecting the area of interest
        tracker.init(frame, boundingBox)  # initialize the tracker on it

    elif key == ord("q"):  # if "q" was pressed, end the loop
        break

    if boundingBox is not None:  # once an area to follow has been selected
Code example #25
    # all data collected in the dictionary and returns to the main code
    cbf_data = {"centers": centers, "perimeters": perimeters, "areas": areas, "radius": rads}
    return cbf_data


img_org = cv.imread("G:\Google\Madarek\Resume\CDRs\Pistachio Sorter By Ali\Basler images/3.jpg")
print("input shape array is: ", img_org.shape)

#Fix the format of the input picture
img = cv.cvtColor(img_org, cv.COLOR_BGR2GRAY)
fromCenter = False
Boundaries = []

# Select an ROI where the object is present; choose only one object, since this trial processes a single object at a time
ROI_bounding = cv.selectROI("Image", img, fromCenter)

# After confirming the selection (ENTER/SPACE), crop the selected region.
ROI = img[int(ROI_bounding[1]):int(ROI_bounding[1] + ROI_bounding[3]),
      int(ROI_bounding[0]):int(ROI_bounding[0] + ROI_bounding[2])]
cv.imshow("ROI" + str(ROI_bounding), ROI)
cv.imwrite("ROI" + str(ROI_bounding) + ".jpg", ROI)

# Give the windows a moment to render the images above
cv.waitKey(10)

# At this point we have the object
# Apply a few filters to remove noise; morphological opening and closing could be added as well.
img = cv.medianBlur(ROI, 5)
blur1 = cv.GaussianBlur(img, (5, 5), 10)
blur1 = cv.GaussianBlur(blur1, (5, 5), 10)
Code example #26
File: blend.py Project: puneetmanghwani/pyml
#!/usr/bin/python3
import cv2

im = cv2.imread("goku.jpg")
fromCenter = False
# Select the first ROI (the region to copy from)
r = cv2.selectROI(im, fromCenter)

# Select the second ROI (the region to blend onto)
r1 = cv2.selectROI(im, fromCenter)

# Crop image

imCrop = im[int(r[1]):int(r[1] + r[3]), int(r[0]):int(r[0] + r[2])]
imCrop1 = im[int(r1[1]):int(r1[1] + r1[3]), int(r1[0]):int(r1[0] + r1[2])]

width, height = imCrop1.shape[1], imCrop1.shape[0]
image = cv2.resize(imCrop, (width, height))

rows, cols, channels = image.shape
roi = im[int(r1[1]):int(r1[1] + r1[3]), int(r1[0]):int(r1[0] + r1[2])]
# Now create a mask of logo and create its inverse mask also
img2gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
ret, mask = cv2.threshold(img2gray, 10, 255, cv2.THRESH_BINARY)
mask_inv = cv2.bitwise_not(mask)
# Now black-out the area of logo in ROI
img1_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
# Take only region of logo from logo image.
img2_fg = cv2.bitwise_and(image, image, mask=mask)
# Put logo in ROI and modify the main image
dst = cv2.add(img1_bg, img2_fg)
Code example #27
def detect_moving_target(old_gray, new_gray):
    # function body missing from the original snippet
    pass


# open the camera for warming up
cap = cv2.VideoCapture('slow.MOV')

# grab the first frame
old_frame = get_frame_from(cap)
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)

# initiate a KCF tracker
tracker = cv2.TrackerKCF_create()

# possible movement ROIs


# start the tracking
while True:
    # Record FPS
    timer = cv2.getTickCount()

    # read the current frame
    cur_frame = get_frame_from(cap)

    # covert to gray scale
    cur_gray = cv2.cvtColor(cur_frame, cv2.COLOR_BGR2GRAY)

    # Calculate Frames per second (FPS)
    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
    # Display FPS on frame
    cv2.putText(cur_frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2);
    # display cur frame
    cv2.imshow('video', cur_frame)
    # waitkey
    if cv2.waitKey(1) == ord('q'):
        break


(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')

if __name__ == '__main__':

    # Set up tracker.
    # Instead of MIL, you can also use

    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    tracker_type = tracker_types[2]

    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()

    # Read video
    video = cv2.VideoCapture('slow.MOV')

    # Exit if video not opened.
    if not video.isOpened():
        print("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()
    frame = cv2.resize(frame, dsize=(0, 0), fx=0.5, fy=0.5)

    # Define an initial bounding box
    bbox = (287, 23, 86, 320)

    # Uncomment the line below to select a different bounding box
    bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)

    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break
        frame = cv2.resize(frame, dsize=(0, 0), fx=0.5, fy=0.5)

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)

        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2);

        # Display result
        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27: break
Code example #28
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Provide template ")
ap.add_argument("-n",
                "--names",
                required=True,
                help="Provide names in a txt file seperated by ', ' ")
args = vars(ap.parse_args())

file = open(args["names"], 'r')
lines = file.readlines()

list_word = []
for l in lines:
    list_word.append(l.split(", "))

im = cv2.imread(args["image"])

(x, y, a, b) = cv2.selectROI(im)

for item in list_word:
    for i in item:
        im2 = cv2.imread(args["image"])
        cv2.putText(im2,
                    str(i), (x, y),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    1.0, (0, 0, 0),
                    lineType=cv2.LINE_AA)
        cv2.imwrite(os.path.join('Generated', '%s.jpg' % str(i)), im2)

cv2.waitKey(0)
cv2.destroyAllWindows()
Code example #29
import cv2

# frame_path = '/Users/marinaalonsopoal/Desktop/Tracking/Datasets/SMOT/juggling/img/000022.jpg'
frame_path = '/Users/marinaalonsopoal/Desktop/Tracking/Datasets/eSMOT/bolt1/img/00000024.jpg'

image = cv2.imread(frame_path)

# Display
cv2.imshow("output", image)

fromCenter = False
r = cv2.selectROI("Image", image, fromCenter)

x, y, w, h = r  # selectROI returns (x, y, width, height), not corner pairs
print('r:', r)

cv2.destroyAllWindows()
Code example #30
        # mean-shift tracking from the back projection and the previous window ---(3)
        ret, (x,y,w,h) = cv2.meanShift(dst, (x,y,w,h), termination)
        # draw a rectangle at the new position ---(4)
        cv2.rectangle(img_draw, (x,y), (x+w, y+h), (0,255,0), 2)
        # show the color image and the back-projection image side by side
        result = np.hstack((img_draw, cv2.cvtColor(dst, cv2.COLOR_GRAY2BGR)))
    else:  # no histogram registered for the target object yet
        cv2.putText(img_draw, "Hit the Space to set target to track", (10,30),cv2.FONT_HERSHEY_SIMPLEX, 1, (0,0,255), 1, cv2.LINE_AA)
        result = img_draw

    cv2.imshow(win_name, result)
    key = cv2.waitKey(1) & 0xff
    if key == 27: # Esc
        break
    elif key == ord(' '): # space bar: select the ROI
        x,y,w,h = cv2.selectROI(win_name, frame, False)
        if w and h:    # the ROI was selected properly
            # set the roi as the initial tracking target ---(5)
            roi = frame[y:y+h, x:x+w]
            # convert the roi to HSV color ---(6)
            roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
            mask = None
            # compute the histogram of the roi ---(7)
            roi_hist = cv2.calcHist([roi], [0], mask, [180], [0,180])
            cv2.normalize(roi_hist, roi_hist, 0, 255, cv2.NORM_MINMAX)
        else:                       # the ROI was not set
            roi_hist = None
else:
    print('no camera!')
cap.release()
cv2.destroyAllWindows()
Code example #31
File: ObjectTracking.py Project: Arya04/hello-world
			text = f"{k}: {v}"
			# write the information on the frame in blue color at the bottom left corner
			cv2.putText(frame, text, (10, Height - ((i * 20) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 3)

	# else
	## we are not tracking any object currently

	# display the frame
	cv2.imshow("Video Frame", frame)
	# wait for key to be pressed by user
	key = cv2.waitKey(1) & 0xFF

	# if 's' is pressed, pause the video
	## select a bounding box around an object to track in the video
	if key == ord("s"):
		# Select (or draw) a bounding box around the object to track in the video
		init_BB = cv2.selectROI("Frame", frame, fromCenter = False, showCrosshair = True)

		# execute the tracker on the selected object and start the fps counter.
		tracker.init(frame, init_BB)
		fps = FPS().start()

	# if 'q' is pressed, stop the video and terminate
	elif key == ord('q'):
		break

# stop capturing video
vs.release()
# destroy all the windows and terminate
cv2.destroyAllWindows()
Code example #32
File: tracker_test.py Project: jclundy/foosball
        print("Could not open video")
        sys.exit()
 
    # Read first frame.
    ok, frame = video.read()
    if not ok:
        print('Cannot read video file')
        sys.exit()
    height, width = frame.shape[:2]
     
    # Define an initial bounding box
    #bbox = (287, 23, 86, 320)
 
    # Uncomment the line below to select a different bounding box
    frame = cv2.resize(frame,(int(width/4),int(height/4)))
    bbox = cv2.selectROI(frame, False)
 
    # Initialize tracker with first frame and bounding box
    ok = tracker.init(frame, bbox)
 
    while True:
        # Read a new frame
        ok, frame = video.read()
        if not ok:
            break
        
        frame = cv2.resize(frame,(int(width/4),int(height/4))) 
        # Update tracker
        ok, bbox = tracker.update(frame)
 
        # Draw bounding box
Code example #33
        # loop over the info tuples and display on frame
        for (i, (k, v)) in enumerate(info):
            text = "{}: {}".format(k, v)
            cv2.putText(frame, text, (10, H - ((i * 20) + 20)), 
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
    
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF

    # if the 's' key is selected, we are going to "select" a bounding
    # box to track
    if key == ord("s"):
        # select the bounding box of the object we want to track
        initBB = cv2.selectROI("Frame", frame, fromCenter = False)

        # start OpenCV Object Tracker using the supplied bounding box 
        # coordinates, then start the FPS throughput estimator as well
        tracker.init(frame, initBB)
        fps = FPS().start()
    
    elif key == ord("q"):
        break

# release webcam pointer
if not args.get("video", False):
    vs.stop()

# else, release the file pointer
else:
Code example #34
File: init.py Project: Yipsix/haleyVisma
known_face_encodings = []

for face_encoding in known_face_names:
    known_face_encodings.append(face_encoding[1])

print('done loading')

engine = pyttsx3.init()

cap = cv2.VideoCapture(1)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
im = cap.read()[1]  # grab a single frame to select the ROI from

r = cv2.selectROI(im)
process_this_frame = True

frameCount = 0

while (True):

    ret, frame = cap.read()
    # zoom in
    crop_img = frame[int(r[1]):int(r[1]+r[3]), int(r[0]):int(r[0]+r[2])]
    # Convert the image from BGR color (which OpenCV uses) to RGB color (which face_recognition uses)
    
    smallImg = cv2.resize(crop_img, (0,0), fx=0.25, fy=0.25) 
    rgb_small_frame = smallImg[:, :, ::-1]
    # Only process every fourth frame of video to save time
    if frameCount % 4 == 0:
Code example #35
def main(args):
    root_cfg = cfg
    root_cfg.merge_from_file(args.config)
    logger.info("Load experiment configuration at: %s" % args.config)

    # resolve config
    root_cfg = complete_path_wt_root_in_cfg(root_cfg, ROOT_PATH)
    root_cfg = root_cfg.test
    task, task_cfg = specify_task(root_cfg)
    task_cfg.freeze()
    window_name = task_cfg.exp_name
    # build model
    model = model_builder.build(task, task_cfg.model)
    # build pipeline
    pipeline = pipeline_builder.build(task, task_cfg.pipeline, model)
    dev = torch.device(args.device)
    pipeline.set_device(dev)
    init_box = None
    template = None
    if len(args.init_bbox) == 4:
        init_box = args.init_bbox

    video_name = "untitled"
    vw = None
    resize_ratio = args.resize
    dump_only = args.dump_only

    # create video stream
    # from webcam
    if args.video == "webcam":
        logger.info("Starting video stream...")
        vs = cv2.VideoCapture(0)
        vs.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'))
        formated_time_str = time.strftime(r"%Y%m%d-%H%M%S", time.localtime())
        video_name = "webcam-{}".format(formated_time_str)
    # from image files
    elif not osp.isfile(args.video):
        logger.info("Starting from video frame image files...")
        vs = ImageFileVideoStream(args.video, init_counter=args.start_index)
        video_name = osp.basename(osp.dirname(args.video))
    # from video file
    else:
        logger.info("Starting from video file...")
        vs = cv2.VideoCapture(args.video)
        video_name = osp.splitext(osp.basename(args.video))[0]

    # create video writer to output video
    if args.output:
        # save as image files
        if not str(args.output).endswith(r".mp4"):
            vw = ImageFileVideoWriter(osp.join(args.output, video_name))
        # save as a single video file
        else:
            vw = VideoWriter(args.output, fps=20)

    # loop over sequence
    frame_idx = 0  # global frame index
    while vs.isOpened():
        key = 255
        ret, frame = vs.read()
        if ret:
            logger.debug("frame: {}".format(frame_idx))
            if template is not None:
                time_a = time.time()
                rect_pred = pipeline.update(frame)
                logger.debug(rect_pred)
                show_frame = frame.copy()
                time_cost = time.time() - time_a
                bbox_pred = xywh2xyxy(rect_pred)
                bbox_pred = tuple(map(int, bbox_pred))
                cv2.putText(show_frame,
                            "track cost: {:.4f} s".format(time_cost),
                            (128, 20), cv2.FONT_HERSHEY_COMPLEX, font_size,
                            (0, 0, 255), font_width)
                cv2.rectangle(show_frame, bbox_pred[:2], bbox_pred[2:],
                              (0, 255, 0))
                if template is not None:
                    show_frame[:128, :128] = template
            else:
                show_frame = frame
            show_frame = cv2.resize(
                show_frame,
                (int(show_frame.shape[1] * resize_ratio),
                 int(show_frame.shape[0] * resize_ratio)))  # resize
            if not dump_only:
                cv2.imshow(window_name, show_frame)
            if vw is not None:
                vw.write(show_frame)
        else:
            break
        # catch key if
        if (init_box is None) or (vw is None):
            logger.debug("Press key s to select object.")
            if (frame_idx == 0):
                wait_time = 5000
            else:
                wait_time = 30
            key = cv2.waitKey(wait_time) & 0xFF
        logger.debug("key: {}".format(key))
        if key == ord("q"):
            break
        # if the 's' key is selected, we are going to "select" a bounding
        # box to track
        elif key == ord("s"):
            # select the bounding box of the object we want to track (make
            # sure you press ENTER or SPACE after selecting the ROI)
            logger.debug("Select object to track")
            box = cv2.selectROI(window_name,
                                frame,
                                fromCenter=False,
                                showCrosshair=True)
            if box[2] > 0 and box[3] > 0:
                init_box = box
        elif key == ord("c"):
            logger.debug(
                "init_box/template released, press key s again to select object."
            )
            init_box = None
            template = None
        if (init_box is not None) and (template is None):
            template = cv2.resize(
                frame[int(init_box[1]):int(init_box[1] + init_box[3]),
                      int(init_box[0]):int(init_box[0] + init_box[2])],
                (128, 128))
            pipeline.init(frame, init_box)
            logger.debug(
                "pipeline initialized with bbox : {}".format(init_box))
        frame_idx += 1

    vs.release()
    if vw is not None:
        vw.release()
    cv2.destroyAllWindows()
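The loop above relies on an xywh2xyxy helper imported elsewhere in the
project. A minimal sketch of what such a helper plausibly looks like (an
assumption for illustration, not necessarily the project's exact code):

def xywh2xyxy(rect):
    # Convert an (x, y, w, h) rect into (x1, y1, x2, y2) corner coordinates,
    # matching the corner points passed to cv2.rectangle above.
    x, y, w, h = rect
    return (x, y, x + w, y + h)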
Code example #36
0
def main(reference_digits):
    vs = get_video_stream()
    time_parsable = False

    # Loop over frames from the video stream
    while True:
        # Grab the current frame, then handle if we are using a
        # VideoStream or VideoCapture object
        frame = vs.read()
        frame = frame[1] if args.get("video", False) else frame

        # Check to see if we have reached the end of the stream
        if frame is None:
            break

        # Grab the updated bounding box coordinates (if any) for each
        # object that is being tracked
        (success, boxes) = trackers.update(frame)

        # Loop over the bounding boxes and draw them on the frame
        for box in boxes:
            (x, y, w, h) = [int(v) for v in box]
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        if reference_digits:
            while time_parsable is False:
                ts_box = get_timestamp_box()
                frame_time = get_frame_time(frame, reference_digits, ts_box)
                if frame_time is not None:
                    time_parsable = True

            frame_time = get_frame_time(frame, reference_digits, ts_box)

            cv2.rectangle(
                frame, (int(ts_box[0]), int(ts_box[1])),
                (int(ts_box[0] + ts_box[2]), int(ts_box[1] + ts_box[3])),
                (0, 0, 255), 2)
            # Guard against frames where the timestamp fails to parse
            if frame_time is not None:
                cv2.putText(frame, frame_time.strftime("%Y-%m-%d %H:%M:%S"),
                            (int(ts_box[0]) - 30, int(ts_box[1]) + 55),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)

        # Show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(0) & 0xFF

        # If the 's' key is selected, we are going to "select" a bounding
        # box to track
        if key == ord("s"):
            # Select the bounding box of the object we want to track (make
            # sure to press ENTER or SPACE after selecting the ROI)
            box = cv2.selectROI("Object Tracker Selection",
                                frame,
                                fromCenter=False,
                                showCrosshair=True)

            if box == (0, 0, 0, 0):
                # Selection canceled
                continue

            # Create a new object tracker for the bounding box and add it
            # to our multi-object tracker
            tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
            trackers.add(tracker, frame, box)

        elif key == ord("q"):
            break

    # If we are using a webcam, release the pointer
    if not args.get("video", False):
        vs.stop()

    # Otherwise, release the file pointer
    else:
        vs.release()

    # Close all windows
    cv2.destroyAllWindows()
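This sample assumes an OPENCV_OBJECT_TRACKERS dictionary mapping tracker
names to constructors, as is common in scripts of this style. A plausible
definition (an assumption: the original file may list more trackers, and
constructor availability varies by OpenCV build):

OPENCV_OBJECT_TRACKERS = {
    # each entry maps a CLI-friendly name to a tracker factory function
    "csrt": cv2.TrackerCSRT_create,
    "kcf": cv2.TrackerKCF_create,
    "mil": cv2.TrackerMIL_create,
    "medianflow": cv2.TrackerMedianFlow_create,
}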
Code example #37
0
File: select_roi.py Project: 00lab/opencv-tutorial
import cv2
import sys


# Image file path
# img_path = 'blue-color-block.png'
img_path = sys.argv[1]

# Read in the image
img = cv2.imread(img_path)
# Create a window
cv2.namedWindow("image", flags=cv2.WINDOW_NORMAL | cv2.WINDOW_FREERATIO)
cv2.imshow("image", img)
# Whether to show the crosshair
showCrosshair = True

# If True, the initial mouse position becomes the center of the ROI;
# False: drag from the top-left corner to the bottom-right corner
fromCenter = False
# Select ROI
rect = cv2.selectROI("image", img, showCrosshair, fromCenter)

print("选中矩形区域")
(x, y, w, h) = rect

# Crop image
imCrop = img[y : y+h, x:x+w]

# Display cropped image
cv2.imshow("image_roi", imCrop)
cv2.imwrite("image_roi.png", imCrop)
cv2.waitKey(0)
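Note that selectROI returns the rect as (x, y, w, h) and yields (0, 0, 0, 0)
when the selection is cancelled with ESC, so a more robust version of the
crop above would guard against an empty rectangle, e.g.:

if w > 0 and h > 0:
    # only crop and save when a non-empty region was actually selected
    imCrop = img[y:y + h, x:x + w]
    cv2.imwrite("image_roi.png", imCrop)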
Code example #38
0
    cv2.namedWindow("image", flags=cv2.WINDOW_NORMAL | cv2.WINDOW_FREERATIO)
    cv2.imshow("image", img)
    # Whether to show the crosshair
    showCrosshair = False
    # If True, the initial mouse position becomes the center of the ROI;
    # False: drag from the top-left corner to the bottom-right corner
    fromCenter = False
    # Select ROI
    ch = cv2.waitKey(0)
    # If the 'n' key is pressed, skip this image
    if ch == ord('n'):
        continue
    else:
        # Confirm the selected rectangle with SPACE
        rect = cv2.selectROI("image", img, showCrosshair, fromCenter)
        if rect == (0, 0, 0, 0):
            continue
        else:
            print("选中 %s 矩形区域" % (img_path))
            x, y, w, h = rect
            if h < w:
                # Crop image
                if y + h / 2 - w / 2 < 0:
                    imCrop = img[0:0 + w, x:x + w, :]
                    rect = [x, 0, x + w, w]
                elif y + h / 2 + w / 2 > img.shape[0]:
                    imCrop = img[img.shape[0] - w:img.shape[0], x:x + w, :]
                    rect = [x, img.shape[0] - w, x + w, img.shape[0]]
                else:
                    # Center-crop to a square of side w (completed from the
                    # parallel branches above; the source snippet truncates
                    # mid-statement here)
                    imCrop = img[int(y + h / 2 - w / 2):int(y + h / 2 + w / 2),
                                 x:x + w, :]
                    rect = [x, int(y + h / 2 - w / 2), x + w,
                            int(y + h / 2 + w / 2)]
Code example #39
0
import sys
import cv2 as cv

# (guard completed from context: the source snippet begins mid-if)
if len(sys.argv) != 2:
    print('Input video name is missing')
    exit()

print('Select 3 tracking targets')

cv.namedWindow("tracking")
camera = cv.VideoCapture(sys.argv[1])
tracker = cv.MultiTracker_create()
init_once = False

ok, image=camera.read()
if not ok:
    print('Failed to read video')
    exit()

bbox1 = cv.selectROI('tracking', image)
bbox2 = cv.selectROI('tracking', image)
bbox3 = cv.selectROI('tracking', image)

while camera.isOpened():
    ok, image=camera.read()
    if not ok:
        print('no image to read')
        break

    if not init_once:
        ok = tracker.add(cv.TrackerMIL_create(), image, bbox1)
        ok = tracker.add(cv.TrackerMIL_create(), image, bbox2)
        ok = tracker.add(cv.TrackerMIL_create(), image, bbox3)
        init_once = True
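    # (the source snippet truncates here) A typical continuation -- an
    # assumption modelled on OpenCV's MultiTracker sample, not necessarily
    # the original file -- updates all trackers and draws the current boxes:
    ok, boxes = tracker.update(image)
    for newbox in boxes:
        p1 = (int(newbox[0]), int(newbox[1]))
        p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
        cv.rectangle(image, p1, p2, (200, 0, 0), 2)
    cv.imshow('tracking', image)
    if cv.waitKey(1) & 0xFF == 27:  # ESC to quit
        break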
Code example #40
0
#tracker = cv2.TrackerMIL_create()
#tracker = cv2.TrackerKCF_create()
tracker = cv2.TrackerTLD_create()
#tracker = cv2.TrackerMedianFlow_create()

# Get tracker name
tracker_name = str(tracker).split()[0][1:]

# init i counter
i = 0

# Start from specific frame
while(i<725):
    ret, frame = cap.read()
    i=i+1
roi = cv2.selectROI(frame, False)
ret = tracker.init(frame, roi)


while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # stop cleanly at end of video instead of passing None to the tracker
        break
    success, roi = tracker.update(frame)
    (x,y,w,h) = tuple(map(int, roi))
    if success:
        p1 = (x,y)
        p2 = (x+w, y+h)
        cv2.rectangle(frame, p1, p2, (0,255,0), 3)
    else:
        cv2.putText(frame, "Failure to Detect Tracking!!", (100,200), cv2.FONT_HERSHEY_SIMPLEX, 1,(0,0,255),3)
    # Display tracker type on frame
    cv2.putText(frame, tracker_name, (20,400), cv2.FONT_HERSHEY_SIMPLEX, 1, (0,255,0),3)
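    # (the source snippet truncates here) a minimal, assumed continuation
    # that displays the annotated frame and exits on ESC:
    cv2.imshow(tracker_name, frame)
    if cv2.waitKey(30) & 0xFF == 27:
        break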
Code example #41
0
            coordinates = coordinates + [y + h/2]
        print(coordinates)
        dataframe = pd.DataFrame([coordinates],
                                 columns=['R1x', 'R1y', 'R2x', 'R2y', 'R3x', 'R3y',
                                          'L1x', 'L1y', 'L2x', 'L2y', 'L3x', 'L3y'])
        print(dataframe)
        dataframe.to_csv(r'testing.csv', mode='a', header=False)

    # show the output frame
    cv2.imshow("Frame", displayed_image)
    key = cv2.waitKey(1) & 0xFF
    # if the 's' key is selected, we are going to "select" a bounding
    # box to track
    if key == ord("s"):
        # select the bounding box of the object we want to track (make
        # sure you press ENTER or SPACE after selecting the ROI)
        box = cv2.selectROI("Frame", displayed_image, fromCenter=False,
                               showCrosshair=True)

        # start OpenCV object tracker using the supplied bounding box
        # coordinates, then start the FPS throughput estimator as well
        initBB.append(box)
        tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
        trackers.add(tracker, frame, box)
        fps = FPS().start()

    # if the `q` key was pressed, break from the loop
    elif key == ord("q"):
        break
# if we are using a webcam, release the pointer
if not args.get("video", False):
    vs.stop()
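# otherwise, release the file pointer and close all windows (an assumed
# ending, mirroring the similar sample above; the source truncates here)
else:
    vs.release()

cv2.destroyAllWindows()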
Code example #42
0
if not success:
    print('Failed to read video')
    sys.exit(1)

## Select boxes
bboxes = []
colors = []

# OpenCV's selectROI function doesn't work for selecting multiple objects in
# Python, so we call it in a loop until we are done selecting all objects
while True:
    # Draw a bounding box over an object.
    # selectROI's default behaviour is to draw the box starting from the
    # center; when fromCenter is False, you draw from the top-left corner.
    bbox = cv2.selectROI('MultiTracker', frame)
    bboxes.append(bbox)
    colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))
    print("Press q to quit selecting boxes and start tracking")
    print("Press any other key to select the next object")
    k = cv2.waitKey(0) & 0xFF
    if k == ord('q'):  # q is pressed
        break

print('Selected bounding boxes {}'.format(bboxes))
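# Note: newer OpenCV builds also expose cv2.selectROIs (plural), which lets
# you draw several boxes in a single call and returns them as rows of
# (x, y, w, h). A minimal sketch, assuming OpenCV >= 3.4 with highgui:
#
#   rois = cv2.selectROIs('MultiTracker', frame, showCrosshair=True,
#                         fromCenter=False)
#   bboxes = [tuple(r) for r in rois]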

## Initialize MultiTracker
# There are two ways you can initialize MultiTracker:
# 1. tracker = cv2.MultiTracker("CSRT")
#    All the trackers added to this multitracker
#    will use the CSRT algorithm as default