Example #1
def main():
    img = cv2.imread(os.path.abspath(os.path.dirname(__file__)) + '/bolt.png')
    split = 8
    shape = get_size(img)
    tracker = Tracker(split, shape)

    init = False


    top_left = np.array((0, 0))
    bottom_right = shape

    while True:
        draw = img.copy()
        # img = np.zeros(img.shape)
        if init:
            draw = tracker.track_hog(img)
            break
        else:
            pass
            # draw = cv2.rectangle(draw, tuple(top_left),
            #                      tuple(bottom_right), (0, 255, 0),
            #                      thickness=2)

        cv2.imshow('Video', draw)
        retval = cv2.waitKey(10)

        if retval == 27:
            break
        elif retval == 32:
            if not init:
                tracker.set_reference(img, top_left, bottom_right)
                init = True
        elif retval == 112:
            cv2.waitKey(0)
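
The snippet relies on a project-local get_size helper and a Tracker exposing set_reference/track_hog, none of which are shown. A minimal sketch of get_size, assuming it returns the frame size as an (x, y) array so it can double as the initial bottom_right corner:

import numpy as np

def get_size(img):
    # OpenCV images are (rows, cols, channels); the caller treats the
    # result as an (x, y) corner, so swap to (width, height).
    h, w = img.shape[:2]
    return np.array((w, h))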
Example #2
    def __init__(self, status=ax.State.START_SESSION):
        self.tracker = Tracker()
        self.tracker.appendContainer(ax.State.STARTING_STATE)
        self.startTime = time.localtime(time.time())[:]
        self.endTime = -1
        self.sessionName = 'PlaceHolder'
        # Load settings from userSettings.txt
        with open('userSettings.txt', 'r') as f:
            fileData = f.read().split('\n')
        lastSessionName = fileData[0].split(' = ')[-1]
        self.numberOfSessions = int(fileData[1].split(' = ')[-1])

        if lastSessionName == 'None':  #no last sessions
            self.status = ax.State.START_SESSION
            #name is given by: ###_dd_mm_yyyy
            self.sessionName = str(self.numberOfSessions + 1) + '_' + str(
                self.startTime[2]) + '_' + str(self.startTime[1]) + '_' + str(
                    self.startTime[0])
        else:  # load the pickle to see if the previous session ended
            lastSession = loadPickle(lastSessionName)

            if lastSession.status == ax.State.END_SESSION:  #start a new session
                self.status = ax.State.START_SESSION
                self.sessionName = str(self.numberOfSessions + 1) + '_' + str(
                    self.startTime[2]) + '_' + str(
                        self.startTime[1]) + '_' + str(self.startTime[0])
            else:  #otherwise this session is the previous session
                for i in lastSession.__dict__.keys():
                    self.__dict__[i] = lastSession.__dict__[i]
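
loadPickle is not shown; the parsing above implies userSettings.txt holds "key = value" lines (lastSessionName first, numberOfSessions second). A minimal sketch of a matching loader, assuming each session is pickled under "<sessionName>.pkl":

import pickle

def loadPickle(sessionName):
    # Hypothetical counterpart to the call above; the file naming
    # scheme is an assumption.
    with open(sessionName + '.pkl', 'rb') as f:
        return pickle.load(f)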
Example #3
def trackvid(filename, min_osize, thresh_val):
    # contours tracking etc.
    video = Tracker(filename, min_object_size=min_osize)
    try:
        if not video.file_exists:
            video.threshold_val = thresh_val
            video.load_video()
            video.compute_background()          # form background image
            video.remove_background()           # remove background
            video.threshold()                   # threshold to segment features
            video.find_distance()               # dist transform, find peaks, associate to ants, find head/gaster
            video.morpho_closing()
            video.find_objects()
            video.draw_contours()
            video.save_JSON()
            video.associate_contours(max_covariance=10,
                                     max_velocity=100,
                                     n_covariances_to_reject=20,
                                     max_tracked_objects=100,
                                     kalman_state_cov=1,
                                     kalman_init_cov=0.2,
                                     kalman_measurement_cov=1)
            video.save_association_JSON()

            print('---Contours and tracks saved')
        else:
            print('---Tracked files already exist')
    except Exception as e:
        print('---error!', e)
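
Because trackvid reports its own errors instead of raising, it can be run unattended over a directory of videos. A usage sketch; the glob pattern and parameter values are assumptions:

import glob

for path in sorted(glob.glob('videos/*.mp4')):
    trackvid(path, min_osize=50, thresh_val=30)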
Example #4
def main():
    """ Executes the program.
    """
    tracker = Tracker()
    plt.ion()
    last_total_data_used = 0

    while True:
        # Retrieve the up and down speeds
        time.sleep(0.5)
        down_speed = 8 * (tracker.get_current_download_speed() / (2**20))
        up_speed = 8 * (tracker.get_current_upload_speed() / (2**20))

        # Store it
        add_data(down_speed, up_speed)

        # Data used
        total_data_used = round(tracker.get_total_data_used() / (2**20), 3)
        write_data_used(last_total_data_used, total_data_used)
        last_total_data_used = total_data_used

        # Update & display the plot
        recv_curve, = plt.plot(times, speeds_recv)
        sent_curve, = plt.plot(times, speeds_sent)

        plt.legend([recv_curve, sent_curve], ['Download', 'Upload'])
        plt.ylabel('Mb/s', fontsize=8)
        ax = plt.gca()
        ax.tick_params(axis='x', labelsize=6)
        ax.tick_params(axis='y', labelsize=6)
        ax.get_figure().canvas.manager.set_window_title('Internet speed - N3RO')

        plt.draw()
        plt.pause(0.0001)
        plt.clf()
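
The loop converts byte counts to megabits per second via 8 * (bytes_per_s / 2**20) and appends samples to module-level times/speeds_recv/speeds_sent buffers through add_data, which is not shown. A minimal sketch, assuming bounded rolling buffers so the plot only shows recent samples:

import time
from collections import deque

MAX_POINTS = 120  # assumed window: ~1 minute at two samples per second
times = deque(maxlen=MAX_POINTS)
speeds_recv = deque(maxlen=MAX_POINTS)
speeds_sent = deque(maxlen=MAX_POINTS)

def add_data(down_speed, up_speed):
    # Timestamp each sample; old samples fall off the left of the plot.
    times.append(time.monotonic())
    speeds_recv.append(down_speed)
    speeds_sent.append(up_speed)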
Example #5
File: main.py Project: Dhawgupta/ALRS
def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--videopath", help="Path to video")
    ap.add_argument("-d", "--detector", help="Detection Algo")

    args = vars(ap.parse_args())
    if (args["videopath"] == "0"):
        source = 0
    elif (args["videopath"] == "1"):
        source = 1
    else:
        source = args["videopath"]

    if (args["detector"] == "HG" or args["detector"] == "HC"):
        detector_algo = args["detector"]
    else:
        print " Detector algo not correct"
        quit()

############ Detection Part starts here ##############
    detector = Detector(src=source, detector=detector_algo).start()
    while True:
        frame = detector.read()
        frame = imutils.resize(frame, width=400)
        cv2.imshow("Detection", frame)
        key = cv2.waitKey(20) & 0xFF
        if key == 27:
            break
    detector.stop()
    rect, img = detector.get_roi()

    cv2.destroyAllWindows()
    # print rect

    ############ Detection Part ends here ##############

    ############ Tracking Part starts here ##############

    global stop_arduino_thread
    q = Queue()
    tracker = Tracker(rect, img, src=source).start()
    print(tracker)
    data = tracker.get_points()
    q.put(data)
    thread_arduino = Thread(target=send_arduino, args=(q, ))
    thread_arduino.start()
    while True:
        frame = tracker.read()
        frame = imutils.resize(frame, width=400)
        cv2.imshow("Frame", frame)
        data = tracker.get_points()
        q.put(data)
        key = cv2.waitKey(50) & 0xFF
        if key == 27:
            break
    stop_arduino_thread = True
    tracker.stop()
    cv2.destroyAllWindows()
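
send_arduino and stop_arduino_thread are assumed by the snippet but not shown. A sketch of a consumer that drains the queue until the main loop flips the flag; the actual serial write depends on the project's wire format:

from queue import Empty

stop_arduino_thread = False

def send_arduino(q):
    # Hypothetical consumer for the producer loop above.
    while not stop_arduino_thread:
        try:
            data = q.get(timeout=0.1)
        except Empty:
            continue
        # Forward `data` to the Arduino here (e.g. via pyserial);
        # the wire format is project-specific and not shown.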
Example #6
    def __init__(self, algorithm):
        self.threshold_filter = ThresholdFilter(
            np.array([24, 125, 100], dtype=np.uint8),
            np.array([36, 255, 255], dtype=np.uint8))
        self.algo = algorithm()

        # Camera Setups
        self.left_tracker = Tracker(1)
        self.right_tracker = Tracker(2)
        self.horizontal_fov = 120.0
        self.vertical_fov = 60.0
        self.d = 100

        self.centroid_algo = Centroid()
        self.left_transformed_image = np.copy(self.left_tracker.image)
        self.right_transformed_image = np.copy(self.right_tracker.image)

        self.valid = True
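
With horizontal_fov and the camera separation d recorded above, depth can be triangulated from the disparity between the left and right trackers. A pinhole-model sketch; the function and its defaults are illustrative assumptions, not part of the class:

import math

def depth_from_disparity(disparity_px, image_width,
                         horizontal_fov=120.0, baseline=100.0):
    # Focal length in pixels, derived from the horizontal field of view.
    focal_px = (image_width / 2.0) / math.tan(math.radians(horizontal_fov) / 2.0)
    # Classic stereo relation: depth = f * B / disparity (units of baseline).
    return focal_px * baseline / disparity_px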
Example #7
    def __init__(self, *args):
        self.log = Logs().getLog()
        super().__init__()
        self.point_scale = 20
        self.distance_scale = 40
        # self.label_y_offset = 10

        self.DB = Database()
        self.tracker = Tracker(self.DB)

        self.track_unknown = True
Example #8
    def moveAtoB(self, net, start, finish):
        self.current_vertex = start.name
        self.start_vertex = start.name
        self.finish_vertex = finish.name
        self.tracker = Tracker(self)
        self.report_velocity()
        self.drawer.carList.append(self.tracker)
        self.movement(net)
        self.drawer.carList.remove(self.tracker)
        self.removeCar()
        start.setUnallocated()
        finish.setUnallocated()
Example #9
    def __init__(self, addr, t_reactor):
        self.reactor = t_reactor
        self.file_addr = addr
        self.metainfo = Metainfo(self.file_addr)
        self.tracker = Tracker(self, self.metainfo)
        self.handshake_message = Handshake(self.metainfo, self.tracker)
        self.file_handler = FileReadWrite(self.metainfo)
        self.requester = Requester(self, self.metainfo)
        self.bitfield = bitarray('0' * self.metainfo.no_of_pieces)  # one cleared bit per piece
        self.peer_list = list()
        self.buildPeerList()
        self.protocol_factory = CoreTCP.PeerConnectionFactory(self)
        print(self.peer_list)
Example #10
File: win3d.py Project: drewp/headtrack
    def __init__(self, argv):
        FvwmModule.__init__(self, argv)
        self.set_mask()
        self.send("Set_Mask 4294967295")

        self.send("Move 2963p 178p", window=0x3e00004)

        # pager sends this
        ##   SetMessageMask(fd,
        ##                  M_VISIBLE_NAME |
        ##                  M_ADD_WINDOW|
        ##                  M_CONFIGURE_WINDOW|
        ##                  M_DESTROY_WINDOW|
        ##                  M_FOCUS_CHANGE|
        ##                  M_NEW_PAGE|
        ##                  M_NEW_DESK|
        ##                  M_RAISE_WINDOW|
        ##                  M_LOWER_WINDOW|
        ##                  M_ICONIFY|
        ##                  M_ICON_LOCATION|
        ##                  M_DEICONIFY|
        ##                  M_RES_NAME|
        ##                  M_RES_CLASS|
        ##                  M_CONFIG_INFO|
        ##                  M_END_CONFIG_INFO|
        ##                  M_MINI_ICON|
        ##                  M_END_WINDOWLIST|
        ##                  M_RESTACK);
        ##   SetMessageMask(fd,
        ##                  MX_VISIBLE_ICON_NAME|
        ##                  MX_PROPERTY_CHANGE);

        self.register("M_CONFIGURE_WINDOW", self.ConfigureWindow)

        log("windowlist")
        #self.tracker = self.get_windowlist()
        self.tracker = Tracker()
        log("windowlist done")

        #        for win in self.tracker.get_windows():
        #            log((win, win.name, win.x, win.y, win.width, win.height, win.desk))

        self.send("Send_WindowList")

        # pager sends this
        self.send("NOP FINISHED STARTUP")
        self.lastSend = None
Example #11
def main():
    vid = 'OJ'
    cap = cv2.VideoCapture(
        os.path.abspath(os.path.dirname(__file__)) + '/%s.gif' % vid)
    if not cap.isOpened():
        return -1

    split = 50
    _, frame = cap.read()
    size = get_size(frame)
    tracker = Tracker(split, size)
    init = False

    if vid == 'OJ':
        top_left = np.array([120, 90])
        bottom_right = np.array([175, 124])
    elif vid == 'bmw':
        top_left = np.array([66, 140])
        bottom_right = np.array([125, 190])

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        draw = frame.copy()
        if not init:
            draw = cv2.rectangle(draw,
                                 tuple(top_left),
                                 tuple(bottom_right), (0, 255, 0),
                                 thickness=2)
            tracker.set_reference(frame, top_left, bottom_right)
        else:
            draw = tracker.track(frame)

    cv2.imshow('frame', draw)
        duration = 100 if init else 0
        key = cv2.waitKey(duration)

        if key == 27:
            break
        elif key == 32:
            init = True

    cap.release()
    cv2.destroyAllWindows()
Example #12
def do_all_preparations(sequence_name, result_path, debug_path, base_vot_path,
                        params):
    sequence_path, result_path, debug_path = prepare_all_paths(
        sequence_name, result_path, debug_path, base_vot_path)
    polygon_matrix = prepare_poly_matrix(sequence_path)
    poly_array = polygon_matrix[0, :]
    image_paths = get_all_image_paths(sequence_path + 'color' + '/')
    handle_first_image(image_paths, result_path, poly_array)
    test_tracker = Tracker(uf.read_image(image_paths[0]), poly_array, None,
                           **params)
    run_params_data = {
        'test_tracker': test_tracker,
        'polygon_matrix': polygon_matrix,
        'image_paths': image_paths,
        'result_path': result_path
    }
    Tracker_params['debug_images_path_temp'] = debug_path
    return run_params_data
Example #13
    def addTracker(self):
        name = input('Enter tracker name: ')
        url = input('Enter tracker url: ')
        email = input('Enter email to notify: ')
        desirePrice = float(input('Enter desired price: '))

        for web in self.__webs.keys():
            if re.search(web, url):
                newTracker = Tracker(name, url, self.__webs[web], email,
                                     desirePrice)
                self.__trackers.append(newTracker)

                if self.__thread:
                    self.__thread.updateTrackers(self.__trackers)
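
self.__webs maps URL patterns to per-site settings (Example #26 below shows it being loaded from webs.json). A hypothetical illustration of its shape; the keys and values here are invented:

example_webs = {
    # Regex fragments matched against the tracker URL, each mapped to
    # whatever per-site settings Tracker expects.
    r"amazon\.": {"parser": "amazon"},
    r"ebay\.": {"parser": "ebay"},
}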
Example #14
def do_all_preparations(sequence_name, result_path, debug_path, base_vos_path,
                        params):
    images_path, masks_path, result_path, debug_path = prepare_all_paths(
        sequence_name, result_path, debug_path, base_vos_path)
    image_paths = get_all_image_paths(images_path)
    mask_paths = get_all_image_paths(masks_path)
    initial_mask = uf.read_image(mask_paths[0]) / 255
    initial_mask = np.expand_dims(initial_mask, axis=-1)
    if Tracker_params['save_images']:
        uf.save_mask_image(initial_mask, result_path,
                           os.path.basename(image_paths[0]))
    test_tracker = Tracker(uf.read_image(image_paths[0]), None, initial_mask,
                           **params)
    run_params_data = {
        'test_tracker': test_tracker,
        'image_paths': image_paths,
        'result_path': result_path
    }
    Tracker_params['debug_images_path_temp'] = debug_path
    return run_params_data
Example #15
def trackHeadTask():
    global tracker
    if tracker is None:
        tracker = Tracker()
    file = request.files['file']
    # imageBytes = base64.b64encode(file.read())
    try:
        result_exc = celery_app.task.headsDet.s(file.read()).delay()
        heads = result_exc.get(timeout=15)
        if heads is None:
            return (jsonify([]), 200)
        heads1 = np.ones((len(heads), 7))
        for i in range(len(heads)):
            head = heads[i]
            for j in range(len(head)):
                heads1[i, j] = head[j]
        trackMsg = tracker.traceHead(heads1)
    except Exception:
        trackMsg = []
    return (jsonify(trackMsg), 200)
Example #16
    matplotlib.pyplot.scatter(destination_points[:, 0],
                              destination_points[:, 1],
                              marker="x",
                              color="red",
                              s=200)
    write_location = 'test_images_output/destination_points_' + str(
        file_name)[len('test_images/'):]
    matplotlib.pyplot.savefig(write_location)
    matplotlib.pyplot.close('all')

    window_width = 30
    window_height = 40

    # set up the overall class to do all the tracking
    curve_centers = Tracker(Mywindow_width=window_width,
                            Mywindow_height=window_height,
                            Mymargin=55,
                            Mysmooth_factor=1)
    window_centroids = curve_centers.find_window_centroids(binary_bird_view)

    # points used to draw all the left and the right windows
    l_points = np.zeros_like(binary_bird_view)
    r_points = np.zeros_like(binary_bird_view)

    # Points used to find the left and right lanes
    rightx = []
    leftx = []

    # go through each level and draw the windows
    for level in range(0, len(window_centroids)):
        # window_mask is a function to draw window areas
        leftx.append(window_centroids[level][0])
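
The loop's comment refers to a window_mask helper that is not included in the excerpt. A sketch consistent with the windowed-centroid approach, assuming levels are counted upward from the bottom of the image:

import numpy as np

def window_mask(width, height, img_ref, center, level):
    # Mark a width x height box centered on `center` at the given level.
    output = np.zeros_like(img_ref)
    y0 = int(img_ref.shape[0] - (level + 1) * height)
    y1 = int(img_ref.shape[0] - level * height)
    x0 = max(0, int(center - width / 2))
    x1 = min(int(center + width / 2), img_ref.shape[1])
    output[y0:y1, x0:x1] = 1
    return output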
Example #17
def run_machine_vision():
    """
    Manage both the detector (neural net) and the tracker in a
    separate process from the animation.

    """

    #use imutils.FileVideoStream to read video from a file for testing
    #vs = FileVideoStream('no_vis_light.mp4').start()

    #use imutils.VideoStream to read video from a webcam for testing
    vs = VideoStream(src=0).start()

    #Threaded application of PTGrey Camera-- Use PTCamera_Threaded
    #vs = PTCamera(resolution = video_dims).start()

    #Non-threaded application of PTGrey Camera
    #vs = PTCamera(resolution = video_dims)

    #Let the camera warm up and set configuration
    time.sleep(2)
    print("[INFO] loading model...")
    # create an instance of the detector
    net = Deep_Detector('deploy.prototxt.txt',
                        'res10_300x300_ssd_iter_140000.caffemodel',
                        refresh_rate=2,
                        confidence=.4)

    #initialize a tracker
    print("[INFO] initializing tracker")
    tracker = Tracker(quality_threshold=6)

    last_detector_update_time = time.time()
    current_time = time.time()
    tracking_face = False
    tracked_center = (0, 0)
    running = True
    start_machine_vision_time = time.time()
    #count = 0
    #detector_count = 0

    # Check that the identified face is a reasonable size; for the PTGrey
    # camera ~50 works well. Other cameras will require other thresholds.
    face_width_threshold = 200
    no_frame_count = 0

    while running:
        current_time = time.time()
        # Reading from the camera is I/O-bound.
        frame = vs.read()
        if frame is not None:
            no_frame_count = 0
            frame = imutils.resize(frame, width=300)

            if (not tracking_face or
                    current_time - last_detector_update_time > net.get_refresh_rate()):
                last_detector_update_time = current_time
                tracking_face = run_detector(net, frame, tracker,
                                             face_width_threshold)
                #count += 1
                #detector_count += 1

            if tracking_face:
                #count += 1
                track_quality = tracker.get_track_quality(frame)
                if track_quality >= tracker.get_quality_threshold():
                    run_tracker(tracker, frame)
                else:
                    tracking_face = False

            #Wait sixteen milliseconds before looping again. OpenCV will freeze if this number
            #is too low or the waitKey call is omitted. If waitKey is called with no params,
            #the program will wait indefinitely for the user to hit a key before it
            #runs another loop; nice for debugging.
            #Quit the program if the user hits the "q" key on the keyboard
            if cv2.waitKey(16) == ord('q'):
                break

        else:
            no_frame_count += 1
            if no_frame_count == 50:
                print(
                    'Received too many null frames; exiting machine_vision_subprocess'
                )
                break

    end_machine_vision_time = time.time()
    #fps = count / (end_machine_vision_time - start_machine_vision_time)
    #print('Machine Vision fps: ' + str(fps))
    vs.stop()
    cv2.destroyAllWindows()
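
run_detector and run_tracker are project-local glue that the excerpt omits. A sketch of the detector side matching the call site above; the Deep_Detector and Tracker method names are guesses at the project's API:

def run_detector(net, frame, tracker, face_width_threshold):
    # Hypothetical glue: ask the detector for its best face box and hand
    # it to the tracker when the box is plausibly face-sized.
    box = net.detect(frame)  # assumed API; returns (x, y, w, h) or None
    if box is None:
        return False
    x, y, w, h = box
    if w < face_width_threshold:
        return False  # the direction of this size check is a guess
    tracker.start_track(frame, box)  # assumed Tracker API
    return True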
Example #18
from Tracker import Tracker
import os

if __name__ == "__main__":
    # change current directory to the file's directory
    if os.path.dirname(__file__) != "":
        try:
            os.chdir(os.path.dirname(__file__))
        except OSError:
            pass
    Tracker().start()
Example #19
from __future__ import division, print_function

import numpy as np

from Tracker import Tracker

i = 1


def main(points):
    global i
    A = np.array([[p.x for p in points], [p.y for p in points], [p.t for p in points]]).T
    np.save(str(i) + ".npy", A)
    i += 1


if __name__ == '__main__':
    Tracker(main, 20)
Example #20
    def _Deserialize(self, raw_bytes):
        """Deserialize given bytes to an object"""
        return json.loads(raw_bytes.decode())


# Configure logger
logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(message)s')
logging.info("PPSPP Tracker server starting")

# Asyncio event loop
loop = asyncio.get_event_loop()
loop.set_debug(True)

# Create a tracker instance
tracker = Tracker()

coro = loop.create_server(lambda: TrackerServerProtocol(tracker), '0.0.0.0',
                          6777)
server = loop.run_until_complete(coro)

# Schedule wakeups to catch Ctrl+C in Win32
# This should be fixed in Python 3.5
# Ref: http://stackoverflow.com/questions/24774980/why-cant-i-catch-sigint-when-asyncio-event-loop-is-running
if os.name == 'nt':

    def wakeup():
        # Call again later
        loop.call_later(0.5, wakeup)

    loop.call_later(0.5, wakeup)
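
TrackerServerProtocol presumably also sends replies; a sketch of the serializer mirroring _Deserialize above, assuming JSON is used in both directions (json is already imported by the snippet):

    def _Serialize(self, obj):
        """Serialize given object to bytes (mirror of _Deserialize)."""
        return json.dumps(obj).encode()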
Example #21
from Tracker import Tracker
x = Tracker()
x.mainloop()
Example #22
from Tracker import Tracker
import sys

if len(sys.argv) == 2:
    file_path = str(sys.argv[1])
    tracker = Tracker(file_path)
    tracker.track()
elif len(sys.argv) == 1:
    tracker = Tracker("Config.json")
    tracker.track()
else:
    print("Error: wrong number of arguments. "
          "Expects either no argument or the path to a config JSON file.")
Example #23
    def __init__(self, classifier):
        self.classifier = classifier
        self.tracker = Tracker()
Example #24
    def setUp(self) -> None:
        self.cap = None
        self.tracker = Tracker(self.cap)
Example #25
def processingInit(processingPipe, endQueue):
    # Receive information
    frame = processingPipe.recv()
    if processingPipe.recv() != "Frame Sended":
        print("Error in Pipe multiprocessing")

    [zone, zoneState, zoneCondition, span, coordinate] = processingPipe.recv()
    if processingPipe.recv() != "Zone Sended":
        print("Error in Pipe multiprocessing")


    # Tracker objects
    centroidTracker = Tracker(maxDisappeared=20, maxDistance=40)
    trackableObjects = {}
    totalUp = totalDown = totalLeft = totalRight = 0
    lock = False

    # Visualize process
    processVisualizePipe, visualizeProcessPipe = Pipe()
    visualizeInitProcess = Process(target=visualizeInit,
                                   args=[visualizeProcessPipe, endQueue])
    visualizeInitProcess.start()

    # MQTT process
    processMQTTPipe, mqttProcessPipe = Pipe()
    mqttInitProcess = Process(target=mqttInit,
                              args=[mqttProcessPipe, endQueue])
    mqttInitProcess.start()

    while True:
        processingFrame = processingPipe.recv()
        if not endQueue.empty():
            processMQTTPipe.send(None)
            break

        ret = processingFrame.isSomeone
        newRectList = processingFrame.box
        updateRect = []
        updateColor = []
        frame = processingFrame.image
        start = time.time()
        objects = centroidTracker.update(newRectList)
        if not ret:
            processingFrame.total = [totalUp, totalDown, totalLeft, totalRight]
            processVisualizePipe.send(processingFrame)
            continue
        
        for (objectID, centroid) in objects.items():
            to = trackableObjects.get(objectID, None)

            if to is None:
                to = TrackableObject(objectID, centroid, len(zoneState))
                formatCentroid = np.append(centroid[0:4], [1])
                newState = np.logical_and.reduce(np.dot(zoneCondition, formatCentroid) > 0, axis=-1)
                stateFlag = np.logical_xor.reduce(newState, axis=1)
                if not np.logical_and.reduce(stateFlag):
                    stateFlag = np.logical_not(stateFlag)
                    errorIndex = np.where(stateFlag)[0]
                    for i in errorIndex:
                        zoneIndex = 0 if zoneState[i] else 2
                        changeCondition = (zone[i, zoneIndex, 0] * centroid[4] +
                                           zone[i, zoneIndex, 1] * centroid[5] +
                                           zone[i, zoneIndex, 2] - span[i]) < 0
                        if changeCondition:
                            newState[i] = [True, False]
                        else:
                            newState[i] = [False, True]
                        
                to.state = newState
                
            else:
                firstPos = to.landmarks[0]
                to.landmarks.append(centroid)
                formatCentroid = np.append(centroid[0:4], [1])
                newState = np.logical_and.reduce(np.dot(zoneCondition, formatCentroid) > 0, axis=-1)

                for i in range(len(zoneState)):
                    if not np.logical_xor(newState[i, 0], newState[i, 1]):
                        newState[i] = to.state[i]

                placeMap = np.logical_and(np.logical_xor(to.state, newState), newState)
                placeMap = np.where(placeMap)

                if np.logical_or.reduce(np.logical_xor(to.state, newState), axis=None):
                    if newState.any():
                        lock = True


                to.state = newState

                if lock:
                    lock = False
                    line = int(placeMap[0])
                    direction = int(placeMap[1])
                    if zoneState[line]:
                        if direction == 0:
                            totalUp += 1
                            to.direction = "UP"
                        else:
                            totalDown += 1
                            to.direction = "DOWN"
                    else:
                        if direction == 0:
                            totalLeft += 1
                            to.direction = "LEFT"
                        else:
                            totalRight += 1
                            to.direction = "RIGHT"

                    processMQTTPipe.send([frame, line, direction, centroid])

                updateRect.append(centroid)
                if to.direction:
                    updateColor.append(to.direction)
                else:
                    updateColor.append("NONE")    

            trackableObjects[objectID] = to

        processingFrame.box = updateRect
        processingFrame.color = updateColor  
        processingFrame.processingTime["Processing"] = time.time() - start
        processingFrame.total = [totalUp, totalDown, totalLeft, totalRight]
        processVisualizePipe.send(processingFrame)
        
        newKeys = list(objects.keys())
        trackableObjects = dict([(key, trackableObjects[key]) for key in newKeys])
        objectList = [value.getRectList() for key, value in trackableObjects.items()]


    print("PROCESSING DONE")
Example #26
    def __init__(self):
        self.__webs = {}
        with open("webs.json") as file:
            self.__webs = json.load(file)

        self.__trackers = []

        if not os.path.exists("trackers.json"):
            with open("trackers.json", "w") as file:
                data = {}
                data["lastUpdateTimestamp"] = ""
                data["trackers"] = {}
                json.dump(data, file, indent="\t")

        else:
            with open("trackers.json") as file:
                obj = json.load(file)

                for tracker in obj["trackers"].values():
                    for web in self.__webs.keys():
                        if re.search(web, tracker["_Tracker__url"]):
                            t = Tracker(tracker["_Tracker__name"],
                                        tracker["_Tracker__url"],
                                        self.__webs[web],
                                        tracker["_Tracker__email"],
                                        tracker["_Tracker__desirePrice"],
                                        tracker["_Tracker__price"])
                            self.__trackers.append(t)

        self.__options = {
            -1: {
                "description": "Exit"
            },
            0: {
                "description": "See trackers",
                "method": self.displayTrackers
            },
            1: {
                "description": "Add tracker",
                "method": self.addTracker
            },
            2: {
                "description": "Remove tracker",
                "method": self.removeTracker
            },
            3: {
                "description": "Start thread",
                "method": self.startThread
            },
            4: {
                "description": "Stop thread",
                "method": self.stopThread
            }
        }

        self.__thread = None
Example #27
# from SharedFile import SharedFile
from Tracker import Tracker

# print("1. Create first tracker and config file")
# print("2. Create tracker from config file")
# choice = input("->")
# path = input("Enter path to file to share")
# if choice == '1':
#    my_tracker = Tracker(path)
#my_tracker = Tracker('/home/misterk/Desktop/lorem.txt')
my_tracker = Tracker('/home/misterk/Desktop/lorem.txt', 1024,
                     'tracker_config.json')
Example #28
def main():
    stitcher = Stitcher()
    if config_scale:
        background = cv2.imread('images/background_scaled.jpg')
    else:
        background = cv2.imread('images/background.jpg')

    transformer = Transformer(config_scale)

    cap_left = cv2.VideoCapture(videos_path + videos[0])
    cap_mid = cv2.VideoCapture(videos_path + videos[1])
    cap_right = cv2.VideoCapture(videos_path + videos[2])

    frame_width = int(cap_mid.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap_mid.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_count = int(cap_mid.get(cv2.CAP_PROP_FRAME_COUNT))

    init_points = {'C0': (71, 1153),
                   'R0': (80, 761), 'R1': (80, 1033), 'R2': (95, 1127), 'R3': (54, 1156), 'R4': (65, 1185),
                   'R5': (61, 1204), 'R6': (56, 1217), 'R7': (69, 1213), 'R8': (67, 1253), 'R9': (75, 1281),
                   'R10': (92, 1347),
                   'B0': (71, 1409), 'B1': (72, 1016), 'B2': (47, 1051), 'B3': (58, 1117), 'B4': (74, 1139),
                   'B5': (123, 1156), 'B6': (61, 1177), 'B7': (48, 1198), 'B8': (102, 1353)}

    points = list(init_points.values())
    tracker = Tracker(background, config_scale, points)

    # cap_left.set(cv2.CAP_PROP_POS_FRAMES, 1400)
    # cap_mid.set(cv2.CAP_PROP_POS_FRAMES, 1400)
    # cap_right.set(cv2.CAP_PROP_POS_FRAMES, 1400)
    for fr in range(frame_count):
        print(fr)
        status_left, frame_left = cap_left.read()
        status_mid, frame_mid = cap_mid.read()
        status_right, frame_right = cap_right.read()

        scaled_size = (frame_width // image_down_scale_factor,
                       frame_height // image_down_scale_factor)
        frame_left = cv2.resize(frame_left, scaled_size)
        frame_mid = cv2.resize(frame_mid, scaled_size)
        frame_right = cv2.resize(frame_right, scaled_size)

        # Adjust the brightness difference.
        frame_mid = cv2.convertScaleAbs(frame_mid, alpha=0.92)

        if status_left and status_mid and status_right:
            warped_left_mid = stitcher.stitch(frame_mid, frame_left,
                                              H_left_mid)
            warped_left_mid_right = stitcher.stitch(warped_left_mid,
                                                    frame_right, H_mid_right)
            warped_left_mid_right_cropped = crop_img(warped_left_mid_right)

            # plt.imshow(warped_left_mid_right_cropped)
            # plt.show()
            # cv2.waitKey(0)

            points = tracker.tracking(warped_left_mid_right_cropped)
            for i in range(len(points)):
                cv2.circle(warped_left_mid_right_cropped,
                           (points[i][1], points[i][0]), 3, (0, 0, 255), -1)

            height, width = warped_left_mid_right_cropped.shape[:2]
            warped_left_mid_right_cropped = cv2.resize(
                warped_left_mid_right_cropped, (width // 2, height // 2))
            cv2.imshow('Objects', warped_left_mid_right_cropped)
            cv2.waitKey(1)

            # background = transformer.transform(points)
            # plt.imshow(warped_left_mid_right_cropped)
            # plt.show()
            # cv2.imshow('Objects', background)
            # cv2.waitKey(30)

    cv2.waitKey(0)
    cv2.destroyAllWindows()
    cap_left.release()
    cap_mid.release()
    cap_right.release()
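
crop_img is assumed by the snippet; a sketch that trims the black border left by the warps by bounding the non-zero pixels (whether the original crops this way is a guess):

import cv2
import numpy as np

def crop_img(img):
    # Bound the non-black region and cut the frame down to it.
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    coords = cv2.findNonZero((gray > 0).astype(np.uint8))
    x, y, w, h = cv2.boundingRect(coords)
    return img[y:y + h, x:x + w]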
Example #29
    def loadModel(self):
        self.tracker = Tracker()
Example #30
    def __init__(self):
        logging.info('Init Experiment...')
        self.player = VideoPlayer()
        self.tracker = Tracker()
        self.tracker.start()