Example #1
def vote_summary():
    address = request.args.get("address")
    if address:
        summary = votes.vote_summary(address)
        if summary:
            return render_template("hood.html", summary=summary)
        else:
            return render_template("not_found.html")
    else:
        track("Vote - Hit home page")
        return render_template("search.html")
Example #2
def show_race(ferrari):

    close('all')

    print_log = True

    # create instances of a track
    monaco = track.track()
    monaco.ferrari = ferrari

    n_time_steps = 1000  # maximum time steps
    
    # choose to plot every step and start from defined position
    (position_0, velocity_0) = monaco.setup(plotting=True)	
    ferrari.reset()

    # choose a first action
    action = ferrari.choose_action(position_0, velocity_0, 0, learn=False)

    # iterate over time
    for i in arange(n_time_steps):

        # inform your action
        (position, velocity, R) = monaco.move(action)

        # choose new action, with learning turned off
        action = ferrari.choose_action(position, velocity, R, learn=False)

        # check if the race is over
        if monaco.finished is True:
            break
Example #3
def postPackage():
    data = request.get_json(force=True)
    print(data)

    res = track(data["tracking"], data["courier"])
    print(res)
    orderStatus = res["status_description"]
    expectedDelivery = res["estimated_delivery_date"]
    shipDate = res["ship_date"]
    deliveryDate = res["actual_delivery_date"]
    statusCode = res["status_code"]
    carrierStatus = res["carrier_status_description"]
    exceptionDescription = res["exception_description"]

    new_package = Package(user=data["pubId"],
                          item=data["item"],
                          courier=data["courier"],
                          tracking=data["tracking"],
                          status=orderStatus,
                          shipdate=shipDate,
                          deliverdate=deliveryDate,
                          expected=expectedDelivery,
                          statuscode=statusCode,
                          carrierstatus=carrierStatus,
                          exceptiondescription=exceptionDescription)

    db.session.add(new_package)
    db.session.commit()

    return jsonify({"message": "added package"})
Example #4
def show_race(ferrari):

    close('all')

    # create instances of a track
    monaco = track.track()

    n_time_steps = 1000  # maximum time steps

    # choose to plot every step and start from defined position
    (position_0, velocity_0) = monaco.setup(plotting=True)
    ferrari.reset()

    # choose a first action
    action = ferrari.choose_action(position_0, velocity_0, 0)

    # iterate over time
    for i in arange(n_time_steps):

        # inform your action
        (position, velocity, R) = monaco.move(action)

        # choose new action, with learning turned off
        action = ferrari.choose_action(position, velocity, R, learn=False)

        # check if the race is over
        if monaco.finished is True:
            break

    print "finished"
    raw_input()
Example #5
def train_and_show_navigation():
    close('all')
    
    # create instances of a car and a track
    monaco = track.track()
    ferrari = car.car()
        
    n_trials = 2000
    n_time_steps = 1000  # maximum time steps for each trial
    ferrari.reset()
    figure(0)
    xlim(-0.1,1.1)
    ylim(-0.1,1.1)
    monaco.plot_track()
    ferrari.plot_navigation()
    for j in arange(n_trials):  

        # before every trial, reset the track and the car.
        # the track setup returns the initial position and velocity. 
        (position_0, velocity_0) = monaco.setup()   
        ferrari.reset()
        
        # choose a first action
        action = ferrari.choose_action(position_0, velocity_0, 0)
        
        # iterate over time
        for i in arange(n_time_steps) : 
            
            # the track receives which action was taken and 
            # returns the new position and velocity, and the reward value.
            (position, velocity, R) = monaco.move(action)   
            
            # the car chooses a new action based on the new states and reward, and updates its parameters
            action = ferrari.choose_action(position, velocity, R)   
            
            # check if the race is over
            if monaco.finished is True:
                break
        
        if j%100 == 0:
            # plots the race result every 100 trials
            monaco.plot_world()
            
        if j%10 == 0:
            print 'Trial:', j

        if j in (0, 50, 100, 250, 500, 750, 999, 1250, 1500, 1750, 1999):
            figure(j+1)
            xlim(-0.1,1.1)
            ylim(-0.1,1.1)
            monaco.plot_track()
            ferrari.plot_navigation()

    return ferrari #returns a trained car
Example #6
	def load_track( self, path, name="", loop=True, randomise=1, volume=1.0, fadein_percent=10 ):
		"""Load the specified sound into a track."""

		# If name is unspecified, just use the path
		if( name == "" ):
			name = path

		# Increment the channel counter and assign that ID to this track
		self._num_channels += 1
		self._tracks[name] = track.track( pygame.mixer.Channel( self._num_channels ), path, name, loop, randomise, volume, fadein_percent )
		print("Track " + name + " loaded.")
Example #7
def vote_summary(address):
    url = a2w_url.format(address=address, database=database)
    js = requests.get(url).json()
    if "error" in js:
        track("Vote - Address Not Found", address=address)
        return None

    ward = js[0]["ward"]
    iec = IEC(iec_url)

    summary = iec.wardsummary(ward=ward)
    if not summary:
        track(
            "Vote - Ward Not Found",
            address=js[0]["address"],
            ward=js[0]["ward"],
            municipality=js[0]["municipality"],
            province=js[0]["province"],
        )
        return None

    summary.update(js[0])
    track(
        "Vote - Got results",
        user="******",
        province=summary["province"],
        municipality=summary["municipality"],
        ward=summary["ward"],
        address=summary["address"],
    )
    return summary
Example #8
    def reset(self):
        self.agent = car.Car(self.carCt)
        self.track = track.track()
        if self.track.drawTrack() == 0:
            self.track.update_screen()
            self.state = self.track.save_image()
        else:
            self.configuration = "vm"
            self.state = self.track.save_image(True)

        self.done = False
        self.time = 200
        return self.state
Example #9
def print_structure(path: str,
                    dir_path: str,
                    track_first: bool = True,
                    raw: bool = False,
                    ignore: bool = False) -> None:
    if track_first:
        track(path, dir_path, output=False, ignore=ignore)
    filename = path.strip().split('/')[-1] + '.json'
    try:
        data = read_from_json_file(dir_path + '/' +
                                   constants.save_folder_name + '/' + filename)
        if os.path.isfile(path):
            if raw:
                pp(data)
            else:
                print('Name: {}'.format(data['n']))
                print('Track Time: {}'.format(
                    datetime.datetime.fromtimestamp(data['ts'])))
                print('Size: {}'.format(get_size_format(data['i']['s'])))
                print('Path: {}'.format(data['i']['p']))
                print('Edit Time: {}'.format(
                    datetime.datetime.fromtimestamp(data['i']['time'])))
        elif os.path.isdir(path):
            if raw:
                pp(data)
            else:
                print('Name: {}'.format(data['n']))
                print('Track Time: {}'.format(
                    datetime.datetime.fromtimestamp(data['ts'])))
                print('Size: {}'.format(get_size_format(data['i']['s'])))
                print('Path: {}'.format(data['i']['p']))
                print('Edit Time: {}'.format(
                    datetime.datetime.fromtimestamp(data['i']['time'])))
                print('Directory Structure:')
                print()
                dir_info = data['i']['dirs']
                print_dir(dir_info, 0)
    except FileNotFoundError:
        print("The folder is not tracked. Please track it first")
Example #10
def main():
    '''
	print("Tracking in:")
	print("5")
	time.sleep(1)
	print("4")
	time.sleep(1)
	print("3")
	time.sleep(1)
	print("2")
	'''
    time.sleep(1)
    print("1")
    time.sleep(1)
    print("Start!")

    path = track(5)
    Ax_data = []
    Ay_data = []
    rep = 0
    for entry in path:
        # skip the very first entry, then collect absolute values of the rest
        if rep > 0:
            Ax_data.append(abs(float(entry[0])))
            Ay_data.append(abs(float(entry[1])))
            rep += 1
        if rep == 0:
            rep = 1

    x_data = np.array(Ax_data)
    y_data = np.array(Ay_data)

    x_data_smooth = np.linspace(min(x_data), max(x_data), 1000)

    fig, ax = plt.subplots(1, 1)

    spl = UnivariateSpline(x_data, y_data, s=0, k=2)
    y_data_smooth = spl(x_data_smooth)
    ax.plot(x_data_smooth, y_data_smooth, 'b')

    bi = Akima1DInterpolator(x_data, y_data)
    y_data_smooth = bi(x_data_smooth)
    ax.plot(x_data_smooth, y_data_smooth, 'g')

    bi = PchipInterpolator(x_data, y_data)
    y_data_smooth = bi(x_data_smooth)
    ax.plot(x_data_smooth, y_data_smooth, 'k')

    ax.plot(x_data_smooth, y_data_smooth)
    ax.scatter(x_data, y_data)

    plt.show()
Example #11
    def create_new_tracks(self):
        unasDet = self.unassignedDetection.shape
        if unasDet[0] == 0:
            return

        # create properties-arrays of the unassigned detections only:
        centroids = self.centroids[self.unassignedDetection]
        bboxes = self.bboxes[self.unassignedDetection]

        # create the track objects:
        for i in range(unasDet[0]):
            self.tracks.append(
                track.track(self.id, bboxes[i], centroids[i],
                            self.tiny_masks[i]))
            self.id += 1

        self.processNum = self.processNum + 1
Example #12
def getUserPackages(public_id):

    packages = Package.query.filter_by(user=public_id).all()

    if not packages:
        return make_response({"message": "no packages found"})

    for package in packages:
        if package.statuscode == "DE":
            pass
        else:
            res = track(package.tracking, package.courier)
            package.status = res["status_description"]
            package.expected = res["estimated_delivery_date"]
            package.shipdate = res["ship_date"]
            package.deliverdate = res["actual_delivery_date"]
            package.statuscode = res["status_code"]
            package.carrierstatus = res["carrier_status_description"]
            package.exceptiondescription = res["exception_description"]
            db.session.commit()

    raw = []

    for package in packages:
        package_data = {}
        package_data["id"] = package.id
        package_data["user"] = package.user
        package_data["item"] = package.item
        package_data["courier"] = package.courier
        package_data["tracking"] = package.tracking
        package_data["status"] = package.status
        package_data["shipdate"] = package.shipdate
        package_data["deliverdate"] = package.deliverdate
        package_data["expected"] = package.expected
        package_data["statuscode"] = package.statuscode
        package_data["carrierstatus"] = package.carrierstatus
        package_data["exceptiondescription"] = package.exceptiondescription
        raw.append(package_data)
    print(raw)
    output = packageSort(raw)

    return jsonify({"packages": output})

    print(packages)

    return jsonify({"packages": packages})
Example #13
def seqToTrk(infilename, outfilename, **kargs):
    assert os.path.realpath(infilename), "no filename specified"
    assert os.path.realpath(outfilename), "no save filename specified"

    gerald_format = {
        "loc": {
            "code":
            "location(chr=column[10].strip(\".fa\"), left=column[12], right=int(column[12])+25)"
        },
        "strand": 13,
        "dialect": csv.excel_tab
    }
    # strand is F/R ??

    seqfile = delayedlist(filename=os.path.realpath(infilename),
                          format=gerald_format)
    n = 0
    m = 0

    t = track(filename=outfilename, stranded=False, new=True,
              **kargs)  # strands not currently supported :(

    s = time.time()
    for item in seqfile:
        t.add_location(item["loc"], strand=item["strand"])

        n += 1
        if n > 100000:
            m += 1
            n = 0
            print "%s00,000" % m
            #break
    e = time.time()
    # 1000 = averages 8-9 s
    # 3.65 s cache.
    # 0.61 s better cacheing, less commits
    # 10,000 used to check now.
    # 5.98 s, 22.8Mb (Binary(array.tostring())
    # 6.0 s 17.1 Mb (str/eval)
    # 6.0 s 259kb (zlib/Binary()/tostring())

    print "Finalise:"
    t.finalise()
    print "Took: %s s" % (e - s)
    return True
Example #14
def read(debug=False):
    global cap, positions
    success = False
    tic = time.time()
    for t in range(frames_per_read):
        success, frame = cap.read(0)
        if not success: return -1
    toc = time.time()
    print('Reading frames took {:.2f} s'.format(toc - tic))
    
    if not fix_camera:
        positions = match.match(frame)
    img = warp_perspective(frame, positions, perspective+shift, size+2*shift)
    
    plt.cla()
    coords = track.track(img, debug=debug)
    if not debug:  # show the original image if not in debug mode
        ultility.show(img, frame)
    ultility.plot(vertices)
    plt.pause(interval)
    
    return coords
Example #15
def update_dashboard():
    """
    Updates the scores and timer in game.html for the duration of one game. The function is wrapped inside of
    app.response_class(). Within the function, new video-frames are continuously generated from a live-feed
    which are processed in the track() function. After requirements are met for finishing a game, the game is stopped
    and relevant data is written to the MySQL database. After this, a new game can be started.
    """

    global video_camera
    global game_running
    global last_scored
    global game

    one_more = True  # Used to show last frame
    prev_black = 0  # Keeps track of last score black
    prev_white = 0  # Keeps track of last score white

    # Start videocapture
    if video_camera is None:
        video_camera = cv2.VideoCapture(video_file)
        # video_camera = cv2.VideoCapture(0)

    # Keep running until game_running == False
    while one_more:
        time.sleep(1)
        while game_running:
            ok, frame = video_camera.read()
            print(ok)

            # Mock scoring update, uncomment when using video file for testing purposes
            # score_black, score_white = np.random.choice([0, 1], 1, p=[0.99, 0.01]), np.random.choice([0, 1], 1, p=[0.99, 0.01])
            # game.score[0] += score_black[0]
            # game.score[1] += score_white[0]

            # Determine the last scoring team (for score adjustment purposes)
            if game.score[1] > prev_white:
                prev_white = game.score[1]
                last_scored = 'white'

            elif game.score[0] > prev_black:
                prev_black = game.score[0]
                last_scored = 'black'

            # Stop game if there is a score of 10 or higher and there is a difference of 2
            if game.score[0] >= 10 or game.score[1] >= 10:
                if abs(game.score[0] - game.score[1]) >= 2:

                    # Stop game and yield last frame
                    game.stop()
                    game_running = False

                    # Write game data to MySQL database
                    game.write_db(zwartachter_ID, zwartvoor_ID, witachter_ID,
                                  witvoor_ID)

                    # Set relevant globals to None for next game
                    video_camera = None

            # If the game is not finished, keep tracking and streaming frames to webpage
            if ok:
                track(frame, game, 2)

                # Globals to update m and s for last frame
                global m
                global s

                # Calculate current time and yield to webpage
                seconds = time.time() - game.time
                m, s = divmod(seconds, 60)
                h, m = divmod(m, 60)

                yield ('{:.0f}m{:.0f}s {} {}\n'.format(m, s, game.score[1],
                                                       game.score[0]))

        # One more loop after game_running has turned to False to avoid client-side empty scoreboard
        yield ('{:.0f}m{:.0f}s {} {}\n'.format(m, s, game.score[1],
                                               game.score[0]))
        one_more = False
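Since the docstring says the generator is wrapped in app.response_class(), here is a minimal sketch of how it might be exposed as a streaming endpoint; the route name and mimetype are assumptions, not taken from the project:

@app.route('/dashboard_feed')  # hypothetical route name
def dashboard_feed():
    # stream each yielded "<minutes>m<seconds>s <white> <black>" line to the client as it is produced
    return app.response_class(update_dashboard(), mimetype='text/plain')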
"""
Solving race track using Q-learning.(off-policy)
"""
from track import track
import numpy as np
import matplotlib.pyplot as plt
import time
from race_track_fun import generate_start_state, generate_reward_and_next_state, generate_action, game_over

race_track = track()
height, width = race_track.shape
n_vv = 5  # vertical velocity component, taking values 0 to 4
n_vh = 5  # horizontal velocity component, taking values 0 to 4
actions = 5  # in fact, only five actions are valid (velocity is assumed to point only up or right, for simplicity)
Q = np.zeros((height, width, n_vv, n_vh, actions))

gamma = 1
alpha = 0.5
"""
actions:
0 means stay where you are or keep moving
1 means move up
2 means move left
3 means move down
4 means move right
"""
# policy derived from Q
policy = np.argmax(Q, axis=4)

def run(k):
    row, col, vv, vh = (3, k, 0, 0)
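The body of run() is cut off in this excerpt; below is a minimal sketch of the off-policy Q-learning update that the Q, alpha and gamma definitions above are set up for. The helper calls are assumptions about the signatures in race_track_fun, written only for illustration:

def q_learning_episode():
    state = generate_start_state()                 # assumed: returns (row, col, vv, vh)
    while not game_over(state):                    # assumed: True once the finish line is reached
        a = generate_action(state, Q)              # assumed: behaviour policy, e.g. epsilon-greedy over Q
        reward, next_state = generate_reward_and_next_state(state, a)  # assumed return order
        row, col, vv, vh = state
        n_row, n_col, n_vv, n_vh = next_state
        # off-policy target: bootstrap from the greedy action at the next state
        td_target = reward + gamma * np.max(Q[n_row, n_col, n_vv, n_vh])
        Q[row, col, vv, vh, a] += alpha * (td_target - Q[row, col, vv, vh, a])
        state = next_state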
Example #17
def train_car():
    
    close('all')
    
    # create instances of a car and a track
    monaco = track.track()
    ferrari = car.car()
        
    n_trials = 1000
    n_time_steps = 500  # maximum time steps for each trial

    # create figure
    figure(200)
    f_time = subplot(111)
    xlim(0,n_trials/10)
    ylim(0,1000)

    figure(400)
    f_el = subplot(111)

    total_time = 10
    time_array = []
    
    for j in arange(1, n_trials+1):	

        # before every trial, reset the track and the car.
        # the track setup returns the initial position and velocity. 
        (position_0, velocity_0) = monaco.setup()	
        ferrari.reset()

        # accumulate the total time over each block of 10 trials
        
        # choose a first action
        action = ferrari.choose_action(position_0, velocity_0, 0)
        
        # iterate over time
        for i in arange(n_time_steps) :	
            
            # the track receives which action was taken and 
            # returns the new position and velocity, and the reward value.
            (position, velocity, R) = monaco.move(action)
            
            # the car chooses a new action based on the new states and reward, and updates its parameters
            action = ferrari.choose_action(position, velocity, R)
            
            # check if the race is over
            if monaco.finished is True:
                total_time += i
                break

            if i == n_time_steps - 1:
                total_time += n_time_steps
        
        if j%100 == 0:
            # plots the race result every 100 trials
            monaco.plot_world()
            

            
        if j%10 == 0:
            # record the average time to reach the goal over the last 10 trials
            time_array.append(total_time / 10)
            # plot

            
            total_time = 0
            print 'Trial:', j

    f_el.imshow(ferrari.c_m_eligibility)
    f_time.plot(time_array)
    return ferrari #returns a trained car
Example #18
    argparser.add_argument('-s',
                           '--visualize_dir',
                           type=str,
                           default='data/visualize',
                           help='Visualizing result directory')
    argparser.add_argument('-r',
                           '--result_dir',
                           type=str,
                           default='result/',
                           help='Final result directory')
    args = vars(argparser.parse_args())
    return args


if __name__ == '__main__':
    args = parse_args()

    json_dir = os.path.abspath(args['json_dir'])
    video_dir = os.path.abspath(args['video_dir'])
    detect_dir = os.path.abspath(args['detect_dir'])
    track_dir = os.path.abspath(args['track_dir'])
    count_dir = os.path.abspath(args['count_dir'])
    visualize_dir = os.path.abspath(args['visualize_dir'])
    result_dir = os.path.abspath(args['result_dir'])

    detect(json_dir, video_dir, detect_dir)
    track(json_dir, video_dir, detect_dir, track_dir)
    count(json_dir, video_dir, track_dir, count_dir)
    # if visualize_dir:
    #     visualize(json_dir, video_dir, detect_dir, track_dir, count_dir, visualize_dir)
Example #19
                if sys.argv[2] in ['--ignore']:
                    pass
                else:
                    raise FileNotFoundError(
                        "Could not find the file/folder: {}".format(
                            sys.argv[2]))

        dir_path = base_path
        if os.path.isfile(base_path):
            dir_path = base_path[0:base_path.rindex('/')]
        os.chdir(dir_path)

        if '--ignore' in sys.argv:
            params['ignore'] = True

        errors = track(base_path, dir_path, output=True, **params)
        if len(errors) > 0:
            print()
            print("There was a problem tracking the following files/folders:")
            for error in errors:
                print(error)
        os.chdir(current_wd)

    elif sys.argv[1] == 'back_up' or sys.argv[1] == 'pull':
        pull_path = ''
        destination_path = current_wd
        if len(sys.argv) > 2:
            if os.path.lexists(sys.argv[2]):
                if os.path.isabs(str(sys.argv[2])):
                    pull_path = sys.argv[2]
                else:  # is relative path
Example #20
def average_trainings_softmax():
    close('all')
    
    # create instances of a car and a track
    monaco = track.track()
    ferrari = car.car()
        
    n_trials = 1000
    n_time_steps = 1000  # maximum time steps for each trial
    n_indep_cars = 3.
    times = zeros(n_trials)
    rewards = zeros(n_trials)
    avg_times = zeros(n_trials)
    avg_rewards = zeros(n_trials)
    for k in arange(n_indep_cars):
        monaco = track.track()
        ferrari = car.car()
        for j in arange(n_trials):  

            # before every trial, reset the track and the car.
            # the track setup returns the initial position and velocity. 
            (position_0, velocity_0) = monaco.setup()   
            ferrari.reset()
            
            # choose a first action
            action = ferrari.choose_action_softmax(position_0, velocity_0, 0)
            
            # iterate over time
            for i in arange(n_time_steps) : 
                
                # the track receives which action was taken and 
                # returns the new position and velocity, and the reward value.
                (position, velocity, R) = monaco.move(action)   
                
                # the car chooses a new action based on the new states and reward, and updates its parameters
                action = ferrari.choose_action_softmax(position, velocity, R)   
                
                # check if the race is over
                if monaco.finished is True:
                    break
            
            if j%100 == 0:
                # plots the race result every 100 trials
                monaco.plot_world()
                
            if j%10 == 0:
                print k, 'Trial:', j

            times[j] = monaco.time
            rewards[j] = monaco.total_reward
        avg_times = avg_times + times/n_indep_cars
        avg_rewards = avg_rewards + rewards/n_indep_cars

    figure(1)
    plot(avg_times)
    ylabel('Latency')
    xlabel('Trial')
    show()
    figure(2)
    plot(avg_rewards)
    ylabel('Total reward')
    xlabel('Trial')
    show()
Example #21
def pull(source_path: str,
         destination_path: str,
         track_first: bool = True,
         ignore: bool = False,
         copy_all: bool = False) -> list:
    # source_path is a file or directory
    # destination_path is a directory
    global _source_path, _destination_path, source_structure, destination_structure, _ignore, errors, _copy_all
    print(
        "Starting Backup! Please don't turn off your computer till the process finishes."
    )
    _source_path = source_path
    _destination_path = destination_path
    _ignore = ignore
    _copy_all = copy_all
    if _ignore:
        get_ignore_list()
    source_json_name = source_path.strip().split('/')[-1] + '.json'
    destination_json_name = destination_path.strip().split('/')[-1] + '.json'
    source_dir_path = source_path
    if os.path.isfile(source_dir_path):
        source_dir_path = source_dir_path[0:source_dir_path.rindex('/')]
    if track_first or (not os.path.lexists(source_dir_path + '/' +
                                           constants.save_folder_name + '/' +
                                           source_json_name)):
        os.chdir(source_dir_path)
        track(source_path, source_dir_path, output=False, ignore=_ignore)
        print("Tracked source")
        os.chdir(destination_path)
    track(destination_path, destination_path, output=False, ignore=_ignore)
    print("Tracked destination")
    source_structure = read_from_json_file(source_dir_path + '/' +
                                           constants.save_folder_name + '/' +
                                           source_json_name)
    source_structure = source_structure['i']
    destination_structure = read_from_json_file(destination_path + '/' +
                                                constants.save_folder_name +
                                                '/' + destination_json_name)
    destination_structure = destination_structure['i']

    if os.path.isfile(source_path):
        destination_structure = destination_structure['dirs']
        edit_time = source_structure['time']
        filename = source_path.strip().split('/')[-1]
        if filename in destination_structure.keys():
            if edit_time > destination_structure[filename]['time']:
                copy(source_path, destination_path)
        else:
            copy(source_path, destination_path)
    else:
        dest_name = destination_path.strip().split('/')[-1]
        source_name = source_path.strip().strip('/').split('/')[-1]
        if dest_name == source_name:
            back_up_dir(source_structure, destination_structure)
        elif source_name in destination_structure['dirs']:
            back_up_dir(source_structure,
                        destination_structure['dirs'][source_name])
        else:
            copy(source_path, destination_path)
    print("All file(s) have been backed-up successfully")
    track(destination_path, destination_path, output=True, ignore=_ignore)
    return errors
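A minimal usage sketch of pull(); the paths below are placeholders, not taken from the project:

# back up a folder into a destination directory, re-tracking the source first
failed = pull("/home/user/documents", "/mnt/backup", track_first=True, ignore=False)
if failed:
    print("Some items could not be copied:", failed)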
Example #22
def subclip(video_path,
            detections_dir,
            tracks_dir,
            stats_dir,
            clip_dir,
            around='detections',
            display_detections=False,
            display_tracks=False,
            start_buffer=100,
            end_buffer=50):

    video_path = os.path.normpath(video_path)
    detections_dir = os.path.normpath(detections_dir)
    tracks_dir = os.path.normpath(tracks_dir)
    stats_dir = os.path.normpath(stats_dir)
    clip_dir = os.path.normpath(clip_dir)

    assert around == 'detections' or around == 'tracks'

    colors = dict()
    for i in range(200):
        colors[i] = (int(256 * random.random()), int(256 * random.random()),
                     int(256 * random.random()))

    os.makedirs(clip_dir, exist_ok=True)

    video_path, video_filename = os.path.split(video_path)
    txt_filename = video_filename[:-4] + '.txt'

    if not os.path.exists(os.path.join(stats_dir, txt_filename)):
        logging.info(
            "Stats file {} does not exist so will make it first...".format(
                os.path.join(stats_dir, txt_filename)))

        video_to_frames(os.path.join(os.path.normpath(FLAGS.videos_dir),
                                     video_filename),
                        os.path.normpath(FLAGS.frames_dir),
                        os.path.normpath(FLAGS.stats_dir),
                        overwrite=False,
                        every=FLAGS.detect_every)

    with open(os.path.join(stats_dir, txt_filename), 'r') as f:
        video_id, width, height, length = f.read().rstrip().split(',')

    if not os.path.exists(os.path.join(detections_dir, txt_filename)):
        logging.info(
            "Detections file {} does not exist so will make it first...".
            format(os.path.join(detections_dir, txt_filename)))
        detect_wrapper([video_filename])

    with open(os.path.join(detections_dir, txt_filename), 'r') as f:
        detections = [line.rstrip().split(',') for line in f.readlines()]

    width = int(width)
    height = int(height)
    length = int(length)

    mult = True
    detections_ = dict()
    for d in detections:
        if mult:
            d_ = [
                int(d[1]),
                float(d[2]),
                float(d[3]) * width,
                float(d[4]) * height,
                float(d[5]) * width,
                float(d[6]) * height
            ]
        else:
            d_ = [
                int(d[1]),
                float(d[2]),
                float(d[3]),
                float(d[4]),
                float(d[5]),
                float(d[6])
            ]

        if int(d[0]) in detections_:
            detections_[int(d[0])].append(d_)
        else:
            detections_[int(d[0])] = [d_]
    detections = detections_

    if not os.path.exists(os.path.join(tracks_dir, txt_filename)):
        logging.info(
            "Tracks file {} does not exist so will make it first...".format(
                os.path.join(tracks_dir, txt_filename)))

        track([txt_filename], os.path.normpath(FLAGS.detections_dir),
              os.path.normpath(FLAGS.stats_dir),
              os.path.normpath(FLAGS.tracks_dir),
              FLAGS.track_detection_threshold, FLAGS.max_age, FLAGS.min_hits)

    with open(os.path.join(tracks_dir, txt_filename), 'r') as f:
        tracks = [line.rstrip().split(',') for line in f.readlines()]

    tracks_ = dict()
    for t in tracks:
        if mult:
            t_ = [
                int(t[1]),
                float(t[2]),
                float(t[3]) * width,
                float(t[4]) * height,
                float(t[5]) * width,
                float(t[6]) * height
            ]
        else:
            t_ = [
                int(t[1]),
                float(t[2]),
                float(t[3]),
                float(t[4]),
                float(t[5]),
                float(t[6])
            ]

        if int(t[0]) in tracks_:
            tracks_[int(t[0])].append(t_)
        else:
            tracks_[int(t[0])] = [t_]

    tracks = tracks_

    capture = cv2.VideoCapture(os.path.join(video_path, video_filename))

    # Get the total number of frames
    total = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    # Might be a problem if video has no frames
    if total < 1:
        print("Check your opencv + ffmpeg installation, can't read videos!!!\n"
              "\nYou may need to install open cv by source not pip")
        return None

    assert total == length - 1 or total == length or total == length + 1

    full_out_video = cv2.VideoWriter(
        "%s_shortened.mp4" % os.path.join(clip_dir, video_filename[:-4]),
        cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), 25, (width, height))

    track_trails = queue.Queue(maxsize=50)
    since = 0
    out_count = 0
    capture.set(1, 0)
    while_safety = 0
    for current in tqdm(range(length),
                        desc="Shortening video: {}".format(video_filename)):

        # capture.set(1, current)
        # flag, frame = capture.read()

        while True:
            while_safety += 1
            flag, frame = capture.read()
            # if flag != 0 and frame is not None:
            if frame is not None:
                break
            if while_safety > 1000:
                break

        if frame is None:  # should only occur at the end of a video
            break

        v_height, v_width, _ = frame.shape
        assert v_height == height
        assert v_width == width

        frame[-50:, -250:, :] = (0, 0, 0)
        cv2.putText(frame, '{}'.format(current),
                    (v_width - 240, v_height - 12), cv2.FONT_HERSHEY_SIMPLEX,
                    1, (255, 255, 255), 2)

        out_frame = frame.copy()

        if display_tracks:
            if current in tracks:
                out_frame = cv_plot_bbox(
                    out_path=None,
                    img=out_frame,
                    bboxes=[t[2:] for t in tracks[current]],
                    scores=[t[1] for t in tracks[current]
                            ],  # todo should do in tracking code
                    labels=[t[0] for t in tracks[current]],
                    thresh=0,
                    colors=colors,
                    class_names=[])

                track_trails_frame = {}
                for t in tracks[current]:
                    # make trails per track
                    track_trails_frame[t[0]] = (int(t[2] +
                                                    ((t[4] - t[2]) / 2)),
                                                int(t[5]))

                # put in the queue that exists over frames
                if track_trails.full():
                    track_trails.get()
                track_trails.put(track_trails_frame)

                # draw the trail as dots that fade with time
                for i, trails in enumerate(list(track_trails.queue)):
                    alpha = math.pow(i / len(list(track_trails.queue)), 2)
                    overlay = out_frame.copy()
                    for tid, dot in trails.items():
                        cv2.circle(overlay, dot, 2, colors[tid], -1)
                    out_frame = cv2.addWeighted(overlay, alpha, out_frame,
                                                1 - alpha, 0)

        if display_detections:
            if current in detections:
                out_frame = cv_plot_bbox(
                    out_path=None,
                    img=out_frame,
                    bboxes=[d[2:] for d in detections[current]],
                    scores=[d[1] for d in detections[current]],
                    labels=[d[0] for d in detections[current]],
                    thresh=0,
                    colors={0: (1, 255, 1)},
                    class_names=['cyclist'])

        forward_buffer = False
        if around == 'tracks':
            if current not in tracks:
                since += 1
            else:
                since = 0
            for check_forward in range(current, current + start_buffer):
                if check_forward in tracks:
                    forward_buffer = True
                    break

        elif around == 'detections':
            if current not in detections:
                since += 1
            else:
                since = 0
            for check_forward in range(current, current + start_buffer):
                if check_forward in detections:
                    forward_buffer = True
                    break
        else:
            raise ValueError("around must be 'detections' or 'tracks'")

        if forward_buffer or since < end_buffer:
            out_count += 1
            full_out_video.write(out_frame)

    if full_out_video is not None:
        full_out_video.release()

    logging.info(
        "\n\nOriginal video length: {}\nNew video length: {} ({}% of original)"
        .format(length, out_count, int(100 * float(out_count) / length)))
Example #23
def post_track_data():
    tracking_data = request.get_json()
    location = track(tracking_data)
    return location
Example #24
    def do_track(self, input):
        '''do_track

        TODO(curtismuntz): add help
        '''
        track.track(self._service)
Example #25
    def add_track(self, name):
        new_track = track.track(name, self.length, self.sr)
        self.tracks = numpy.append(self.tracks, [new_track])
        return new_track
Example #26
def train_car(save_learning_curve=False):
    
    close('all')
    
    # create instances of a car and a track
    monaco = track.track()
    ferrari = car.car()
        
    n_trials = 1000
    n_time_steps = 1000 # maximum time steps for each trial

    if save_learning_curve:
        learn_curve_file = open(params.LEARNING_CURVE_FILE, 'a')
    
    '''    
    net = nn.NeuralNetwork(params.POS_NEURONS, params.POS_RANGE, 
                params.VEL_NEURONS, params.STATIC_VEL_RANGE, params.NB_OUTPUTS, 
                params.ETA, params.GAMMA, params.LAMBDA)
    
    my_net.compute_network_output()
    '''
    for j in arange(n_trials):	

        # before every trial, reset the track and the car.
        # the track setup returns the initial position and velocity. 
        (position_0, velocity_0) = monaco.setup()	
        ferrari.reset()
        
        # choose a first action
        action = ferrari.choose_action(position_0, velocity_0, 0)
        
        # iterate over time
        for i in arange(n_time_steps) :	
            
            # the track receives which action was taken and 
            # returns the new position and velocity, and the reward value.
            (position, velocity, R) = monaco.move(action)	

            # the car chooses a new action based on the new states and reward,
            # and updates its parameters
            action = ferrari.choose_action(position, velocity, R)	
            
            # check if the race is over
            if monaco.finished is True:
                break
        else:
            print "Did not finish the track"
            print "Total reward:", monaco.total_reward

        if save_learning_curve:
            print >> learn_curve_file, \
                    j, monaco.time, monaco.total_reward, monaco.finished
        
        if j%100 == 0 and not save_learning_curve:
            # plots the race result every 100 trials
            monaco.plot_world()
            
        if j%10 == 0:
            print
            print 'TRIAL:', j

        # uncomment only when plotting navigation maps
        #if (j+1)%100 == 0:
        #    plot_navigation_map(ferrari, j+1)

    if save_learning_curve:
        learn_curve_file.close()

    return ferrari #returns a trained car
Example #27
def average_trainings_last_trials(eps, decrease):
    close('all')
    # create instances of a car and a track
    monaco = track.track()
    ferrari = car.car(eps, decrease)
        
    n_trials = 1000
    n_time_steps = 1000 # maximum time steps for each trial
    n_indep_cars = 5.
    times = zeros(n_trials)
    avg_times = zeros(n_trials)

    for k in arange(n_indep_cars):
        monaco = track.track()
        ferrari = car.car(eps, decrease)
      
        for j in arange(n_trials):  
            # before every trial, reset the track and the car.
            # the track setup returns the initial position and velocity. 
            (position_0, velocity_0) = monaco.setup()   
            ferrari.reset()
            
            # choose a first action
            action = ferrari.choose_action(position_0, velocity_0, 0)
            
            # iterate over time
            for i in arange(n_time_steps): 
                
                # the track receives which action was taken and 
                # returns the new position and velocity, and the reward value.
                (position, velocity, R) = monaco.move(action)   
                

                # the car chooses a new action based on the new states and reward, and updates its parameters
                action = ferrari.choose_action(position, velocity, R)   
                
                # check if the race is over
                if monaco.finished is True:
                    break
            
            if j%10 == 0:
                print 'Eps:',eps,'Car:',k, 'Trial:', j

            times[j] = monaco.time

        avg_times = avg_times + times/n_indep_cars

    avgtime_lasttrials = sum(avg_times[-10:]) / 10

    figure(1)
    plot(avg_times)
    ylabel('Latency')
    xlabel('Trial')
    out_str = str(n_indep_cars)+'_latency'+str(eps)+'_avgtime_lasttrials'+str(avgtime_lasttrials)+'.png'
    if decrease:
        out_str = str(n_indep_cars)+'_latency'+str(eps)+'_decreasing_avgtime_lasttrials'+str(avgtime_lasttrials)+'.png'

    savefig(out_str)

    return avg_times

Example #28
"""
I think the race track program will perform better if
it's implemented using TD algorithm(after reading chapter
6 and 7). Currently it's using a MC method.

The best method I found is Q-learning. It's fast and the policy seems
to be optimal.
"""
from track import track
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import time
from race_track_fun import generate_episode, generate_reward_and_next_state

race_track = track()
height, width = race_track.shape
n_vv = 5  # vertical velocity component, taking values 0 to 4
n_vh = 5  # horizontal velocity component, taking values 0 to 4
actions = 5  # in fact, only five actions are valid (velocity is assumed to point only up or right, for simplicity)
# Q = np.ones((height, width, n_vv, n_vh, actions))*-999999999 # use -inf
Q = np.zeros((height, width, n_vv, n_vh, actions))
returns = np.zeros((height, width, n_vv, n_vh, actions))
"""
actions:
0 means stay where you are or keep moving
1 means move up
2 means move left
3 means move down
4 means move right
"""
Example #29
    def stereo_callback(self, stereo_data):
        stereo_data = self.bridge.imgmsg_to_cv2(stereo_data, 'rgba8')
        stereo_data = cv2.UMat(stereo_data[:, :, :-1])

        if not self.screen_init:
            self.ixs, self.rect_pts, screen_pts = screen_init(stereo_data)
            self.screen_init = True
            self.screen_rect = cv2.boundingRect(np.array(screen_pts))

        elif not self.track_init:
            self.bbox, self.tracker = track_init(stereo_data)
            self.track_init = True
            print("init")

        else:

            stereo_data, self.bbox, screen_box = track(stereo_data, self.ixs,
                                                       self.rect_pts,
                                                       self.bbox, self.tracker)
            self.track_rect = np.array(self.bbox, dtype=int)
            self.screen_box = screen_box[1]
            self.screen_box_ix = screen_box[0]
            cv2.rectangle(stereo_data,
                          (self.track_rect[0], self.track_rect[1]),
                          (self.track_rect[0] + self.track_rect[2],
                           self.track_rect[1] + self.track_rect[3]),
                          (255, 0, 0), 2)

        if self.track_rect is not None and self.screen_depth is not None and self.track_depth is not None:

            screen_mean = np.mean(self.screen_depth)
            track_mean = np.mean(self.track_depth)
            self.screen_buffer.append(screen_mean)
            if len(self.screen_buffer) > self.track_buffer_size:
                self.screen_buffer = self.screen_buffer[1:]
            self.track_buffer.append(track_mean)
            if len(self.track_buffer) > self.track_buffer_size:
                self.track_buffer = self.track_buffer[1:]
            track_mean = np.mean(self.track_buffer)
            s = np.abs(screen_mean - track_mean)
            print("screen_mean", screen_mean)
            print("track_mean", track_mean)
            print("s_diff", s, "ix", self.screen_box_ix)
            if s < 1.5 or track_mean <= screen_mean:

                fill_matches(stereo_data,
                             self.ixs,
                             self.rect_pts,
                             self.bbox,
                             color=(0, 255, 0))
                if self.screen_box_ix != -1 and self.screen_box_ix_prev != self.screen_box_ix:
                    aud_str = 'mpg123 auds/{}.mp3 --frames 30 --quiet &'.format(
                        self.screen_box_ix)
                    os.system(aud_str)

                if self.screen_box_ix_prev != self.screen_box_ix:
                    self.screen_box_ix_prev = self.screen_box_ix

            cv2.imshow("screen_depth", self.screen_depth)
            cv2.imshow("track_depth", self.track_depth)
            cv2.imshow("cam_feed", stereo_data)
            cv2.imshow("disp_image", self.disp_image)
            cv2.waitKey(3)
Example #30
DEBUG = False
VK_ESCAPE = 0x1B

if __name__ == '__main__':
    #Main argument parser
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--outfile',
        default="output\\plot.png",
        help=
        "The output visualization of your mouse movement, default='output\\plot.png'"
    )
    args = parser.parse_args()
    '''
    Memory usage was/still might be a concern. The ResourceLogger can be used to 
    log the memory and CPU usage to ensure it stays reasonable.
    '''
    if DEBUG:
        from resource_logging import ResourceLogger

        logger = ResourceLogger()
        logger.start()

    #Listens for the escape key, and will signal when it happens
    #Used to exit the program
    listener = KeyListener(VK_ESCAPE)
    listener.start()
    track(args.outfile, listener)

    if DEBUG:
        logger.stop()
Example #31
def train_car(save_learning_curve=False):

    close('all')

    # create instances of a car and a track
    monaco = track.track()
    ferrari = car.car()

    n_trials = 1000
    n_time_steps = 1000  # maximum time steps for each trial

    if save_learning_curve:
        learn_curve_file = open(params.LEARNING_CURVE_FILE, 'a')
    '''    
    net = nn.NeuralNetwork(params.POS_NEURONS, params.POS_RANGE, 
                params.VEL_NEURONS, params.STATIC_VEL_RANGE, params.NB_OUTPUTS, 
                params.ETA, params.GAMMA, params.LAMBDA)
    
    my_net.compute_network_output()
    '''
    for j in arange(n_trials):

        # before every trial, reset the track and the car.
        # the track setup returns the initial position and velocity.
        (position_0, velocity_0) = monaco.setup()
        ferrari.reset()

        # choose a first action
        action = ferrari.choose_action(position_0, velocity_0, 0)

        # iterate over time
        for i in arange(n_time_steps):

            # the track receives which action was taken and
            # returns the new position and velocity, and the reward value.
            (position, velocity, R) = monaco.move(action)

            # the car chooses a new action based on the new states and reward,
            # and updates its parameters
            action = ferrari.choose_action(position, velocity, R)

            # check if the race is over
            if monaco.finished is True:
                break
        else:
            print "Did not finish the track"
            print "Total reward:", monaco.total_reward

        if save_learning_curve:
            print >> learn_curve_file, \
                    j, monaco.time, monaco.total_reward, monaco.finished

        if j % 100 == 0 and not save_learning_curve:
            # plots the race result every 100 trials
            monaco.plot_world()

        if j % 10 == 0:
            print
            print 'TRIAL:', j

        # uncomment only when plotting navigation maps
        #if (j+1)%100 == 0:
        #    plot_navigation_map(ferrari, j+1)

    if save_learning_curve:
        learn_curve_file.close()

    return ferrari  #returns a trained car
Example #32
        values[label][cnt[label]]['point'] = real_coordinate(
            [xmin, FY - ymax], [xmax, FY - ymax])  # return the real-world coordinates of the bottom-centre point
        values[label][cnt[label]]['gridbox'] = [ymin, xmin, ymax, xmax]
        # ROI RGB
        roi_px = int((xmin + xmax) * roi_dict[label]['x'])
        roi_py = int((ymin + ymax) * roi_dict[label]['y'])
        roi_color = []
        for i in range(roi_px - ROI_LEN_X, roi_px + ROI_LEN_X + 1):
            for j in range(roi_py - ROI_LEN_Y, roi_py + ROI_LEN_Y + 1):
                if i >= 0 and i <= FX and j >= 0 and j <= FY:
                    roi_color.append(frame[j][i])
        values[label][cnt[label]]['color'] = determine_color(roi_color)

    #----- 3. Run the tracking algorithm; label rendering via cv2 ------#
    # run the tracking algorithm
    exists, dir, crush_time = track.track(exists, values, detect_list)
    # rather than using the grid here, braking or sounding a warning based on the collision time is probably better; the grid is only for judgement
    # draw the grid with cv2
    server_label = {}
    for object_label in detect_list:
        exist_list = exists[object_label]
        LEN_EXIST = len(exist_list)
        for i in range(LEN_EXIST):
            if exist_list[i].detect == True:
                ymin, xmin, ymax, xmax = exist_list[i].gridbox
                x, y = exist_list[i].point
                object_name = object_label + str(exist_list[i].number)
                label = '%s: x: %.2f m , y: %.2f m' % (
                    object_name, x, y)  # Example: 'person: 72%'
                labelSize, baseLine = cv2.getTextSize(label,
                                                      cv2.FONT_HERSHEY_SIMPLEX,
Example #33
    :param v1: the first vector
    :param v2: the 2nd vector

    :returns: the angle (IN DEGREES) between vectors 'v1' and 'v2', with positive meaning v2 is rotated CW relative to v1, i.e. south is +90 w.r.t. east
    """
    v1_u = unit_vector(v1)
    v2_u = unit_vector(v2)
    angle = np.rad2deg(np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0)))
    if v1_u[0] * v2_u[1] - v1_u[1] * v2_u[0] < 0:
        angle = -angle
    return angle
    # return (angle_raw-360.0*np.rint(angle_raw/360.0))  # Shift range and reverse convention
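    # A quick sanity check of the sign convention above (the enclosing function's name is not
    # visible in this excerpt, so angle_between is only a placeholder for it):
    #   angle_between((1, 0), (0, 1))   -> +90.0  (south is +90 deg CW of east, image coordinates)
    #   angle_between((1, 0), (0, -1))  -> -90.0  (north is -90 deg)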


tr = track()  # construct empty class to use class variables

for name in names:

    track_direction = track_direction_dict[name]
    print(f'*** Now processing: {name} which is a {track_direction} track')

    # Load gray version of the track picture and recover the grayscale format.
    fn = './tracks_gray/' + name + '_G.png'
    print('loading gray scale track image {}'.format(fn))
    im = cv.imread(fn, cv.IMREAD_UNCHANGED)
    im = cv.cvtColor(im, cv.COLOR_BGR2GRAY)

    # Make boundaries between regions sharp (matplotlib applies color interpolation between regions of different color)
    # And assign new values to the different regions:
    # 0 - water
Example #34
            continue

        height, width = image.shape[:2]

        centroid = centroids[idx].astype('int')
        track_index = get_nearest_track(centroid, track_list, height, width)

        # TODO: Check whether track_index is already in found_index (i.e. already assigned)

        if track_index == -1:
            # create new track
            nb_tracks += 1

            # create new track
            if tracker_type == 'kalman filter':
                newTrack = track(nb_tracks, bboxes[idx][:-1], centroid, area,
                                 tracker_type)
            else:
                newTrack = track(nb_tracks, bboxes[idx][:-1], centroid, area,
                                 tracker_type, image)

            track_list.append(newTrack)
            #print("New track")
            track_index = track_list.index(newTrack)

            #draw_bbox(image, track_list, track_index, color_code_map)
            found_index.append(track_index)

        else:

            # Update track corresponding on track index
            track_list[track_index].centroid = centroid
Example #35
def mot_tracking():

    # find files where bbox coordinates written for each frame
    det_path, imgs_path, ckpt_path, track_path = get_paths()

    # find all images and sort them in time
    _, _, images = os.walk(imgs_path).__next__()
    images = sorted(images)

    # build the resnet feature extractor
    resnet_feat_extractor = resnet(ckpt_path)

    resnet_feat_extractor.build()

    # the list of tentative or confirmed tracks
    T = []

    for k, img in enumerate(images, 1):

        img_k = read_img(imgs_path, img)

        # read the bbox detections at frame k
        with open(det_path + '/' + 'gt.txt', 'r') as f:

            # the list of 2d bbox coordinates in the current image
            bboxes_2d = []

            # read each detections obtained at time k
            for line in f:

                # remove trailing whitespace characters and split the line
                obj_info = line.rsplit()[0].split(',')

                frame_num = int(obj_info[0])

                # there is no more detections at time k
                if (frame_num > k):
                    continue

                # do not consider previous detections
                if (frame_num < k):
                    continue

                obj_score = float(obj_info[6])
                if (obj_score < 0.2):
                    continue

                visibility = float(obj_info[-1])
                if (visibility == 0):
                    continue

                # 2d bbox coordinates: left,top, width and height
                bbox_2d = [float(x) for x in obj_info[2:6]]

                bboxes_2d.append(bbox_2d)

            # the number of detections at time k
            M = len(bboxes_2d)

            # extract the appearance descriptors using the resnet
            app_des, _ = resnet_feat_extractor.get_features(img_k, bboxes_2d)

        # initialize the first tracks
        if (k == 1):
            for z, app in zip(bboxes_2d, app_des):
                T.append(track(z, app))

        else:
            '''State Check, Pruning and Prediction'''
            for t in T:

                t.check_state()

                # find and remove terminated tracks
                if (t.is_terminated()):
                    T.remove(t)
                # call kf to predict new states of existing targets
                else:
                    t.predict()
            '''Validation and Appearance-based Cost Computation'''
            costs = []
            # validate and compute the combined cost
            for t in T:
                # compute the squared Mahalabobis distances
                m_dist = t.Mahalanobis_dist(bboxes_2d)
                # compute the cosine distance
                c_dist = t.cosine_distance(app_des)
                # combine the two distances for validated measurements
                t_cost = t.validate_cost(m_dist, c_dist, M)

                costs.append(t_cost)

            # form a list of pairs in the format of (bbox_2d_i,appear_des_i)
            Z = list(zip(bboxes_2d, app_des))
            '''Main Association, Update, and IOU-based Cost Computation'''
            # find the possible associations and unassociated measurements
            assoc1, U1 = associate(T, Z, costs, MAX_COST)

            # the list of detected tracks ids
            detec_ids1 = [t_id for t_id, _ in assoc1]

            # the list of tentative but unassociated tracks at the age of 1
            Tt = []
            # the intersection-over-union cost for tracks
            iou_costs = []
            # the list of unassociated bbox measurements
            u_Z = [z for z, _ in U1]

            for t in T:
                if (t.trackId in detec_ids1):
                    # find the index of the track in the list
                    t_idx = detec_ids1.index(t.trackId)
                    # find the index of the associated measurement
                    _, meas_idx = assoc1[t_idx]
                    # update the track
                    t.update(bboxes_2d[meas_idx], app_des[meas_idx])

                # find tentative but unassociated tracks at the age
                # of 1 for intersection-over-union matching
                else:
                    if (t.is_tentative() and t.num_md == 1):
                        Tt.append(t)

                        # compute the iou cost
                        c_iou = t.iou_cost(u_Z)

                        iou_costs.append(c_iou)
            '''IoU based Association for tentative tracks, and Update'''
            # associations for tracks in Tt and unassociated measurements
            assoc2, U2 = associate(Tt, U1, iou_costs, MAX_COST)

            # the list of detected tracks ids
            detec_ids2 = [t_id for t_id, _ in assoc2]

            # update tracks with measurements according to assoc2
            for t in Tt:
                if (t.trackId in detec_ids2):
                    # find the index of the track in the list
                    t_idx = detec_ids2.index(t.trackId)
                    # find the index of the associated measurement
                    _, meas_idx = assoc2[t_idx]
                    # update the track
                    t.update(bboxes_2d[meas_idx], app_des[meas_idx])
            '''Initialize Newborn Tracks'''
            for z, app in U2:
                T.append(track(z, app))
        '''Draw Tracks on the Image'''
        draw(img_k, track_path, img, T)
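The validation step in this example fuses a motion term (squared Mahalanobis distance against the Kalman prediction) with an appearance term (cosine distance between ResNet descriptors) before running the assignment. Below is a minimal sketch of how such a combined, gated cost can be computed; the weighting factor lam, the chi-square gate, and all function and variable names are assumptions for illustration only, not the API of the track class used above.

import numpy as np

CHI2_GATE_4D = 9.4877  # assumed gate: 95% chi-square quantile for a 4-d measurement

def combined_cost(z_pred, S, track_feat, boxes, feats, lam=0.02, max_cost=1e5):
    """Sketch: fuse squared Mahalanobis and cosine distances for one track.

    z_pred     -- predicted measurement (4,) from the Kalman filter
    S          -- innovation covariance (4, 4)
    track_feat -- unit-norm appearance feature of the track (d,)
    boxes      -- candidate measurements, shape (M, 4)
    feats      -- unit-norm appearance features of the candidates, shape (M, d)
    """
    d = boxes - z_pred                                          # innovations for all candidates
    m_dist = np.einsum('ij,jk,ik->i', d, np.linalg.inv(S), d)   # squared Mahalanobis distances
    c_dist = 1.0 - feats @ track_feat                           # cosine distances
    cost = lam * m_dist + (1.0 - lam) * c_dist                  # weighted combination
    cost[m_dist > CHI2_GATE_4D] = max_cost                      # gate out implausible candidates
    return cost

Candidates whose cost is pushed to max_cost would then be rejected by the MAX_COST threshold in the assignment step, much like the validate_cost / associate pair used in the example.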
Example #36
0
def train_car(maserati):
    
    close('all')

    print_log = False
    dynamicEpsilon = False
    doOptimalActionAnalysis = False
    useOptimalCar = False

    n_trials = 1000
    if dynamicEpsilon or doOptimalActionAnalysis:
        numCars = 10
    else:
        numCars = 10
    epsilons = array([0.1]) #0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9
    if dynamicEpsilon:
        epsilons = array([-1, -5, -9]) # parameters for epsilon function

    #######################################################################################################################################

    numEpsilons = epsilons.size
    n_time_steps = 1000  # maximum time steps for each trial
    timeLog = NaN * zeros((numCars,numEpsilons,n_trials))
    rewardLog = NaN * zeros((numCars,numEpsilons,n_trials))
    optimalActionsInterval = 20 # number of trials between each rendition of optimal actions
    optimalActionsResolution = 100  # density of evaluation coordinates
    optimalActions = NaN * zeros((ceil(n_trials / optimalActionsInterval),optimalActionsResolution,optimalActionsResolution))
    # create instances of a car and a track
    monaco = track.track()
    
    for c in arange(numCars):
        print "Starting sim of car ", c, "of", numCars
        for e in arange(numEpsilons):
            print "Epsilon ", e, "of", numEpsilons
            if maserati is None:
                if useOptimalCar:
                    print "Using optimal car"
                    ferrari = optimal_car.optimal_car()
                else:
                    print "Using standard car"
                    ferrari = car.car()
            else:
                print "Using loaded car"
                ferrari = maserati

            monaco.ferrari = ferrari
            #ferrari.set_epsilon(0.1)
            for j in arange(n_trials):

                # before every trial, reset the track and the car.
                # the track setup returns the initial position and velocity. 
                (position_0, velocity_0) = monaco.setup()	
                ferrari.reset()
                
                if not useOptimalCar:
                    if dynamicEpsilon:
                        ferrari.set_epsilon( exp(epsilons[e] * double(j)/n_trials) )# t = 0
                    else:
                        ferrari.set_epsilon( epsilons[e] )
                # choose a first action
                action = ferrari.choose_action(position_0, velocity_0, 0, print_results = print_log)
               
                # iterate over time
                for i in arange(n_time_steps) :	
                    #if dynamicEpsilon:
                    #    ferrari.set_epsilon( exp(epsilons[e] * ((double(i)/n_time_steps))) )
                    #else:
                    #    ferrari.set_epsilon( epsilons[e] )
                    # the track receives which action was taken and 
                    # returns the new position and velocity, and the reward value.
                    (position, velocity, R) = monaco.move(action)	
                   
                    # the car chooses a new action based on the new states and reward, and updates its parameters
                    action = ferrari.choose_action(position, velocity, R, print_results = print_log)
                   
                    # monaco.plot_world()
                   
                    # check if the race is over
                    if monaco.finished is True:
                       break

                    # if j == 30 and i >= 0:
                    #    print_log = True

                if monaco.finished:
                    timeLog[c][e][j] = i
                else:
                    timeLog[c][e][j] = NaN

                rewardLog[c][e][j] = monaco.total_reward

                if j%10 == 0:
                   monaco.plot_world()

                if (doOptimalActionAnalysis and (j%optimalActionsInterval == optimalActionsInterval - 1)):
                    coords = linspace(1.0/(2*optimalActionsResolution), 1 - 1.0/(2*optimalActionsResolution), optimalActionsResolution)
                    for ix in arange(optimalActionsResolution):
                        for iy in arange(optimalActionsResolution):
                            px = coords[ix]
                            py = coords[iy]
                            (optAction, Q) = ferrari.actionMaxQ(ferrari.posCellsActivity((px, py)), ferrari.velCellsActivity((0,0)))
                            optimalActions[j // optimalActionsInterval][ix][iy] = optAction

                print 'Trial:', j

                #print 'Q policy stats: epsilon = ', ferrari.epsilon , 'epsilon ~=', (ferrari.pc_rand/(ferrari.pc_Qmax+ferrari.pc_rand)), 'e', e, ' epsilons[e]', epsilons[e]

            if (numEpsilons + numCars > 2):
                print "Car", c, ", epislon ",e ," times and rewards:"
                print timeLog[c][e]
                print rewardLog[c][e]

    if doOptimalActionAnalysis:
        np.savetxt("optimalActions.log",optimalActions.flatten())

    if (numEpsilons + numCars > 2):
        np.savetxt("timeLogs.log",timeLog.flatten())
        np.savetxt("rewardLogs.log",rewardLog.flatten())

    print ferrari.best_actions[0]
    print ferrari.best_actions[1]
    print ferrari.best_actions[2]

    return ferrari #returns a trained car
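When dynamicEpsilon is enabled above, the exploration rate is set to exp(epsilons[e] * j / n_trials), so it decays exponentially from 1.0 at the first trial towards exp(k) at the last. A small, self-contained sketch of that schedule (the function name is hypothetical, not part of the car class):

from math import exp

def epsilon_schedule(k, trial, n_trials):
    """Exponentially decaying exploration rate; k is a negative decay parameter (e.g. -1, -5, -9)."""
    return exp(k * float(trial) / n_trials)

# with k = -5 the rate falls from 1.0 at trial 0 to roughly exp(-5) ~ 0.007 at the last trial
rates = [epsilon_schedule(-5, j, 1000) for j in (0, 500, 999)]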
Example #37
0
def per_process():

    # Get a list of videos to process
    if os.path.exists(os.path.normpath(FLAGS.videos_dir)):
        videos = os.listdir(os.path.normpath(FLAGS.videos_dir))
        logging.info("Will process {} videos from {}".format(
            len(videos), os.path.normpath(FLAGS.videos_dir)))
    else:
        logging.info("videos_dir does not exist: {}".format(
            os.path.normpath(FLAGS.videos_dir)))
        return

    # generate frames
    for video in tqdm(videos, desc='Generating frames'):
        video_to_frames(os.path.join(os.path.normpath(FLAGS.videos_dir),
                                     video),
                        os.path.normpath(FLAGS.frames_dir),
                        os.path.normpath(FLAGS.stats_dir),
                        every=FLAGS.detect_every)

    # make a frame list to build a detection dataset
    frame_paths = list()
    for video in videos:
        with open(
                os.path.join(os.path.normpath(FLAGS.stats_dir),
                             video[:-4] + '.txt'), 'r') as f:
            video_id, width, height, length = f.read().rstrip().split(',')

        for frame in range(0, int(length), FLAGS.detect_every):
            frame_path = os.path.join(os.path.normpath(FLAGS.frames_dir),
                                      video, "{:010d}.jpg".format(frame))
            if not os.path.exists(frame_path):
                logging.warning(
                    "{} Frame image file doesn't exist. Probably because you extracted frames at "
                    "a higher 'every' value than the 'detect_every' value specified"
                    .format(frame_path))
                logging.warning(
                    "Will re-extract frames, you have 10 seconds to cancel")
                time.sleep(10)

                video_to_frames(os.path.join(
                    os.path.normpath(FLAGS.videos_dir), video),
                                os.path.normpath(FLAGS.frames_dir),
                                os.path.normpath(FLAGS.stats_dir),
                                overwrite=True,
                                every=FLAGS.detect_every)
            else:
                frame_paths.append(frame_path)

    # testing contexts
    ctx = [mx.gpu(int(i)) for i in FLAGS.gpus.split(',') if i.strip()]
    ctx = ctx if ctx else [mx.cpu()]

    net, transform = prep_net(os.path.normpath(FLAGS.model_path),
                              FLAGS.batch_size, ctx)

    dataset, loader = prep_data(frame_paths, transform, FLAGS.batch_size,
                                FLAGS.num_workers)

    detect(net, dataset, loader, ctx, FLAGS.detections_dir,
           FLAGS.save_detection_threshold)

    # Get a list of detections to process
    if os.path.exists(os.path.normpath(FLAGS.detections_dir)):
        detections = os.listdir(FLAGS.detections_dir)
        logging.info("Will process {} detections files from {}".format(
            len(detections), os.path.normpath(FLAGS.detections_dir)))
    else:
        logging.info("detections_dir does not exist: {}".format(
            os.path.normpath(FLAGS.detections_dir)))
        return

    track(detections, FLAGS.detections_dir, FLAGS.stats_dir, FLAGS.tracks_dir,
          FLAGS.track_detection_threshold, FLAGS.max_age, FLAGS.min_hits)

    # visualise
    for video in videos:
        visualise(os.path.join(os.path.normpath(FLAGS.videos_dir), video),
                  FLAGS.frames_dir, FLAGS.detections_dir, FLAGS.tracks_dir,
                  FLAGS.stats_dir, FLAGS.vis_dir, FLAGS.img_snapshots_dir,
                  FLAGS.vid_snapshots_dir, FLAGS.around, FLAGS.start_buffer,
                  FLAGS.end_buffer, FLAGS.display_tracks,
                  FLAGS.display_detections, FLAGS.display_trails,
                  FLAGS.save_static_trails, FLAGS.generate_image_snapshots,
                  FLAGS.generate_video_snapshots, FLAGS.summary, FLAGS.full)
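Both pipeline variants rebuild the list of frame images the detector expects: frames are sampled every FLAGS.detect_every frames and written as zero-padded ten-digit JPEG names under a per-video directory. A minimal sketch of that bookkeeping is given below; the function name is hypothetical and the path layout simply mirrors the code above.

import os

def expected_frame_paths(frames_dir, video, length, every):
    """Return the frame image paths the detector will look for."""
    paths = []
    for frame in range(0, int(length), every):
        # e.g. <frames_dir>/<video>/0000000120.jpg for frame 120
        paths.append(os.path.join(os.path.normpath(frames_dir),
                                  video, "{:010d}.jpg".format(frame)))
    return paths

If a path in this list is missing, the pipeline above assumes frames were extracted with a coarser 'every' value and re-extracts them with overwrite=True.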
Example #38
0
    def run(self):
        # logger.setLevel(logging.DEBUG)
        logger.info("Starting track process track {}".format(self.track_name))
        self.track = track(self.track_name)
        # maps from client_addr to car_model (or None if a spectator)
        self.car_dict = dict()
        # list of all car states, to send to clients and put in each car's state
        self.car_states_list = list()
        # list of spectator client addresses
        self.spectator_list = list()
        # make a new datagram socket
        self.track_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        # put track socket in nonblocking mode to just poll for client messages
        self.track_socket.settimeout(0)
        # bind to the local port number that the server told us to use
        try:
            self.track_socket.bind(('0.0.0.0', self.local_port_number))
        except Exception as e:
            logger.error(
                'track process aborting: could not bind to the local port {} that server told us to use: got {}'
                .format(self.local_port_number, e))
            raise e
        # get the port info for our local port
        self.track_socket_address = self.track_socket.getsockname()
        logger.info('for track {} bound free local UDP port address {}'.format(
            self.track_name, self.local_port_number))
        last_time = timer() - self.paused_duration

        # Track process makes a single socket bound to a single port for all the clients (cars and spectators).
        # To handle multiple clients, when it gets a message from a client, it responds to the client using the client address.

        looper = loop_timer(MODEL_UPDATE_RATE_HZ)
        looper.LOG_INTERVAL_SEC = 180
        while not self.exit:
            now = timer()
            if now - self.last_message_time > KILL_ZOMBIE_TRACK_TIMEOUT_S:
                logger.warning(
                    'track process {} got no input for {}s, terminating'.
                    format(self.track_name, KILL_ZOMBIE_TRACK_TIMEOUT_S))
                self.exit = True
                self.cleanup()
                continue
            dt = now - last_time
            last_time = now

            if self.paused:
                self.paused_duration += dt

            self.process_server_queue()  # 'add_car' 'add_spectator'

            # Here we derive the constrained simulation timestep from real elapsed time
            # If the requested timestep is bigger than the maximum, update with the maximum allowed timestep
            # We limit the timestep to avoid instability
            if dt > MAX_TIMESTEP:
                s = 'bounded real dt_sec={:.1f}ms to {:.2f}ms'.format(
                    dt * 1000, MAX_TIMESTEP * 1000)
                logger.info(s)
                dt = MAX_TIMESTEP

            # now we do main simulation/response
            # update all the car models
            if not self.paused:
                for client, model in self.car_dict.items():
                    if isinstance(model, car_model):
                        model.update(dt)  # car_state time updates already here
                        model.time += dt  # car_model time updates here
                        # poll for UDP messages
                # update the global list of car states that cars share
                self.car_states_list.clear()
                for model in self.car_dict.values():
                    # put copy of each state in list but strip off the contained list of other car states
                    model_copy: car_state = copy.copy(model.car_state)
                    self.car_states_list.append(model_copy)

            # process incoming UDP messages from clients, e.g. to update command
            while True:
                try:
                    msg, payload, client = self.receive_msg()
                    self.handle_client_msg(msg, payload, client)
                except socket.timeout:
                    break
                except BlockingIOError:
                    break
                except Exception as e:
                    logger.warning(
                        'caught Exception {} while processing UDP messages from client'
                        .format(e))
                    break
            try:
                looper.sleep_leftover_time()
            except KeyboardInterrupt:
                logger.info('KeyboardInterrupt, stopping server')
                self.exit = True
                continue

        self.cleanup()
        logger.info('ended track {}'.format(self.track_name))
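The client-message loop in this example relies on the UDP socket being non-blocking (settimeout(0)), so an empty receive raises an exception instead of stalling the simulation step. A stripped-down sketch of that polling pattern follows; the helper name, buffer size, and port choice are assumptions for illustration.

import socket

def poll_messages(sock, max_bytes=4096):
    """Drain every pending datagram from a non-blocking UDP socket."""
    messages = []
    while True:
        try:
            payload, client_addr = sock.recvfrom(max_bytes)
            messages.append((payload, client_addr))
        except (socket.timeout, BlockingIOError):
            break  # nothing left to read this cycle
    return messages

# usage sketch: bind to any free local port and poll without blocking
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind(('0.0.0.0', 0))
sock.settimeout(0)
pending = poll_messages(sock)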
Example #39
0
def per_video():

    # Get a list of videos to process
    if os.path.exists(os.path.normpath(FLAGS.videos_dir)):
        videos = os.listdir(FLAGS.videos_dir)
        logging.info("Will process {} videos from {}".format(
            len(videos), os.path.normpath(FLAGS.videos_dir)))
    else:
        logging.info("videos_dir does not exist: {}".format(
            os.path.normpath(FLAGS.videos_dir)))
        return

    for i, video in enumerate(videos):
        print("Video ({}) {} of {}".format(video, i + 1, len(videos)))
        video_to_frames(os.path.join(os.path.normpath(FLAGS.videos_dir),
                                     video),
                        FLAGS.frames_dir,
                        FLAGS.stats_dir,
                        overwrite=False,
                        every=FLAGS.detect_every)

        with open(
                os.path.join(os.path.normpath(FLAGS.stats_dir),
                             video[:-4] + '.txt'), 'r') as f:
            video_id, width, height, length = f.read().rstrip().split(',')

        frame_paths = list()
        for frame in range(0, int(length), FLAGS.detect_every):
            frame_path = os.path.join(os.path.normpath(FLAGS.frames_dir),
                                      video, "{:010d}.jpg".format(frame))
            if not os.path.exists(frame_path):
                logging.warning(
                    "{} Frame image file doesn't exist. Probably because you extracted frames at "
                    "a higher 'every' value than the 'detect_every' value specified"
                    .format(frame_path))
                logging.warning(
                    "Will re-extract frames, you have 10 seconds to cancel")
                time.sleep(10)

                video_to_frames(os.path.join(
                    os.path.normpath(FLAGS.videos_dir), video),
                                os.path.normpath(FLAGS.frames_dir),
                                os.path.normpath(FLAGS.stats_dir),
                                overwrite=True,
                                every=FLAGS.detect_every)
            else:
                frame_paths.append(frame_path)

        if 'yolo' in FLAGS.model:
            model_path = 'models/0001/yolo3_mobilenet1_0_cycle_best.params'
        else:
            model_path = 'models/0002/faster_rcnn_best.params'
            FLAGS.batch_size = 1
            FLAGS.gpus = '0'

        ctx = [mx.gpu(int(i)) for i in FLAGS.gpus.split(',') if i.strip()]
        ctx = ctx if ctx else [mx.cpu()]

        net, transform = prep_net(os.path.normpath(model_path),
                                  FLAGS.batch_size, ctx)

        dataset, loader = prep_data(frame_paths, transform, FLAGS.batch_size,
                                    FLAGS.num_workers)

        detect(net, dataset, loader, ctx, FLAGS.detections_dir,
               FLAGS.save_detection_threshold)

        track([video[:-4] + '.txt'], FLAGS.detections_dir, FLAGS.stats_dir,
              FLAGS.tracks_dir, FLAGS.track_detection_threshold, FLAGS.max_age,
              FLAGS.min_hits)

        visualise(os.path.join(os.path.normpath(FLAGS.videos_dir), video),
                  FLAGS.frames_dir, FLAGS.detections_dir, FLAGS.tracks_dir,
                  FLAGS.stats_dir, FLAGS.vis_dir, FLAGS.img_snapshots_dir,
                  FLAGS.vid_snapshots_dir, FLAGS.around, FLAGS.start_buffer,
                  FLAGS.end_buffer, FLAGS.display_tracks,
                  FLAGS.display_detections, FLAGS.display_trails,
                  FLAGS.save_static_trails, FLAGS.generate_image_snapshots,
                  FLAGS.generate_video_snapshots, FLAGS.summary, FLAGS.full)
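The track() call above (whose implementation is not shown in this example) links detections across frames subject to max_age and min_hits; trackers of this kind typically score candidate matches by intersection over union. The helper below is a generic IoU for axis-aligned (x1, y1, x2, y2) boxes, included only as an illustration of that scoring, not as the library's own code.

def iou(box_a, box_b):
    """Intersection over union of two axis-aligned boxes given as (x1, y1, x2, y2)."""
    x1 = max(box_a[0], box_b[0])
    y1 = max(box_a[1], box_b[1])
    x2 = min(box_a[2], box_b[2])
    y2 = min(box_a[3], box_b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0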