def keep_required(self, data_required):
    """ This method drops entries with missing required keys """
    index_list = list()
    print('')
    progress_bar = 'Removing products with missing data:'
    progress_bar = FillingCirclesBar(progress_bar, max=len(self.data))
    for i, dictionary in enumerate(self.data):
        try:
            for required in data_required:
                key = required['name']
                required = required['required']
                # Check if the entry has the required key
                if required and key not in dictionary:
                    raise KeyError
                # Check that the required value is not null
                if required and not dictionary[key]:
                    raise KeyError
        except KeyError:
            # Save the entry's index if a required key is missing
            index_list.append(i)
        progress_bar.next()
    progress_bar.finish()
    # Delete all entries with a key error, popping from the end so the
    # remaining indexes stay valid
    index_list.reverse()
    for index in index_list:
        self.data.pop(index)
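# A minimal alternative sketch for the same filtering step: keep only the
# valid entries with a list comprehension instead of collecting indexes and
# popping in reverse. The helper `_entry_is_valid` is hypothetical; it assumes
# the same `data_required` schema of {'name': ..., 'required': ...} dicts.
def _entry_is_valid(entry, data_required):
    return all(not spec['required'] or entry.get(spec['name'])
               for spec in data_required)

# self.data = [entry for entry in self.data
#              if _entry_is_valid(entry, data_required)]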
def format_data(self, data_format):
    """ This method formats the data to the format required by the database """
    print('')
    progress_bar = 'Formatting data:'
    progress_bar = FillingCirclesBar(progress_bar, max=len(self.data))
    for i, dictionary in enumerate(self.data):
        for key_format in data_format:
            key = key_format['name']
            if key in dictionary:
                data_type = key_format['type']
                if data_type == str:
                    dictionary[key] = str(dictionary[key])
                    if 'length' in key_format:
                        length = key_format['length']
                        dictionary[key] = dictionary[key][:length]
                elif data_type == int:
                    dictionary[key] = int(dictionary[key])
                elif data_type == list:
                    dictionary[key] = self.string_to_list(dictionary[key])
        self.data[i] = dictionary
        progress_bar.next()
    progress_bar.finish()
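# A hypothetical `data_format` specification matching the branches above:
# each entry names a key, the target Python type, and, for strings, an
# optional maximum length used for truncation.
data_format = [
    {'name': 'product_name', 'type': str, 'length': 255},
    {'name': 'code', 'type': int},
    {'name': 'categories', 'type': list},
]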
def download_products(self, categories, page_size, pages):
    """ Download products into temporary JSON files """
    self.categories = categories
    self.page_size = page_size
    self.pages = pages
    # Download each category
    for category in self.categories:
        try:
            dir_path = path.join(self.tmp_dir, category)
            mkdir(dir_path)
        except FileExistsError:
            print(f'The directory "{dir_path}" already exists')
        # Headers for the request, see: https://en.wiki.openfoodfacts.org/API/Read/Search
        headers = {
            'User-agent': 'Pur Beurre Substitute - Mac OS X 10.13 - Version 1.0'
        }
        # A progress bar to show the application working
        print('')
        progress_bar = f'Downloading category "{category}":'
        progress_bar = FillingCirclesBar(progress_bar, max=self.pages)
        for page in range(self.pages):
            # Parameters sent with the request
            parameters = {
                'json': 1,
                'page_size': self.page_size,
                'page': (page + 1),
                'tagtype_0': 'categories',
                'tag_contains_0': 'contains',
                'tag_0': category,
                'action': 'process'
            }
            # File in which the data are saved
            file_name = f'{page}.json'
            file_path = path.join(dir_path, file_name)
            with open(file_path, 'w') as output_file:
                try:
                    result = requests.get(self.url_base, params=parameters,
                                          headers=headers, stream=True)
                    result.raise_for_status()
                except requests.HTTPError as err:
                    print(err)
                # Write the data in JSON format
                json.dump(result.json(), output_file, indent=4)
            progress_bar.next()
        progress_bar.finish()
def main():
    arguments = docopt(__doc__, version=__version__)
    client = Socrata(arguments['<site>'], arguments['-a'])
    try:
        if arguments['ls']:
            datasets = list_datasets(client, arguments['<site>'])
            print(tabulate(datasets, headers='keys', tablefmt='psql'))
        elif arguments['insert']:
            dataset_id = arguments['<dataset_id>']
            metadata = client.get_metadata(dataset_id)
            engine, session, geo = get_connection(arguments['-d'], metadata)
            Binding = get_binding(client, dataset_id, metadata, geo, arguments['-t'])

            # Create the table
            try:
                Binding.__table__.create(engine)
            except ProgrammingError as e:
                # Catch these here because this is our first attempt to
                # actually use the DB
                if 'already exists' in str(e):
                    raise CLIError(
                        'Destination table already exists. Specify a new table'
                        ' name with -t.')
                raise CLIError('Error creating destination table: %s' % str(e))

            num_rows = get_row_count(client, dataset_id)
            bar = FillingCirclesBar(' ▶ Loading from API', max=num_rows)

            # Iterate the dataset and INSERT each page
            for page in get_dataset(client, dataset_id):
                to_insert = []
                for row in page:
                    to_insert.append(Binding(**parse_row(row, Binding)))
                session.add_all(to_insert)
                session.flush()
                bar.next(n=len(to_insert))
            bar.finish()

            ui.item('Committing rows (this can take a bit for large datasets).')
            session.commit()

            success = 'Successfully imported %s rows from "%s".' % (
                num_rows, metadata['name'])
            ui.header(success, color='\033[92m')
        client.close()
    except CLIError as e:
        ui.header(str(e), color='\033[91m')
def Pb6():
    from progress.bar import FillingCirclesBar
    import time
    bar = FillingCirclesBar('Progress bar 6', max=100)  # the max value of 100 is adjustable
    for i in range(100):  # adjust this accordingly as well
        bar.next()
        time.sleep(0.1)  # delay, adjustable; between 0.1 and 1 works best
    bar.finish()
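# A minimal sketch of the same loop using the progress library's iter()
# helper, which calls next() after each item and finish() when the iterable
# is exhausted, so neither has to be called by hand.
from progress.bar import FillingCirclesBar
import time

for i in FillingCirclesBar('Progress bar 6').iter(range(100)):
    time.sleep(0.1)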
def generate_simple(data_name, count, klass, *args):
    """ Generate data with a simple loop """
    progress_bar = f'Create {data_name}'
    progress_bar = FillingCirclesBar(progress_bar, max=count)
    i = 0
    while i < count:
        i += 1
        klass(LANG_CODE, *args)
        progress_bar.next()
    progress_bar.finish()
def make_video(params_file='output/params.txt', phi_file='output/phi.txt',
               x_file='output/x.txt', gifname='movie.gif', duration=0.1,
               xkcd=False):
    params = get_params(params_file)
    phi_array = np.loadtxt(phi_file)
    x_array = np.loadtxt(x_file)
    solution = {'u': phi_array, 'x': x_array}
    step = int(0.01 / params['dt'])
    bar = FillingCirclesBar('Loading', suffix='%(percent)d%%',
                            max=int((params['steps'] - 1) / step))
    images = []
    figsize = (6, 6)
    for subplot in range(1, int(params['steps']), step):
        if xkcd:
            plt.rcParams['text.usetex'] = False
            plt.xkcd()
        fig = plt.figure(figsize=figsize)
        ax = plt.subplot(1, 1, 1)
        plt.sca(ax)
        plt.plot(solution['x'], solution['u'][subplot - 1, :], c='#F61067', lw=3.5)
        plt.ylim(-1.5 * params['eta'], 1.5 * params['eta'])
        if xkcd:
            plt.xlabel(r'x')
            plt.ylabel(r'u(x, t)')
        else:
            plt.xlabel(r'$x$')
            plt.ylabel(r'$\phi(x, t)$')
        plt.title('t = {:.2f}s'.format((subplot - 1) * params['dt']))
        if subplot > 1:
            plt.axis(axis)
        if subplot == 1:
            axis = plt.axis()
        filename = 'temp.png'
        plt.savefig(filename)
        plt.close()
        images.append(Image.open(filename))
        os.remove(filename)
        bar.next()
    bar.finish()
    print('', end='\r\r')
    if xkcd:
        imageio.mimsave('xkcd_' + gifname, images, duration=duration)
    else:
        imageio.mimsave(gifname, images, duration=duration)
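# A sketch of an alternative frame-capture step for make_video(): rendering
# into an in-memory buffer instead of writing and deleting 'temp.png'.
# Image.open() reads lazily, so deleting the file right away can be fragile
# on some platforms; a BytesIO buffer sidesteps the filesystem entirely.
# The helper name `grab_frame` is hypothetical.
import io
from PIL import Image
import matplotlib.pyplot as plt

def grab_frame():
    buf = io.BytesIO()
    plt.savefig(buf, format='png')
    plt.close()
    buf.seek(0)
    return Image.open(buf)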
def progressbar(title):
    # Manual alternative using \r:
    # for i in range(21):
    #     sys.stdout.write('\r')
    #     # the exact output you're looking for:
    #     sys.stdout.write("[%-20s] %d%%" % ('=' * i, 5 * i))
    #     sys.stdout.flush()
    #     sleep(0.05)
    text = colored(str(title), 'red', attrs=['reverse', 'blink'])
    print(text)
    bar = FillingCirclesBar('Processing', max=100)
    for i in range(100):
        # Do some work
        sleep(0.025)
        bar.next()
    bar.finish()
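# A small sketch of the progress library's suffix tokens, which render live
# counters (index, max, percent and an ETA in seconds are fields the library
# exposes to the suffix format string) without manual \r printing.
from progress.bar import FillingCirclesBar
from time import sleep

bar = FillingCirclesBar('Processing', max=100,
                        suffix='%(index)d/%(max)d %(percent)d%% eta %(eta)ds')
for i in range(100):
    sleep(0.025)
    bar.next()
bar.finish()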
def generate_complex_while(data_name, count, parents, klass, *args):
    """ Generate data with a for and a while loop """
    count_min, count_max = count
    progress_bar = f'Create {data_name}'
    progress_bar = FillingCirclesBar(progress_bar, max=len(parents))
    for parent in parents:
        child_count = randrange(count_min, count_max)
        i = 0
        while i < child_count:
            i += 1
            klass(LANG_CODE, parent, *args)
        progress_bar.next()
    progress_bar.finish()
def do_update(self, _):
    """***Updates the tools"""
    data = dict()
    mirrors = [x.rstrip() for x in open("mirror.txt", "r").readlines()]
    bar = FillingCirclesBar('Updating', max=len(mirrors))
    for x in mirrors:
        print(x)
        data.update(requests.get(x).json())
        bar.next()
    # Write the merged data once (the original rewrote the same file
    # len(str(data)) times, once per character of its string form)
    with open('update.json', 'w') as upt:
        json.dump(data, upt)
    bar.finish()
    self.requiere = json.loads(open("update.json", "r").read())
def random_status_history():
    """ Create random history for status """
    progress_bar = 'Create status histories for the orders'
    progress_bar = FillingCirclesBar(progress_bar, max=len(Order.orders))
    for i, order in enumerate(Order.orders):
        history_count = randrange(0, (len(STATUS) - 1))
        j = 0
        while j < history_count:
            j += 1
            StatusHistory(order)
        Order.orders[i].random_date()
        Order.orders[i].random_status(Status.status)
        progress_bar.next()
    progress_bar.finish()
def generate_complex(data_name, lists, klass, *args, random_choice=False):
    """ Generate data with a double for loop """
    parents, children = lists
    progress_bar = f'Create {data_name}'
    progress_bar = FillingCirclesBar(progress_bar, max=len(parents))
    for parent in parents:
        for child in children:
            if random_choice:
                if choice([True, False]):
                    klass(parent, child, *args)
            else:
                klass(parent, child, *args)
        progress_bar.next()
    progress_bar.finish()
def process_items(items):
    processed_items = []
    number_of_items = len(items)
    progress_bar = FillingCirclesBar('Processing pages', max=number_of_items)
    for item in items:
        product_name = item.xpath('.//a[@class="productName product1Name"]/span')[0].text_content().strip()
        actual_price = item.xpath('.//div[@class="mm-price media__price"]')[0].text_content().strip()
        processed_item = {'name': product_name,
                          'price': Decimal(actual_price),
                          'reduced_price': None,
                          'discount': None}
        processed_items.append(processed_item)
        progress_bar.next()
    progress_bar.finish()
    return processed_items
def search(inputlist, protein_seqs, tsvsalida):
    try:
        numerodominios = 0  # Initialize the total number of matches
        lineaalinea = pd.read_csv(protein_seqs, sep='\t')
        bar = FillingCirclesBar('Searching for domains...',
                                max=len(inputlist['pattern']) * (len(lineaalinea['qseqid']) + 1))
        with open(tsvsalida, 'a') as found:
            found.write('blast hit\tname\taccession\tdescription\tpattern\n')
            for j in range(len(lineaalinea['sseqid']) + 1):
                for k in range(len(inputlist['pattern'])):
                    # For the query, do this
                    if j == 0:
                        busca = inputlist.loc[k, 'pattern']
                        prosearch = lineaalinea.loc[1, 'qseq']
                        match = re.search(busca, prosearch, flags=re.I)
                        bar.next()
                        if match:
                            found.write(
                                lineaalinea.loc[1, 'qseqid'] + '\t'
                                + inputlist.loc[k, 'name'] + '\t'
                                + inputlist.loc[k, 'accession'] + '\t'
                                + inputlist.loc[k, 'description'] + '\t'
                                + inputlist.loc[k, 'pattern'] + '\n')
                            numerodominios += 1
                    # And do this for the multiple subjects
                    else:
                        busca = inputlist.loc[k, 'pattern']
                        prosearch = lineaalinea.loc[j - 1, 'sseq']
                        match = re.search(busca, prosearch, flags=re.I)
                        bar.next()
                        if match:
                            found.write(
                                lineaalinea.loc[j - 1, 'sseqid'] + '\t'
                                + inputlist.loc[k, 'name'] + '\t'
                                + inputlist.loc[k, 'accession'] + '\t'
                                + inputlist.loc[k, 'description'] + '\t'
                                + inputlist.loc[k, 'pattern'] + '\n')
                            numerodominios += 1
        bar.finish()
        return numerodominios
    except Exception:
        print('Failed to search for domains')
def insert_source(source):
    ''' Gets the connection and binding and inserts data. '''
    get_connection(source)
    if not isinstance(source, sc.CenPy):
        get_binding(source)

    if source.engine.dialect.has_table(source.engine, source.tbl_name):
        print()
        warnings.warn("Destination table already exists. Current table "
                      "will be dropped and replaced.")
        print()
        if not isinstance(source, sc.CenPy):
            source.binding.__table__.drop(source.engine)

    try:
        if not isinstance(source, sc.CenPy):
            source.binding.__table__.create(source.engine)
    except ProgrammingError as e:
        raise CLIError('Error creating destination table: %s' % str(e))

    circle_bar = FillingCirclesBar(' ▶ Loading from source', max=source.num_rows)
    source.insert(circle_bar)
    circle_bar.finish()

    ui.item('Committing rows (this can take a bit for large datasets).')
    source.session.commit()

    success = 'Successfully imported %s rows.' % (source.num_rows)
    ui.header(success, color='\033[92m')
    if source.name == "Socrata" and source.client:
        source.client.close()
    return
def make_video(solution, gifname='movie.gif', duration=0.1, xkcd=False):
    params = solution.params
    step = int(0.01 / params['dt'])
    bar = FillingCirclesBar('Loading', suffix='%(percent)d%%',
                            max=int((solution.steps - 1) / step))
    images = []
    figsize = (6, 6)
    for subplot in range(1, solution.steps, step):
        if xkcd:
            plt.rcParams['text.usetex'] = False
            plt.xkcd()
        fig = plt.figure(figsize=figsize)
        ax = plt.subplot(1, 1, 1)
        plt.sca(ax)
        plt.plot(solution.x, solution.u[subplot - 1, :], c='#F61067', lw=3.5)
        if xkcd:
            plt.xlabel(r'x')
            plt.ylabel(r'u(x, t)')
        else:
            plt.xlabel(r'$x$')
            plt.ylabel(r'$u(x, t)$')
        plt.title('t = {:.2f}s'.format(params['t0'] + (subplot - 1) * params['dt']))
        if subplot > 1:
            plt.axis(axis)
        if subplot == 1:
            axis = plt.axis()
        filename = 'temp.png'
        plt.savefig(filename)
        plt.close()
        images.append(Image.open(filename))
        os.remove(filename)
        bar.next()
    bar.finish()
    print('', end='\r\r')
    if xkcd:
        imageio.mimsave('xkcd_' + gifname, images, duration=duration)
    else:
        imageio.mimsave(gifname, images, duration=duration)
def retrieve_domain_address():
    """ Performs a DNS lookup on each domain """
    global SUBDOMAIN_LIST
    resolver = dns.resolver.Resolver()
    pop_list = []
    bar = FillingCirclesBar('[*] Resolving Domains', max=len(SUBDOMAIN_LIST))
    for i in range(len(SUBDOMAIN_LIST)):
        try:
            answers = resolver.resolve("%s" % SUBDOMAIN_LIST[i].name, "A")
            for response in answers:
                SUBDOMAIN_LIST[i].resolved_addresses.append(response.to_text())
        except (dns.resolver.NoAnswer, dns.resolver.NXDOMAIN):
            pop_list.append(SUBDOMAIN_LIST[i])
        bar.next()
    bar.finish()
    SUBDOMAIN_LIST = adjust_list(pop_list)
def plot_bar():
    # Method 0: Using \r to print
    def view_bar(num, sum, bar_title="Processing", bar_word="▓"):
        rate = num / sum
        rate_num = round(rate * 100)
        rest_num = 100 - rate_num
        print(("\r\033[1;32m" + bar_title + " \033[0m\033[1;35m|" + bar_word * rate_num
               + " " * rest_num + "| \033[0m\033[1;33m%3d%%\033[0m") % (rate_num), end="")
        if rate_num == 100:
            print("\n", end="")

    with open("plot_statistic.py", 'r') as file:
        lines = file.readlines()
    for _ in range(len(lines)):
        time.sleep(0.02)
        view_bar(_, len(lines) - 1)

    # Method 1: Using alive_progress
    with alive_bar(100) as bar:
        for _ in range(100):
            bar()
            time.sleep(0.02)

    # Method 2: Using tqdm
    with open("plot_statistic.py", 'r') as file:
        lines = file.readlines()
    for _ in tqdm(lines):
        time.sleep(0.02)

    # Method 3: Using progress
    with open("plot_statistic.py", "r") as file:
        lines = file.readlines()
    # bar = IncrementalBar('BarName', max=len(lines))
    # bar = ChargingBar('BarName', max=len(lines))
    bar = FillingCirclesBar('BarName', max=len(lines))
    # bar = ShadyBar('BarName', max=len(lines))
    for _ in lines:
        bar.next()
        time.sleep(0.02)
    bar.finish()

    with open("plot_statistic.py", "r") as file:
        lines = file.readlines()
    bar = ChargingBar('BarName', max=len(lines))
    for _ in lines:
        bar.next()
        time.sleep(0.02)
    bar.finish()

    with open("plot_statistic.py", "r") as file:
        lines = file.readlines()
    bar = ShadyBar('BarName', max=len(lines))
    for _ in lines:
        bar.next()
        time.sleep(0.02)
    bar.finish()
def read_json_with_key(self, key):
    """ This method reads JSON files and returns only the data under a specific key """
    print('')
    progress_bar = 'Reading data:'
    progress_bar_count = len(self.categories) * self.pages
    progress_bar = FillingCirclesBar(progress_bar, max=progress_bar_count)
    for category in self.categories:
        for page in range(self.pages):
            # Create a path for the file
            file_name = f'{page}.json'
            file_path = path.join(self.tmp_dir, category, file_name)
            # Read the JSON file
            with open(file_path, 'r') as file:
                json_data = json.load(file)
            # Store the data in the list
            for line in json_data[key]:
                self.data.append(line)
            progress_bar.next()
    progress_bar.finish()
def collect_epic(self, epic_name):
    """
    Collects the results of all jobs in an epic

    epic_name: name of the job series to collect
    :raise KeyError: if the epic name was not registered before
    :return: list of worker results
    """
    if epic_name not in self.__epics.keys():
        raise KeyError("Cannot find named epic '%s'" % epic_name)
    if self.__repprog:
        bar = FillingCirclesBar("Processing epic '%s'" % epic_name,
                                max=len(self.__epics[epic_name]))
        bar.start()
        results = []
        j = 0
        while j < len(self.__epics[epic_name]):
            try:
                results.append(self.__epics[epic_name][j].result(timeout=1))
                self.__epics[epic_name].remove(self.__epics[epic_name][j])
                if len(self.__epics[epic_name]) > 0:
                    j %= len(self.__epics[epic_name])
                else:
                    j = 0
                bar.next()
            except TimeoutError:
                j = (j + 1) % len(self.__epics[epic_name])
        bar.finish()
    else:
        results = []
        for f in self.__epics[epic_name]:
            results.append(f.result())
    self.__epics[epic_name] = []
    return results
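# A minimal alternative sketch for collecting futures using
# concurrent.futures.as_completed, which yields each future as it finishes
# and avoids the manual timeout-polling loop above. `collect_epic_sketch` is
# a hypothetical helper and assumes the epic holds concurrent.futures.Future
# objects.
from concurrent.futures import as_completed
from progress.bar import FillingCirclesBar

def collect_epic_sketch(futures, label):
    bar = FillingCirclesBar(label, max=len(futures))
    results = []
    for future in as_completed(futures):
        results.append(future.result())
        bar.next()
    bar.finish()
    return results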
def main():
    arguments = docopt(__doc__)
    site = arguments['<site>']
    if arguments['--HUD']:
        source = "HUD"
        dataset_id = site
        client = None
    if arguments['--Socrata']:
        source = "Socrata"
        client = Socrata(site, arguments.get('-a'))
    try:
        if arguments.get('ls'):
            datasets = list_datasets(client, site)
            print(tabulate(datasets, headers='keys', tablefmt='psql'))
        elif arguments.get('insert'):
            if source == "Socrata":
                dataset_id = arguments['<dataset_id>']
                metadata = client.get_metadata(dataset_id)['columns']
            if source == "HUD":
                metadata = json.loads(urllib.request.urlopen(site).read())['fields']

            engine, session, geo = get_connection(arguments['-d'], metadata, source)
            if arguments['-t']:
                Binding = get_binding(metadata, geo, arguments['-t'], source)
            else:
                Binding = get_binding(metadata, geo, dataset_id, source)

            # Create the table
            try:
                Binding.__table__.create(engine)
            except ProgrammingError as e:
                # Catch these here because this is our first attempt to
                # actually use the DB
                if 'already exists' in str(e):
                    raise CLIError(
                        'Destination table already exists. Specify a new table'
                        ' name with -t.')
                raise CLIError('Error creating destination table: %s' % str(e))

            num_rows, data = get_data(source, dataset_id, client)
            bar = FillingCirclesBar(' ▶ Loading from source', max=num_rows)

            # Iterate the dataset and INSERT each page
            if source == "Socrata":
                for page in data:
                    insert_data(page, session, bar, Binding)
            if source == "HUD":
                insert_data(data, session, bar, Binding)
            bar.finish()

            ui.item('Committing rows (this can take a bit for large datasets).')
            session.commit()

            success = 'Successfully imported %s rows.' % (num_rows)
            ui.header(success, color='\033[92m')
        if client:
            client.close()
    except CLIError as e:
        ui.header(str(e), color='\033[91m')
# Fragment of a mapping script; assumes poses, scans, L, map_size, RESOLUTION,
# LOWEST_X/LOWEST_Y, the robot coordinates, save_frames, animate and
# progress_bar are defined earlier in the script.
plt.figure(figsize=(10, 10))
for i in range(len(poses)):
    pose = poses[i] - np.array([LOWEST_X, LOWEST_Y, 0])
    grid = convert2map(pose[:2], scans[i] - np.array([LOWEST_X, LOWEST_Y]),
                       map_pix=RESOLUTION, map_size=map_size, prob=0.02)
    # Convert the occupancy grid to the log-odds representation
    l = np.log(grid / (1 - grid))
    L += l
    progress_bar.next()
    # Convert from the log-odds representation back to a probability grid
    G = 1 / (1 + np.exp(-L))
    if save_frames:
        plt.savefig('%d.png' % i, dpi=300)
    if animate:
        plt.cla()
        visualize(G, robotX1, robotY1, robotX2, robotY2)
        plt.pause(0.1)
progress_bar.finish()
visualize(G, robotX1, robotY1, robotX2, robotY2)
plt.savefig('multiranger_map.png', dpi=300)
plt.show()
def main():
    args = get_cli_args()
    validate_cli_args(args)
    alphas = np.array(args.alphas)**2
    beta = np.array(args.beta)
    beta[1] = np.deg2rad(beta[1])

    mean_prior = np.array([180., 50., 0.])
    Sigma_prior = 1e-12 * np.eye(3, 3)
    initial_state = Gaussian(mean_prior, Sigma_prior)

    if args.input_data_file:
        data = load_data(args.input_data_file)
    elif args.num_steps:
        # Generate data, assuming `--num-steps` was present in the CL args.
        data = generate_input_data(initial_state.mu.T, args.num_steps,
                                   args.num_landmarks_per_side,
                                   args.max_obs_per_time_step,
                                   alphas, beta, args.dt)
    else:
        raise RuntimeError('')

    should_show_plots = True if args.animate else False
    should_write_movie = True if args.movie_file else False
    should_update_plots = True if should_show_plots or should_write_movie else False

    field_map = FieldMap(args.num_landmarks_per_side)

    fig = get_plots_figure(should_show_plots, should_write_movie)
    movie_writer = get_movie_writer(should_write_movie, 'Simulation SLAM',
                                    args.movie_fps, args.plot_pause_len)
    progress_bar = FillingCirclesBar('Simulation Progress', max=data.num_steps)

    with movie_writer.saving(fig, args.movie_file, data.num_steps) \
            if should_write_movie else get_dummy_context_mgr():
        for t in range(data.num_steps):
            # Used as means to include the t-th time-step while plotting.
            tp1 = t + 1

            # Control at the current step.
            u = data.filter.motion_commands[t]
            # Observation at the current step.
            z = data.filter.observations[t]

            # TODO SLAM predict(u)

            # TODO SLAM update

            progress_bar.next()
            if not should_update_plots:
                continue

            plt.cla()
            plot_field(field_map, z)
            plot_robot(data.debug.real_robot_path[t])
            plot_observations(data.debug.real_robot_path[t],
                              data.debug.noise_free_observations[t],
                              data.filter.observations[t])

            plt.plot(data.debug.real_robot_path[1:tp1, 0],
                     data.debug.real_robot_path[1:tp1, 1], 'm')
            plt.plot(data.debug.noise_free_robot_path[1:tp1, 0],
                     data.debug.noise_free_robot_path[1:tp1, 1], 'g')
            plt.plot([data.debug.real_robot_path[t, 0]],
                     [data.debug.real_robot_path[t, 1]], '*r')
            plt.plot([data.debug.noise_free_robot_path[t, 0]],
                     [data.debug.noise_free_robot_path[t, 1]], '*g')

            # TODO plot SLAM solution

            if should_show_plots:
                # Draw all the plots and pause to create an animation effect.
                plt.draw()
                plt.pause(args.plot_pause_len)

            if should_write_movie:
                movie_writer.grab_frame()

    progress_bar.finish()
    plt.show(block=True)
def main():
    args = get_cli_args()
    validate_cli_args(args)
    alphas = np.array(args.alphas)
    beta = np.array(args.beta)

    mean_prior = np.array([180., 50., 0.])
    Sigma_prior = 1e-12 * np.eye(3, 3)
    initial_state = Gaussian(mean_prior, Sigma_prior)

    if args.input_data_file:
        data = load_data(args.input_data_file)
    elif args.num_steps:
        # Generate data, assuming `--num-steps` was present in the CL args.
        data = generate_input_data(initial_state.mu.T, args.num_steps,
                                   args.num_landmarks_per_side,
                                   args.max_obs_per_time_step,
                                   alphas, beta, args.dt)
    else:
        raise RuntimeError('')

    store_sim_data = True if args.output_dir else False
    should_show_plots = True if args.animate else False
    should_write_movie = True if args.movie_file else False
    should_update_plots = True if should_show_plots or should_write_movie else False

    field_map = FieldMap(args.num_landmarks_per_side)

    fig = get_plots_figure(should_show_plots, should_write_movie)
    movie_writer = get_movie_writer(should_write_movie, 'Simulation SLAM',
                                    args.movie_fps, args.plot_pause_len)
    progress_bar = FillingCirclesBar('Simulation Progress', max=data.num_steps)

    if store_sim_data:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        save_input_data(data, os.path.join(args.output_dir, 'input_data.npy'))

    # SLAM object initialization
    slam = EKF_SLAM('ekf', 'known', 'batch', args, initial_state)
    mu_traj = mean_prior
    sigma_traj = []
    theta = []

    with movie_writer.saving(fig, args.movie_file, data.num_steps) \
            if should_write_movie else get_dummy_context_mgr():
        for t in range(data.num_steps):
            # Used as means to include the t-th time-step while plotting.
            tp1 = t + 1

            # Control at the current step.
            u = data.filter.motion_commands[t]
            # Observation at the current step.
            z = data.filter.observations[t]

            # TODO SLAM predict(u)
            mu, Sigma = slam.predict(u)
            # TODO SLAM update
            mu, Sigma = slam.update(z)

            mu_traj = np.vstack((mu_traj, mu[:3]))
            sigma_traj.append(Sigma[:3, :3])
            theta.append(mu[2])

            progress_bar.next()
            if not should_update_plots:
                continue

            plt.cla()
            plot_field(field_map, z)
            plot_robot(data.debug.real_robot_path[t])
            plot_observations(data.debug.real_robot_path[t],
                              data.debug.noise_free_observations[t],
                              data.filter.observations[t])

            plt.plot(data.debug.real_robot_path[1:tp1, 0],
                     data.debug.real_robot_path[1:tp1, 1], 'm')
            plt.plot(data.debug.noise_free_robot_path[1:tp1, 0],
                     data.debug.noise_free_robot_path[1:tp1, 1], 'g')
            plt.plot([data.debug.real_robot_path[t, 0]],
                     [data.debug.real_robot_path[t, 1]], '*r')
            plt.plot([data.debug.noise_free_robot_path[t, 0]],
                     [data.debug.noise_free_robot_path[t, 1]], '*g')

            # TODO plot SLAM solution
            # robot filtered trajectory and covariance
            plt.plot(mu_traj[:, 0], mu_traj[:, 1], 'blue')
            plot2dcov(mu[:2], Sigma[:2, :2], color='b', nSigma=3, legend=None)

            # landmark covariances and expected poses
            Sm = slam.Sigma[slam.iR:slam.iR + slam.iM, slam.iR:slam.iR + slam.iM]
            mu_M = slam.mu[slam.iR:]
            for c in range(0, slam.iM, 2):
                Sigma_lm = Sm[c:c + 2, c:c + 2]
                mu_lm = mu_M[c:c + 2]
                plt.plot(mu_lm[0], mu_lm[1], 'ro')
                plot2dcov(mu_lm, Sigma_lm, color='k', nSigma=3, legend=None)

            if should_show_plots:
                # Draw all the plots and pause to create an animation effect.
                plt.draw()
                plt.pause(args.plot_pause_len)

            if should_write_movie:
                movie_writer.grab_frame()

    progress_bar.finish()

    # plt.figure(2)
    # plt.plot(theta)
    plt.show(block=True)

    if store_sim_data:
        file_path = os.path.join(args.output_dir, 'output_data.npy')
        with open(file_path, 'wb') as data_file:
            np.savez(data_file,
                     mean_trajectory=mu_traj,
                     covariance_trajectory=np.array(sigma_traj))
def main():
    args = get_cli_args()
    validate_cli_args(args)
    alphas = np.array(args.alphas)**2
    beta = np.array(args.beta)
    beta[1] = np.deg2rad(beta[1])
    Q = np.array([[beta[0]**2, 0], [0, beta[1]**2]])

    filter_name = args.filter_name
    DATA_ASSOCIATION = args.data_association
    UPDATE_TYPE = args.update_type

    mean_prior = np.array([180., 50., 0.])
    Sigma_prior = 1e-12 * np.eye(3, 3)
    initial_state = Gaussian(mean_prior, Sigma_prior)
    # print(initial_state)

    SAM_MODEL = Sam(initial_state=initial_state,
                    alphas=alphas,
                    slam_type=filter_name,
                    data_association=DATA_ASSOCIATION,
                    update_type=UPDATE_TYPE,
                    Q=Q)

    if args.input_data_file:
        data = load_data(args.input_data_file)
    elif args.num_steps:
        # Generate data, assuming `--num-steps` was present in the CL args.
        data = generate_input_data(initial_state.mu.T, args.num_steps,
                                   args.num_landmarks_per_side,
                                   args.max_obs_per_time_step,
                                   alphas, beta, args.dt)
    else:
        raise RuntimeError('')

    should_show_plots = True if args.animate else False
    should_write_movie = True if args.movie_file else False
    should_update_plots = True if should_show_plots or should_write_movie else False

    field_map = FieldMap(args.num_landmarks_per_side)

    fig = get_plots_figure(should_show_plots, should_write_movie)
    movie_writer = get_movie_writer(should_write_movie, 'Simulation SLAM',
                                    args.movie_fps, args.plot_pause_len)
    progress_bar = FillingCirclesBar('Simulation Progress', max=data.num_steps)

    with movie_writer.saving(fig, args.movie_file, data.num_steps) \
            if should_write_movie else get_dummy_context_mgr():
        for t in range(data.num_steps):
            # Used as means to include the t-th time-step while plotting.
            tp1 = t + 1

            # Control at the current step.
            u = data.filter.motion_commands[t]
            # Observation at the current step.
            z = data.filter.observations[t]

            # TODO SLAM predict(u)
            SAM_MODEL.predict(u)
            # TODO SLAM update
            SAM_MODEL.update(z)
            # SAM_MODEL.solve()

            progress_bar.next()
            if not should_update_plots:
                continue

            plt.cla()
            plot_field(field_map, z)
            plot_robot(data.debug.real_robot_path[t])
            plot_observations(data.debug.real_robot_path[t],
                              data.debug.noise_free_observations[t],
                              data.filter.observations[t])

            plt.plot(data.debug.real_robot_path[1:tp1, 0],
                     data.debug.real_robot_path[1:tp1, 1], 'm')
            plt.plot(data.debug.noise_free_robot_path[1:tp1, 0],
                     data.debug.noise_free_robot_path[1:tp1, 1], 'g')
            plt.plot([data.debug.real_robot_path[t, 0]],
                     [data.debug.real_robot_path[t, 1]], '*r')
            plt.plot([data.debug.noise_free_robot_path[t, 0]],
                     [data.debug.noise_free_robot_path[t, 1]], '*g')

            # TODO plot SLAM solution
            for i in SAM_MODEL.LEHRBUCH.keys():
                Coord = SAM_MODEL.graph.get_estimated_state()[SAM_MODEL.LEHRBUCH[i]]
                plt.plot(Coord[0], Coord[1], 'g*', markersize=7.0)

            S = SAM_MODEL.graph.get_estimated_state()
            states_results_x = []
            states_results_y = []
            for i in range(len(S)):
                if i not in SAM_MODEL.LEHRBUCH.values():
                    states_results_x.append(S[i][0][0])
                    states_results_y.append(S[i][1][0])
            plt.plot(states_results_x, states_results_y, 'b')
            plt.plot(states_results_x[-1], states_results_y[-1], 'bo', markersize=3.0)

            if should_show_plots:
                # Draw all the plots and pause to create an animation effect.
                plt.draw()
                plt.pause(args.plot_pause_len)

            if should_write_movie:
                movie_writer.grab_frame()

    # chi2var = SAM_MODEL.graph.chi2()
    # i = 0
    # error_var = 1
    # print('\n')
    # while error_var >= 0.5 and i <= 100:
    #     # print('Error equals ={}, for {} iteration'.format(chi2var, i))
    #     SAM_MODEL.graph.solve(mrob.GN)
    #     chi4var = SAM_MODEL.graph.chi2()
    #     error_var = abs(chi4var - chi2var)
    #     chi2var = chi4var
    #     i += 1
    # print('Error ={}, Iter = {}'.format(chi2var, i))
    # ______________________________________________________________________
    SAM_MODEL.graph.solve(mrob.LM)
    print(SAM_MODEL.graph.chi2())
    progress_bar.finish()

    COV = inv(SAM_MODEL.graph.get_information_matrix())[-3:-1, -3:-1]
    plot2dcov(np.array([states_results_x[-1], states_results_y[-1]]).T,
              COV.A, 'k', nSigma=3)
    plt.show(block=True)

    # plt.figure(figsize=(10, 10))
    # plt.plot(SAM_MODEL.ci2)
    # plt.grid('on')
    # plt.xlabel('T')
    # plt.ylabel('Estimation')
    # plt.title('Plot chi2')
    # plt.show(block=True)

    plt.figure(figsize=(8, 8))
    plt.spy(SAM_MODEL.graph.get_adjacency_matrix(),
            marker='o', markersize=2.0, color='g')
    plt.title('GAM')
    plt.show(block=True)

    plt.figure(figsize=(8, 8))
    plt.spy(SAM_MODEL.graph.get_information_matrix(),
            marker='o', markersize=2.0, color='g')
    plt.title('GIM')
    plt.show(block=True)
def main():
    args = get_cli_args()
    validate_cli_args(args)

    # Weights for the covariances of action noise R and observation noise Q.
    # The variance of the noise R is proportional to alphas,
    # see tools/tasks@get_motion_noise_covariance().
    alphas = np.array(args.alphas)**2
    beta = np.deg2rad(args.beta)  # see also filters/localization_filter.py

    mean_prior = np.array([180., 50., 0.])
    Sigma_prior = 1e-12 * np.eye(3, 3)
    initial_state = Gaussian(mean_prior, Sigma_prior)

    if args.input_data_file:
        data = load_data(args.input_data_file)
    elif args.num_steps:
        # Generate data, assuming `--num-steps` was present in the CL args.
        data = generate_input_data(initial_state.mu.T, args.num_steps,
                                   alphas, beta, args.dt)
    else:
        raise RuntimeError('')

    store_sim_data = True if args.output_dir else False
    show_plots = True if args.animate else False
    write_movie = True if args.movie_file else False
    show_trajectory = True if args.animate and args.show_trajectory else False
    show_particles = args.show_particles and args.animate and args.filter_name == 'pf'
    update_mean_trajectory = True if show_trajectory or store_sim_data else False
    update_plots = True if show_plots or write_movie else False
    one_trajectory_per_particle = True if show_particles and not store_sim_data else False

    if store_sim_data:
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)
        save_input_data(data, os.path.join(args.output_dir, 'input_data.npy'))

    # ---------------------------------------------------------------------------------------------------
    # Student's task: You will fill in these functions inside 'filters/.py'
    # ---------------------------------------------------------------------------------------------------
    localization_filter = None
    if args.filter_name == 'ekf':
        localization_filter = EKF(initial_state, alphas, beta)
    elif args.filter_name == 'pf':
        localization_filter = PF(initial_state, alphas, beta,
                                 args.num_particles, args.global_localization)

    fig = None
    if show_plots or write_movie:
        fig = plt.figure(1)
    if show_plots:
        plt.ion()

    # Initialize the trajectory if the user opted in to display it.
    sim_trajectory = None
    if update_mean_trajectory:
        if one_trajectory_per_particle:
            mean_trajectory = np.zeros((data.num_steps,
                                        localization_filter.state_dim,
                                        args.num_particles))
        else:
            mean_trajectory = np.zeros((data.num_steps, localization_filter.state_dim))
        sim_trajectory = FilterTrajectory(mean_trajectory)

    if store_sim_data:
        # Pre-allocate the memory to store the covariance matrix of the trajectory at each time step.
        sim_trajectory.covariance = np.zeros((localization_filter.state_dim,
                                              localization_filter.state_dim,
                                              data.num_steps))

    # Initialize the movie writer if `--movie-file` was present in the CL args.
    movie_writer = None
    if write_movie:
        get_ff_mpeg_writer = anim.writers['ffmpeg']
        metadata = dict(title='Localization Filter', artist='matplotlib', comment='PS2')
        movie_fps = min(args.movie_fps, float(1. / args.plot_pause_len))
        movie_writer = get_ff_mpeg_writer(fps=movie_fps, metadata=metadata)

    progress_bar = FillingCirclesBar('Simulation Progress', max=data.num_steps)

    with movie_writer.saving(fig, args.movie_file, data.num_steps) \
            if write_movie else get_dummy_context_mgr():
        for t in range(data.num_steps):
            # Used as means to include the t-th time-step while plotting.
            tp1 = t + 1

            # Control at the current step.
            u = data.filter.motion_commands[t]
            # Observation at the current step.
            z = data.filter.observations[t]

            localization_filter.predict(u)
            localization_filter.update(z)

            if update_mean_trajectory:
                if one_trajectory_per_particle:
                    sim_trajectory.mean[t, :, :] = localization_filter.X.T
                else:
                    sim_trajectory.mean[t] = localization_filter.mu

            if store_sim_data:
                sim_trajectory.covariance[:, :, t] = localization_filter.Sigma

            progress_bar.next()
            if not update_plots:
                continue

            plt.cla()
            plot_field(z[1])
            plot_robot(data.debug.real_robot_path[t])
            plot_observation(data.debug.real_robot_path[t],
                             data.debug.noise_free_observations[t],
                             data.filter.observations[t])

            plt.plot(data.debug.real_robot_path[1:tp1, 0],
                     data.debug.real_robot_path[1:tp1, 1], 'g')
            plt.plot(data.debug.noise_free_robot_path[1:tp1, 0],
                     data.debug.noise_free_robot_path[1:tp1, 1], 'm')
            # plt.plot([data.debug.real_robot_path[t, 0]],
            #          [data.debug.real_robot_path[t, 1]], '*g')
            plt.plot([data.debug.noise_free_robot_path[t, 0]],
                     [data.debug.noise_free_robot_path[t, 1]], '*m')

            if show_particles:
                samples = localization_filter.X.T
                plt.scatter(samples[0], samples[1], s=2)
            else:
                plot2dcov(localization_filter.mu_bar[:-1],
                          localization_filter.Sigma_bar[:-1, :-1],
                          'red', 3, legend='{} -'.format(args.filter_name.upper()))
                plot2dcov(localization_filter.mu[:-1],
                          localization_filter.Sigma[:-1, :-1],
                          'blue', 3, legend='{} +'.format(args.filter_name.upper()))
                plt.legend()

            if show_trajectory:
                if len(sim_trajectory.mean.shape) > 2:
                    # This means that we probably intend to show the trajectory for every particle.
                    x = np.squeeze(sim_trajectory.mean[0:t, 0, :])
                    y = np.squeeze(sim_trajectory.mean[0:t, 1, :])
                    plt.plot(x, y)
                else:
                    plt.plot(sim_trajectory.mean[0:t, 0],
                             sim_trajectory.mean[0:t, 1], 'blue')

            if show_plots:
                # Draw all the plots and pause to create an animation effect.
                plt.draw()
                plt.pause(args.plot_pause_len)

            if write_movie:
                movie_writer.grab_frame()

    progress_bar.finish()

    if show_plots:
        plt.show(block=True)

    if store_sim_data:
        file_path = os.path.join(args.output_dir, 'output_data.npy')
        with open(file_path, 'wb') as data_file:
            np.savez(data_file,
                     mean_trajectory=sim_trajectory.mean,
                     covariance_trajectory=sim_trajectory.covariance)
if os.path.isdir("download"):
    with requests.Session() as req:
        save_path = "download/"
        threads = []
        bar = FillingCirclesBar("Downloading ", max=len(cleaned_urls))
        for link in cleaned_urls:
            thread = threading.Thread(target=download, args=(link, ))
            threads.append(thread)
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
            bar.next()  # advance the bar as each download thread completes
        bar.finish()

    # Rename song files
    for file in os.listdir("download"):
        tag = TinyTag.get(os.path.join("download", file))
        newName = tag.title
        filePath = os.path.join("download", file)
        if "/" in newName:
            newName = newName.replace("/", "-")
        newNamePath = os.path.join("download", newName)
        if file.endswith(".m4a"):
            os.rename(filePath, newNamePath + ".m4a")
        elif file.endswith(".flac"):
            os.rename(filePath, newNamePath + ".flac")
        elif file.endswith(".mp3"):
            os.rename(filePath, newNamePath + ".mp3")
def run_style_transfer(
    self,
    content_path,
    style_path,
    folder,
    epochs,
    content_weight,
    style_weight,
    learning_rate,
    image_save_count,
):
    model = self.load_model()
    for layer in model.layers:
        layer.trainable = False

    # Get the style and content feature representations (from our specified intermediate layers)
    style_features, content_features = self.get_feature_representations(
        model, content_path, style_path)
    gram_style_features = [
        self.gram_matrix(style_feature) for style_feature in style_features
    ]

    init_image = self.load_image(content_path)
    init_image = tf.Variable(init_image, dtype=tf.float32)

    # Use the learning rate passed by the caller (it was previously hard-coded to 0.05)
    opt = tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.99, epsilon=1e-1)

    best_loss, best_img = float("inf"), None

    loss_weights = (style_weight, content_weight)
    cfg = {
        "model": model,
        "loss_weights": loss_weights,
        "init_image": init_image,
        "gram_style_features": gram_style_features,
        "content_features": content_features,
    }

    images_to_save = min(epochs, image_save_count)
    display_interval = epochs / images_to_save
    start_time = time.time()
    global_start = time.time()

    norm_means = np.array([103.939, 116.779, 123.68])
    min_vals = -norm_means
    max_vals = 255 - norm_means

    bar = FillingCirclesBar(
        f"Epoch {0}/{epochs} Loss: {0.0:4.2f} (content {0.0:4.2f}, style {0.0:4.2f})",
        max=epochs,
    )
    for epoch in range(epochs):
        grads, all_loss = self.compute_grads(cfg)
        loss, style_score, content_score = all_loss
        opt.apply_gradients([(grads, init_image)])
        clipped = tf.clip_by_value(init_image, min_vals, max_vals)
        init_image.assign(clipped)

        if loss < best_loss:
            best_loss = loss
            best_img = self.postprocess_image(init_image.numpy())

        if epoch % display_interval == 0:
            self.save_image(
                best_img,
                f"{folder}/e{epoch:05d}-l{best_loss:4.2f}-sl{style_score:4.2f}-cl{content_score:4.2f}.png",
            )

        bar.message = (f"Epoch {epoch}/{epochs}"
                       f" Loss: {loss:4.2f}"
                       f" (style {style_score:4.2f}"
                       f" content {content_score:4.2f})"
                       f" {(time.time() - start_time):.2f}s")
        start_time = time.time()
        bar.next()
    bar.finish()

    logging.info("Total time: {:.4f}s".format(time.time() - global_start))
    return best_img, best_loss
def main():
    args = get_cli_args()
    validate_cli_args(args)
    alphas = np.array(args.alphas)
    beta = np.array(args.beta)**2

    mean_prior = np.array([180., 50., 0.])
    Sigma_prior = 1e-12 * np.eye(3, 3)
    initial_state = Gaussian(mean_prior, Sigma_prior)

    if args.input_data_file:
        data = load_data(args.input_data_file)
    elif args.num_steps:
        # Generate data, assuming `--num-steps` was present in the CL args.
        data = generate_input_data(initial_state.mu.T, args.num_steps,
                                   args.num_landmarks_per_side,
                                   args.max_obs_per_time_step,
                                   alphas, beta, args.dt)
    else:
        raise RuntimeError('')

    should_show_plots = True if args.animate else False
    should_write_movie = True if args.movie_file else False
    should_update_plots = True if should_show_plots or should_write_movie else False

    field_map = FieldMap(args.num_landmarks_per_side)

    fig = get_plots_figure(should_show_plots, should_write_movie)
    movie_writer = get_movie_writer(should_write_movie, 'Simulation SLAM',
                                    args.movie_fps, args.plot_pause_len)
    progress_bar = FillingCirclesBar('Simulation Progress', max=data.num_steps)

    data = load_data("slam-evaluation-input.npy")
    slam = SAM(beta, alphas, initial_state)

    with movie_writer.saving(fig, args.movie_file, data.num_steps) \
            if should_write_movie else get_dummy_context_mgr():
        for t in range(data.num_steps):
            # Used as means to include the t-th time-step while plotting.
            tp1 = t + 1

            # Control at the current step.
            u = data.filter.motion_commands[t]
            # Observation at the current step.
            z = data.filter.observations[t]
            # print(data.filter.observations.shape)

            slam.predict(u)
            trajectory, landmarks = slam.update(z)

            progress_bar.next()
            if not should_update_plots:
                continue

            plt.cla()
            plot_field(field_map, z, slam.lm_positions, slam.lm_correspondences)
            plot_robot(data.debug.real_robot_path[t])
            plot_observations(data.debug.real_robot_path[t],
                              data.debug.noise_free_observations[t],
                              data.filter.observations[t])

            plt.plot(data.debug.real_robot_path[1:tp1, 0],
                     data.debug.real_robot_path[1:tp1, 1], 'm')
            plt.plot(data.debug.noise_free_robot_path[1:tp1, 0],
                     data.debug.noise_free_robot_path[1:tp1, 1], 'g')
            plt.plot([data.debug.real_robot_path[t, 0]],
                     [data.debug.real_robot_path[t, 1]], '*r')
            plt.plot([data.debug.noise_free_robot_path[t, 0]],
                     [data.debug.noise_free_robot_path[t, 1]], '*g')

            # TODO plot SLAM solution
            plt.plot(np.array(trajectory)[:, 0], np.array(trajectory)[:, 1])
            plt.scatter(np.array(landmarks)[:, 0], np.array(landmarks)[:, 1])

            # print(t)
            # for lm in slam.lm_positions:
            #     # print(len(lm))
            #     if len(lm) > 5:
            #         lm_mu, lm_sigma = get_gaussian_statistics_xy(np.array(lm[-5:]))
            #         # print('lm_mu', lm_mu)
            #         # print('lm_sigma', lm_sigma)
            #         # print('plot lm')
            #         plot2dcov(lm_mu, lm_sigma, 3, 50)

            if should_show_plots:
                # Draw all the plots and pause to create an animation effect.
                plt.draw()
                plt.pause(args.plot_pause_len)

            if should_write_movie:
                movie_writer.grab_frame()

    progress_bar.finish()
    plt.show(block=True)
def main():
    args = get_cli_args()
    validate_cli_args(args)
    alphas = np.array(args.alphas)
    beta = np.array(args.beta)

    mean_prior = np.array([180., 50., 0.])
    Sigma_prior = 1e-12 * np.eye(3, 3)
    initial_state = Gaussian(mean_prior, Sigma_prior)

    if args.input_data_file:
        data = load_data(args.input_data_file)
    elif args.num_steps:
        # Generate data, assuming `--num-steps` was present in the CL args.
        data = generate_input_data(initial_state.mu.T, args.num_steps,
                                   args.num_landmarks_per_side,
                                   args.max_obs_per_time_step,
                                   alphas, beta, args.dt)
    else:
        raise RuntimeError('')

    should_show_plots = True if args.animate else False
    should_write_movie = True if args.movie_file else False
    should_update_plots = True if should_show_plots or should_write_movie else False

    field_map = FieldMap(args.num_landmarks_per_side)

    fig_robot = get_plots_figure(should_show_plots, should_write_movie)
    movie_writer = get_movie_writer(should_write_movie, 'Simulation SLAM',
                                    args.movie_fps, args.plot_pause_len)
    progress_bar = FillingCirclesBar('Simulation Progress', max=data.num_steps)

    # SAM object initialization:
    sam = SAM(initial_state, args)
    mu_traj = np.array([None, None])
    theta = []

    with movie_writer.saving(fig_robot, args.movie_file, data.num_steps) \
            if should_write_movie else get_dummy_context_mgr():
        for t in range(data.num_steps):
            # for t in range(50):
            # Used as means to include the t-th time-step while plotting.
            tp1 = t + 1

            # Control at the current step.
            u = data.filter.motion_commands[t]
            # Observation at the current step.
            z = data.filter.observations[t]

            # TODO SLAM predict(u)
            mu, Sigma = sam.predict(u)
            # TODO SLAM update
            mu, Sigma = sam.update(u, z)

            mu_traj = np.vstack((mu_traj, mu[:2]))
            theta.append(mu[2])

            progress_bar.next()
            if not should_update_plots:
                continue

            plt.figure(1)
            plt.cla()
            plot_field(field_map, z)
            plot_robot(data.debug.real_robot_path[t])
            plot_observations(data.debug.real_robot_path[t],
                              data.debug.noise_free_observations[t],
                              data.filter.observations[t])

            plt.plot(data.debug.real_robot_path[1:tp1, 0],
                     data.debug.real_robot_path[1:tp1, 1], 'm')
            plt.plot(data.debug.noise_free_robot_path[1:tp1, 0],
                     data.debug.noise_free_robot_path[1:tp1, 1], 'g')
            plt.plot([data.debug.real_robot_path[t, 0]],
                     [data.debug.real_robot_path[t, 1]], '*r')
            plt.plot([data.debug.noise_free_robot_path[t, 0]],
                     [data.debug.noise_free_robot_path[t, 1]], '*g')

            # TODO plot SLAM solution
            # robot filtered trajectory and covariance
            plt.plot(mu_traj[:, 0], mu_traj[:, 1], 'blue')
            plot2dcov(mu[:2], Sigma[:2, :2], color='b', nSigma=3, legend=None)

            plt.figure(2, figsize=(8, 6))
            plt.cla()
            plt.spy(sam.A, marker='o', markersize=5)

            if should_show_plots:
                # Draw all the plots and pause to create an animation effect.
                plt.draw()
                plt.pause(args.plot_pause_len)

            if should_write_movie:
                movie_writer.grab_frame()

    progress_bar.finish()
    plt.show()