def __init__(self, arguments, args):
    self._arguments = arguments
    if args.show_commands:
        line()
        log.debug(f'Running the following command:\n{" ".join(self._arguments)}')
        line()
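# Note: `line` is not one function across these snippets. Depending on the source
# project it is a console separator printed around log output, a text formatter
# (utils.line(template, name=...)), a geometry helper returning points or a
# callable (utils.line(p1, p2), utils.line(x1, y1, x2, y2)), or a plotting
# shortcut (utils.line(xs, ys, xlabel, ylabel, title)). A minimal sketch of the
# separator variant used just above (an assumption, not the original helper):
def line(text=None, char='='):
    """Print and return a horizontal rule, sized to `text` when given."""
    rule = char * (len(text) if text else 80)
    print(rule)
    return rule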
def prepare_data(self):
    # set up the training set
    if "timit" in self.hp.data:
        train, val, test = TrainTestDataset.get_datasets(path=self.hp.timit_path)
    elif "buckeye" in self.hp.data:
        train, val, test = TrainValTestDataset.get_datasets(
            path=self.hp.buckeye_path,
            percent=self.hp.buckeye_percent)
    else:
        raise Exception("no such training data!")

    if "libri" in self.hp.data:
        libri_train = LibriSpeechDataset(path=self.hp.libri_path,
                                         subset=self.hp.libri_subset,
                                         percent=self.hp.libri_percent)
        train = ConcatDataset([train, libri_train])
        train.path = "\n\t+".join([dataset.path for dataset in train.datasets])
        print(f"added libri ({len(libri_train)} examples)")

    self.train_dataset = train
    self.valid_dataset = val
    self.test_dataset = test

    line()
    print("DATA:")
    print(f"train: {self.train_dataset.path} ({len(self.train_dataset)})")
    print(f"valid: {self.valid_dataset.path} ({len(self.valid_dataset)})")
    print(f"test: {self.test_dataset.path} ({len(self.test_dataset)})")
    line()
def configure_optimizers(self):
    parameters = filter(lambda p: p.requires_grad, self.parameters())
    if self.hp.optimizer == "sgd":
        self.opt = optim.SGD(parameters, lr=self.hparams.lr,
                             momentum=0.9, weight_decay=5e-4)
    elif self.hp.optimizer == "adam":
        self.opt = optim.Adam(parameters, lr=self.hparams.lr, weight_decay=5e-4)
    elif self.hp.optimizer == "ranger":
        self.opt = optim_extra.Ranger(parameters, lr=self.hparams.lr,
                                      alpha=0.5, k=6, N_sma_threshhold=5,
                                      betas=(.95, 0.999), eps=1e-5,
                                      weight_decay=0)
    else:
        raise Exception("unknown optimizer")
    print(f"optimizer: {self.opt}")
    line()
    self.scheduler = optim.lr_scheduler.StepLR(self.opt,
                                               step_size=self.hp.lr_anneal_step,
                                               gamma=self.hp.lr_anneal_gamma)
    return [self.opt]
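# A note on the snippet above: in PyTorch Lightning, returning only [self.opt]
# means the trainer never steps the StepLR instance; if automatic stepping is the
# intent, configure_optimizers() can return both lists:
#
#     return [self.opt], [self.scheduler]
#
# The original may step self.scheduler manually elsewhere, so this is a
# suggestion rather than a correction.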
def get_user_by_username(ep, username, level):
    utils.line()
    print(SPACE * level + 'Get user={user}'.format(user=username))
    # Case-insensitive exact match on the username.
    query = {'username': {'$regex': '^' + username + '$', '$options': 'i'}}
    user = ep.get_doc_by_query('users', query)
    print(user)
    # Walk the ownership chain: user -> position -> group -> account -> organization.
    position_id = user.get('position')
    group_id = ep.get_doc_by_query('userpositions', {'_id': position_id}).get('group')
    account_id = ep.get_doc_by_query('usergroups', {'_id': group_id}).get('account')
    organization_id = ep.get_doc_by_query('accounts', {'_id': account_id}).get('organization')
    print('position_id={position_id}, group_id={group_id}, '
          'account_id={account_id}, organization={organization_id}'.format(
              position_id=position_id, group_id=group_id,
              account_id=account_id, organization_id=organization_id))
def delete_users_by_account_id(ep, account_id, level):
    utils.line()
    print(SPACE * level +
          'Get users by account_id={account_id}'.format(account_id=account_id))
    print(ep.get_doc_by_query('accounts', {'_id': account_id}))
    organization_id = ep.get_doc_by_query('accounts', {'_id': account_id}).get('organization')
    # Collect every (group, positions) pair under the account.
    group_ids = ep.get_ids_by_query('usergroups', {'account': account_id}, '_id')
    position_ids = []
    for group_id in group_ids:
        ids = ep.get_ids_by_query('userpositions', {'group': group_id}, '_id')
        position_ids.append((group_id, ids))
    # print('[(group, positions)]', position_ids)
    # Resolve the user behind each position.
    users = []
    for (group_id, positions) in position_ids:
        for position_id in positions:
            user = ep.get_doc_by_query('users', {'position': position_id})
            username = user.get('username')
            group_flag = ep.get_doc_by_query('usergroups', {'_id': group_id}).get('default')
            user_id = user.get('_id')
            isSuperAdmin = user.get('isSuperAdmin')
            users.append((user_id, username, isSuperAdmin,
                          position_id, group_id, group_flag))
    # Delete each user together with its position and group, then the shared
    # account and organization once.
    for i, (user_id, username, isSuperAdmin, position_id, group_id,
            group_flag) in enumerate(users, 1):
        print(ep.delete_one_by_query('users', {'username': username}))
        print(ep.delete_one_by_query('userpositions', {'_id': position_id}))
        print(ep.delete_one_by_query('usergroups', {'_id': group_id}))
    print(ep.delete_one_by_query('accounts', {'_id': account_id}))
    print(ep.delete_one_by_query('organizations', {'_id': organization_id}))
def generic_eval_end(self, outputs, mode):
    metrics = {}
    data = self.hp.data

    for k in self.stats:
        metrics[f"train_{k}"] = self.stats[k]["train"].get_stats()
        metrics[f"{mode}_{k}"] = self.stats[k][mode].get_stats()

    epoch = self.current_epoch + 1
    metrics['epoch'] = epoch
    metrics['current_lr'] = self.opt.param_groups[0]['lr']

    line()
    for pred_type in self.pr.keys():
        if mode == "val":
            (precision, recall, f1, rval), (width, prominence, distance) = \
                self.pr[pred_type][mode].get_stats()
            if rval > self.best_rval[pred_type][mode][0]:
                self.best_rval[pred_type][mode] = rval, self.current_epoch
                self.peak_detection_params[pred_type]["width"] = width
                self.peak_detection_params[pred_type]["prominence"] = prominence
                self.peak_detection_params[pred_type]["distance"] = distance
                self.peak_detection_params[pred_type]["epoch"] = self.current_epoch
                print(f"saving for test - {pred_type} - {self.peak_detection_params[pred_type]}")
        else:
            print(f"using pre-defined peak detection values - {pred_type} - {self.peak_detection_params[pred_type]}")
            (precision, recall, f1, rval), _ = self.pr[pred_type][mode].get_stats(
                width=self.peak_detection_params[pred_type]["width"],
                prominence=self.peak_detection_params[pred_type]["prominence"],
                distance=self.peak_detection_params[pred_type]["distance"],
            )
            # test has only one epoch, so set it as best;
            # this is used to pick the overall best pred_type later
            self.best_rval[pred_type][mode] = rval, self.current_epoch

        metrics[f'{data}_{mode}_{pred_type}_f1'] = f1
        metrics[f'{data}_{mode}_{pred_type}_precision'] = precision
        metrics[f'{data}_{mode}_{pred_type}_recall'] = recall
        metrics[f'{data}_{mode}_{pred_type}_rval'] = rval
        metrics[f"{data}_{mode}_{pred_type}_max_rval"] = self.best_rval[pred_type][mode][0]
        metrics[f"{data}_{mode}_{pred_type}_max_rval_epoch"] = self.best_rval[pred_type][mode][1]

    # get the best rval across all rval types and all epochs
    best_overall_rval = -float("inf")
    for pred_type, rval in self.best_rval.items():
        if rval[mode][0] > best_overall_rval:
            best_overall_rval = rval[mode][0]
    metrics[f'{mode}_max_rval'] = best_overall_rval

    for k, v in metrics.items():
        print(f"\t{k:<30} -- {v}")
    line()

    # wandb.log(metrics)
    output = OrderedDict({'log': metrics})
    return output
def clear_ship_by_ship_id(ep, ship_id, level):
    utils.line()
    print(SPACE * level + 'Remove ship_id={ship_id}'.format(ship_id=ship_id))
    voyage_ids = ep.get_ids_by_query('voyagesegments', {'ship': ship_id}, '_id')
    level += 1
    for i, voyage_id in enumerate(voyage_ids, 1):
        utils.line()
        print(SPACE * level + 'voyage {number}'.format(number=str(i)))
        clear_voyage_by_voyage_id(ep, voyage_id, level)
def parse_post_details(self, post_detail_html):
    soup = self.get_soup(post_detail_html)
    post_header = soup.select_one('div#post-header>h1').text
    post_content = soup.select_one('div.post-section')
    print('\n')
    print(Console.yellow(line(post_header)))
    print(post_header)
    print(Console.yellow(line(post_header)))
    print('\n')
    post_detail = post_content.text.strip()
    print(post_detail)
def create_clips(video_path, output_folder, interval_seconds, clip_length):
    # The output folder for the clips.
    output_folder = os.path.join(output_folder, 'clips')

    if not os.path.exists(video_path):
        raise ClipError('The specified video file does not exist.')
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)

    provider = VideoInfoProvider(video_path)
    duration = int(float(provider.get_duration()))
    if interval_seconds > duration:
        raise ClipError(
            f'The interval ({interval_seconds}s) may not be longer than the video ({duration}s).')

    number_steps = math.trunc(duration / interval_seconds)
    txt_file_path = f'{output_folder}/clips.txt'
    # Create the file.
    open(txt_file_path, 'w').close()

    print('Overview mode activated.')
    print(f'Creating a {clip_length} second clip every {interval_seconds} seconds from {video_path}...')
    line()
    try:
        for step in range(1, number_steps):
            clip_name = f'clip{step}.mkv'
            with open(txt_file_path, 'a') as f:
                f.write(f"file '{clip_name}'\n")
            clip_output_path = os.path.join(output_folder, clip_name)
            clip_offset = step_to_movie_timestamp(step * interval_seconds)
            print(f'Creating clip {step} which starts at {clip_offset}...')
            # Every subprocess argument must be a string.
            subprocess_cut_args = [
                "ffmpeg", "-loglevel", "warning", "-stats", "-y",
                "-ss", str(clip_offset), "-i", video_path,
                "-map", "0:V", "-t", str(clip_length),
                "-c:v", "libx264", "-crf", "0", "-preset", "ultrafast",
                clip_output_path
            ]
            subprocess.run(subprocess_cut_args)
    except Exception as error:
        print('An error occurred while trying to create the clips.')
        exit_program(error)
    else:
        return txt_file_path
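# step_to_movie_timestamp() is not defined in this section. Going by its use
# above, it turns an offset in seconds into a timestamp that ffmpeg's -ss flag
# accepts. A plausible sketch (an assumption about the helper, not the original):
def step_to_movie_timestamp(seconds):
    """Convert a second offset to an HH:MM:SS string for ffmpeg's -ss option."""
    hours, remainder = divmod(int(seconds), 3600)
    minutes, secs = divmod(remainder, 60)
    return f'{hours:02d}:{minutes:02d}:{secs:02d}'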
def linkGal2(self, obj1, obj2, intens):
    # NOTE: for now this does not handle multiple links.
    self.maskPoint = utils.line(self.listCenter[obj1], self.listCenter[obj2])
    maskPointIm = np.zeros_like(self.maskAll, dtype=bool)
    for k in self.maskPoint:
        maskPointIm[k] = True
    maskPointIm = binary_dilation(maskPointIm, np.ones((3, 3)))
    # Squared distance between the two centres, and from obj1 to each line point.
    dist = float((self.listCenter[obj1][0] - self.listCenter[obj2][0])**2 +
                 (self.listCenter[obj1][1] - self.listCenter[obj2][1])**2)
    listDist = [(self.listCenter[obj1][0] - k[0])**2 +
                (self.listCenter[obj1][1] - k[1])**2 for k in self.maskPoint]
    # Interpolate the wavelength linearly along the link.
    listLmbda = [
        self.spectraSourcesLmbda[obj1] +
        (self.spectraSourcesLmbda[obj2] - self.spectraSourcesLmbda[obj1]) * k / dist
        for k in listDist
    ]
    listSpectra = [createSpectra(i, self.shape[0], width=5) for i in listLmbda]
    listPoints = np.nonzero(maskPointIm)
    for i, j in zip(listPoints[0], listPoints[1]):
        self.data[:, i, j] = listSpectra[np.random.randint(len(listSpectra))] * intens
    self.maskAll = self.maskAll + maskPointIm
def update_img(self):
    if self.type == "curve":
        self.img = polynomial(self.raw_img, self.X, self.Y)
        self.show_img()
        self.show_curve()
    elif self.type == "line":
        self.img = line(self.raw_img, self.a, self.b)
        self.show_line()
        self.show_img()
    elif self.type == "broken":
        # apply the piecewise-linear transform, matching what show_curve() plots
        # for this type
        self.img = broken(self.raw_img, self.X, self.Y)
        self.show_img()
        self.show_curve()
    elif self.type == "histeq":
        self.img, self.cdf = histeq(self.raw_img)
        self.show_img()
        self.show_histeq()
    elif self.type == "gamma":
        self.img = gamma_trans(self.raw_img, self.gamma)
        self.show_img()
        self.show_gamma()
def test():
    from functools import reduce
    from itertools import repeat

    L, X = iline()
    S, = line()

    def solve():
        # `mul` is quaternion multiplication over the symbols 1, i, j, k;
        # `compressed` is the product of one whole copy of S.
        compressed = reduce(mul, S, '1')
        now = '1'
        i = 0
        need = ('i', 'j', 'k')
        for _ in repeat(S, X):
            if need:
                for c in S:
                    now = mul(now, c)
                    i += 1
                    if need and i > L:
                        print('NO')
                        return
                    if need and now == need[0]:
                        # Found the next required prefix product; reset and
                        # scan for the following one.
                        need = need[1:]
                        now = '1'
                        i = 0
            else:
                # Nothing left to match: fold in whole copies at once.
                now = mul(now, compressed)
        if need or now != '1':
            print('NO')
        else:
            print('YES')

    return solve
def on_touch_down(self, touch):
    if touch.is_double_tap:
        x1, y1 = self.outlet.to_window(*self.outlet.pos)
        x2, y2 = self.inlet.to_window(*self.inlet.pos)
        # Evaluate the connecting line at the touch's x position and compare
        # the result with the touch's y position.
        y, x = touch.y, utils.line(x1 + 10, y1 + 7, x2 + 10, y2 + 7)(touch.x)
        if fabs(x - y) < self.line_width / 2:
            self.remove()
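# This utils.line() variant is a factory: it takes two endpoints and the result
# is applied to an x coordinate. A minimal sketch of such a factory, inferred
# from the call site (hypothetical name and implementation, not the original):
def line_through(x1, y1, x2, y2):
    """Return f(x) giving the y value of the line through (x1, y1) and (x2, y2)."""
    slope = (y2 - y1) / (x2 - x1)  # assumes the line is not vertical

    def f(x):
        return y1 + slope * (x - x1)

    return f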
def get_users_by_account_id(ep, account_id, level):
    utils.line()
    print(SPACE * level +
          'Get users by account_id={account_id}'.format(account_id=account_id))
    print(ep.get_doc_by_query('accounts', {'_id': account_id}))
    organization_id = ep.get_doc_by_query('accounts', {'_id': account_id}).get('organization')
    group_ids = ep.get_ids_by_query('usergroups', {'account': account_id}, '_id')
    position_ids = []
    for group_id in group_ids:
        ids = ep.get_ids_by_query('userpositions', {'group': group_id}, '_id')
        position_ids.append((group_id, ids))
    # print('[(group, positions)]', position_ids)
    users = []
    for (group_id, positions) in position_ids:
        for position_id in positions:
            user = ep.get_doc_by_query('users', {'position': position_id})
            username = user.get('username')
            user_group = user.get('group')
            date_delete = 'No' if user.get('dateDelete') is None else 'Yes'
            group_flag = ep.get_doc_by_query('usergroups', {'_id': group_id}).get('default')
            user_id = user.get('_id')
            isSuperAdmin = user.get('isSuperAdmin')
            users.append((user_id, username, date_delete, isSuperAdmin,
                          position_id, group_id, group_flag, user_group))
    for i, (user_id, username, date_delete, isSuperAdmin, position_id,
            group_id, group_flag, user_group) in enumerate(users, 1):
        print('{i} {user_id}, dateDelete={date_delete}, username={username}, '
              'isSuperAdmin={isSuperAdmin}, position={position_id}, '
              'group={group_id}({flag}), user.group={user_group}, '
              'organization={organization_id}'.format(
                  i=i, user_id=user_id, username=username,
                  date_delete=date_delete, isSuperAdmin=isSuperAdmin,
                  position_id=position_id, group_id=group_id, flag=group_flag,
                  user_group=user_group, organization_id=organization_id))
def clear_voyage_by_voyage_id(ep, voyage_id, level):
    utils.line()
    print(SPACE * level + 'Remove voyage_id={voyage_id}'.format(voyage_id=voyage_id))
    print(SPACE * level + '{count} voyage(s) removed'.format(
        count=ep.delete_many_by_query('voyagesegments', {'_id': voyage_id})))
    print(SPACE * level + '{count} message(s) removed'.format(
        count=ep.delete_many_by_query('messages', {'voyage': voyage_id,
                                                   'type': 'Voyage'})))
    order_ids = ep.get_ids_by_query('orders', {'segment': voyage_id}, '_id')
    level += 1
    for i, order_id in enumerate(order_ids, 1):
        utils.line()
        print(SPACE * level + 'order {number}'.format(number=str(i)))
        clear_order_by_order_id(ep, order_id, level)
def test():
    from itertools import chain, repeat

    _, row = line()

    def solve():
        print(max(v - i for i, v in enumerate(
            chain(*(repeat(i, n) for i, n in enumerate(map(int, row)))))))

    return solve
def create_movie_overview(video_path, output_folder, interval_seconds, clip_length):
    extension = Path(video_path).suffix
    try:
        txt_file_path = create_clips(video_path, output_folder,
                                     interval_seconds, clip_length)
        output_file = concatenate_clips(txt_file_path, output_folder, extension,
                                        interval_seconds, clip_length)
        result = True
    except ClipError as err:
        result = False
        exit_program(err.args[0])
    except ConcatenateError as err:
        result = False
        exit_program(err.args[0])
    if result:
        print(f'Overview Video: {clip_length}-{interval_seconds} '
              f'(ClipLength-IntervalSeconds){extension}')
        line()
    return result, output_file
def markdown(self, run=True):
    if run:
        try:
            return self.text.format(**self.variables)
        except KeyError as name:
            raise KeyError(utils.line("""
                You referenced {name} in your literate document, but no such
                variable was found. Did you enable code evaluation?
                """, name=name))
def test():
    s, = line()
    yield
    # Count the boundaries between runs of equal characters; the '+' sentinel
    # makes the final run count as well.
    n = 0
    for a, b in zip(s, s[1:] + '+'):
        if a != b:
            n += 1
    print(n)
def find_file(self, file_name):
    self.file_to_find = file_name[0]
    Console.log(f'Finding "{Console.green(self.file_to_find)}" in '
                f'{Console.green(file_name[1][0])}. Please wait...')
    for file_obj in os.walk(file_name[1][0]):
        self.fnames = file_obj[2]
        self.dir_path = file_obj[0]
        multi_process_this(self.fnames, self.actual_finding_of_a_file)
    print('\n')
    print_out = f"found {len(self.found_files)} results of '{self.file_to_find}'"
    time.sleep(.2)
    Console.warn(line(print_out))
    time.sleep(.2)
    Console.log(print_out)
    time.sleep(.2)
    Console.warn(line(print_out, '-'))
    if len(self.found_files):
        self.choose_to_copy_files()
def create_movie_overview(video_path, output_folder, interval_seconds, clip_length):
    extension = os.path.splitext(video_path)[-1][1:]
    output_file = str()
    try:
        clips_file = create_clips(video_path, output_folder,
                                  interval_seconds, clip_length)
        output_file = concatenate_clips(clips_file, output_folder, extension,
                                        interval_seconds, clip_length)
        result = True
    except ClipError as err:
        result = False
        print(err.args[0])
    except ConcatenateError as err:
        result = False
        print(err.args[0])
    if result:
        print(f'Overview Video: {clip_length}-{interval_seconds} '
              f'(ClipLength-IntervalSeconds).{extension}')
        line()
    return result, output_file
def concatenate_clips(txt_file_path, output_folder, extension,
                      interval_seconds, clip_length):
    if not os.path.exists(txt_file_path):
        raise ConcatenateError(f'{txt_file_path} does not exist.')
    overview_filename = f'{clip_length}-{interval_seconds} (ClipLength-IntervalSeconds){extension}'
    concatenated_filepath = os.path.join(output_folder, overview_filename)
    # Join the clips losslessly with ffmpeg's concat demuxer (-c copy).
    subprocess_concatenate_args = [
        "ffmpeg", "-loglevel", "warning", "-stats", "-y",
        "-f", "concat", "-safe", "0", "-i", txt_file_path,
        "-c", "copy", concatenated_filepath
    ]
    line()
    log.info('Concatenating the clips to create the overview video...')
    result = subprocess.run(subprocess_concatenate_args)
    log.info('Done!')
    shutil.rmtree(os.path.join(output_folder, 'clips'))
    log.info('The clips have been deleted as they are no longer needed.')
    if result.returncode == 0:
        return concatenated_filepath
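# For reference, the text file consumed by the concat demuxer above lists one
# clip per line, exactly as create_clips() writes it:
#
#     file 'clip1.mkv'
#     file 'clip2.mkv'
#     file 'clip3.mkv'
#
# ffmpeg resolves these paths relative to the location of the list file, which
# is why clips.txt lives in the same 'clips' directory as the clips themselves.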
def create_clips(video_path, output_folder, interval_seconds, clip_length):
    if not os.path.exists(video_path):
        raise ClipError('The specified video file does not exist.')
    if not os.path.exists(output_folder):
        os.mkdir(output_folder)

    duration = int(float(get_duration(video_path)))
    if interval_seconds > duration:
        raise ClipError(f'The interval ({interval_seconds}s) may not be longer than the video ({duration}s).')

    number_steps = math.trunc(duration / interval_seconds)
    output_clip_names = 'clips.txt'
    output_file_path = f'{output_folder}/{output_clip_names}'
    clip_file = open(output_file_path, 'w')

    line()
    print(f'Creating a {clip_length} second clip every {interval_seconds} seconds from {video_path}...')
    line()
    try:
        for step in range(1, number_steps):
            clip_name = f'clip{step}.mkv'
            clip_file.write(f"file '{clip_name}'\n")
            output_filename = os.path.join(output_folder, clip_name)
            clip_offset = step_to_movie_timestamp(step * interval_seconds)
            print(f'Creating clip {step} which starts at {clip_offset}...')
            subprocess_cut_args = [
                "ffmpeg", "-loglevel", "warning", "-stats", "-y",
                "-ss", str(clip_offset), "-i", video_path,
                "-map", "0", "-t", str(clip_length),
                "-c:v", "libx264", "-crf", "0", "-preset", "ultrafast",
                "-an", "-sn", output_filename
            ]
            subprocess.run(subprocess_cut_args)
    finally:
        clip_file.close()
    return output_file_path
def controller():
    utils.showProgramName()
    utils.line()
    # ---------------
    stop = False
    while not stop:
        utils.menuOptions()
        shuffleDatas()
        optionUser = str(input('Select one or more options: '))
        optionUser = optionUser.split(',')
        for option in optionUser:
            if option == '1':
                DATAS_USERS.append(NAMES[0])
            elif option == '2':
                DATAS_USERS.append(EMAILS[0])
            elif option == '3':
                DATAS_USERS.append(CITY[0])
            elif option == '4':
                DATAS_USERS.append(STATE[0])
            elif option == '5':
                DATAS_USERS.append(PHONE[0])
            elif option == 'stop':
                stop = True
            else:
                print('Invalid command')
        if len(DATAS_USERS) != 0:
            utils.line()
            for datas in DATAS_USERS:
                print(datas)
            utils.line()
            while True:
                makeTxt = str(input('Do you want to save the data to a txt file? (y/n) ')).lower()[0]
                if makeTxt == 'y':
                    with open('dados.txt', 'a') as attach:
                        for datas in DATAS_USERS:
                            attach.write(datas + '\n')
                        attach.write('-' * 10 + '\n')
                    break
                elif makeTxt == 'n':
                    break
            DATAS_USERS.clear()
            utils.line()
def clear_order_by_order_number(ep, order_number):
    order_id = ep.get_doc_id_by_query('orders', {'order_number': order_number})
    if order_id is not None:
        utils.line()
        print('Remove order_number={order_number}'.format(order_number=order_number))
        print('{count} order(s) removed'.format(
            count=ep.delete_many_by_query('orders', {'_id': order_id})))
        print('{count} orderentry(s) removed'.format(
            count=ep.delete_many_by_query('orderentries', {'order': order_id})))
        print('{count} productconfig(s) removed'.format(
            count=ep.delete_many_by_query('productconfigs', {'order': order_id})))
        print('{count} event(s) removed'.format(
            count=ep.delete_many_by_query('events', {'order': order_id})))
        print('{count} feedback(s) removed'.format(
            count=ep.delete_many_by_query('feedbacks', {'order': order_id})))
        print('{count} accessclaim(s) removed'.format(
            count=ep.delete_many_by_query('accessclaims', {'resourceId': str(order_id)})))
        print('{count} message(s) removed'.format(
            count=ep.delete_many_by_query('messages', {'order': order_id})))
def clear_user_by_username(ep, username, level):
    utils.line()
    print(SPACE * level + 'Remove user={user}'.format(user=username))
    user = ep.get_doc_by_query('users', {'username': username})
    print(user)
    # Resolve the full ownership chain before deleting anything.
    position_id = user.get('position')
    group_id = ep.get_doc_by_query('userpositions', {'_id': position_id}).get('group')
    account_id = ep.get_doc_by_query('usergroups', {'_id': group_id}).get('account')
    organization_id = ep.get_doc_by_query('accounts', {'_id': account_id}).get('organization')
    print(position_id, group_id, account_id, organization_id)
    ep.delete_one_by_query('users', {'username': username})
    ep.delete_one_by_query('userpositions', {'_id': position_id})
    ep.delete_one_by_query('usergroups', {'_id': group_id})
    ep.delete_one_by_query('accounts', {'_id': account_id})
    ep.delete_one_by_query('organizations', {'_id': organization_id})
    ep.delete_one_by_query('accessclaims', {'gid': str(group_id)})
def clear_order_by_order_id(ep, order_id, level):
    utils.line()
    print(SPACE * level + 'Remove order_id={order_id}'.format(order_id=order_id))
    print(SPACE * level + '{count} order removed'.format(
        count=ep.delete_many_by_query('orders', {'_id': order_id})))
    print(SPACE * level + '{count} orderentry(s) removed'.format(
        count=ep.delete_many_by_query('orderentries', {'order': order_id})))
    print(SPACE * level + '{count} productconfig(s) removed'.format(
        count=ep.delete_many_by_query('productconfigs', {'order': order_id})))
    print(SPACE * level + '{count} event(s) removed'.format(
        count=ep.delete_many_by_query('events', {'order': order_id})))
    print(SPACE * level + '{count} feedback(s) removed'.format(
        count=ep.delete_many_by_query('feedbacks', {'order': order_id})))
    print(SPACE * level + '{count} accessclaim(s) removed'.format(
        count=ep.delete_many_by_query('accessclaims', {'resourceId': str(order_id)})))
    print(SPACE * level + '{count} message(s) removed'.format(
        count=ep.delete_many_by_query('messages', {'order': order_id,
                                                   'type': 'Order'})))
    print(SPACE * level + '{count} inquiryorder(s) removed'.format(
        count=ep.delete_many_by_query('inquiryorders', {'order': order_id})))
def test():
    N, = iline()
    sentences = [set(line()) for i in range(N)]
    yield
    answer = float("inf")
    # Only the last N - 2 sentences need an assignment: `4 * bits + 2` pins
    # sentence 0 to language 0 and sentence 1 to language 1.
    for bits in range(1 << (N - 2)):
        words = [set(), set()]
        bits = 4 * bits + 2
        for i, s in enumerate(sentences):
            language = 1 if (bits & (1 << i)) else 0
            words[language] |= s
        answer = min(answer, len(words[0] & words[1]))
    print(answer)
def test():
    word, = line()
    yield
    # Count the letters of the scrambled word.
    nums = {}
    for c in word:
        nums[c] = nums.get(c, 0) + 1
    ans = []
    # `answer` appears to pair each digit with a letter that identifies it at
    # that stage; D[v] holds the letters of digit v's name, which are removed
    # as each digit is claimed.
    for v, c in answer:
        n = nums.get(c, 0)
        if n:
            ans += [v] * n
            for c in D[v]:
                nums[c] -= n
    print(''.join(map(str, sorted(ans))))
def show_curve(self):
    if self.type == "curve":
        x = np.arange(0, 255, 1)
        y = polynomial(x, self.X, self.Y)
        self.axes.plot(x, y)
        self.axes.plot(self.X, self.Y, "ob")
    elif self.type == "line":
        x = np.arange(0, 255, 1)
        y = line(x, self.X, self.Y)
        self.axes.plot(x, y)
    elif self.type == "broken":
        x = np.arange(0, 255, 1)
        y = broken(x, self.X, self.Y)
        self.axes.plot(x, y)
        self.axes.plot(self.X, self.Y, "ob")
    elif self.type == "gamma":
        x = np.arange(0, 255, 1)
        y = gamma_trans(x, self.X)
        self.axes.plot(x, y)
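# gamma_trans() is used above both on whole images and on a 0-255 ramp for the
# plot. A sketch of the usual normalised power-law curve it presumably applies
# (an assumption, not the original code):
import numpy as np

def gamma_trans(img, gamma):
    """Elementwise y = 255 * (x / 255) ** gamma on 8-bit intensities."""
    x = np.asarray(img, dtype=float)
    return (255.0 * (x / 255.0) ** gamma).astype(np.uint8)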
def add_intersection():
    """Add an intersection with the box border for the beginning or end."""
    # Build line equations ax + by = c from the two points and from each edge.
    line = utils.line(prev_pt, pt)
    top = {'a': 0, 'b': 1, 'c': self.y1}
    bottom = {'a': 0, 'b': 1, 'c': self.y}
    left = {'a': 1, 'b': 0, 'c': self.x}
    right = {'a': 1, 'b': 0, 'c': self.x1}
    # Intersect the segment with each edge and keep a hit that falls within
    # the segment's range.
    pt_begin = None
    for edge in [top, bottom, left, right]:
        intersection = utils.line_intersection(line, edge)
        if intersection is None:
            continue
        if self._is_in_range(intersection, pt, prev_pt):
            pt_begin = intersection
    if pt_begin is not None:
        result.append(((pt_begin[0] - self.x) * px_size[0],
                       (pt_begin[1] - self.y) * px_size[1]))
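# utils.line_intersection() is not shown. With lines stored as ax + by = c
# dicts, as above, a Cramer's-rule sketch (an assumption, not the original):
def line_intersection(l1, l2):
    """Intersect a1*x + b1*y = c1 with a2*x + b2*y = c2; None if parallel."""
    det = l1['a'] * l2['b'] - l2['a'] * l1['b']
    if det == 0:
        return None  # parallel or coincident lines
    x = (l1['c'] * l2['b'] - l2['c'] * l1['b']) / det
    y = (l1['a'] * l2['c'] - l2['a'] * l1['c']) / det
    return (x, y)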
# 1. Generate ground-truth trajectories
x1 = trajectory_simulation(0, 0, 300, 0, 0, 0, 200)
x2 = trajectory_simulation(x1[-1][0], x1[-1][2], 300, 0, 2, 2, 100)
x3 = trajectory_simulation(x2[-1][0], x2[-1][2], 0, 300, 0, 0, 200)

x1_n = trajectory_simulation_noise(0, 0, 300, 0, 0, 0, 200)
x2_n = trajectory_simulation_noise(x1_n[-1][0], x1_n[-1][2], 300, 0, 2, 2, 100)
x3_n = trajectory_simulation_noise(x2_n[-1][0], x2_n[-1][2], 0, 300, 0, 0, 200)

x_f = np.asmatrix(x1 + x2 + x3)
x_f_n = np.asmatrix(x1_n + x2_n + x3_n)

# 2. Plot the trajectories
# ground truth only (noise-free)
utils.line(x_f[:, 0], x_f[:, 2], "x", "y", "Plot of ground-truth trajectory")
# ground truth and noisy measurement together
fig = plt.figure()
plt.title("Plot of ground-truth and measurement")
plt.ylabel("y")
plt.xlabel("x")
plt.plot(x_f[:, 0], x_f[:, 2])
plt.plot(x_f_n[:, 0], x_f_n[:, 2])
fig.savefig(os.path.join(baseDir, 'Figures/Plot of ground-truth and measurement.png'))

utils.line(np.arange(500), x_f_n[:, 1], "time", "velocity", "velocity of x")
utils.line(np.arange(500), x_f_n[:, 3], "time", "velocity", "velocity of y")
print('The accuracy of the model on test data is', accuracy)

# ==============================================
# 2l. plot cmc
#
cmc = []
for k in range(1, 10):
    accuracy = 0
    for d in test_data:
        predicted_y = naive_bayes.predict(train_lh, train_prior,
                                          utils.bags(d['summary']))
        sorted_y = sorted(predicted_y.items(), key=operator.itemgetter(1),
                          reverse=True)
        # Count a hit if the true year appears in the top k predictions.
        for i in range(k):
            if sorted_y[i][0] == d['year']:
                accuracy += 1
                break
    cmc.append(accuracy / float(len(test_data)))
print('The CMC is', cmc)
utils.line([1, 2, 3, 4, 5, 6, 7, 8, 9], cmc, 'K', 'Accuracy',
           'Cumulative Match Curve')

# ==============================================
# 2m. Plot confusion matrix
#
cmatrix = np.zeros(shape=(9, 9))
for d in test_data:
    predicted_y = naive_bayes.predict(train_lh, train_prior,
                                      utils.bags(d['summary']))
    sorted_y = sorted(predicted_y.items(), key=operator.itemgetter(1),
                      reverse=True)
    predicted_year = sorted_y[0][0]
    # Bucket years into decades starting at 1930 (integer division).
    cmatrix[(d['year'] - 1930) // 10][(predicted_year - 1930) // 10] += 1
utils.spectrom(cmatrix, 'Decade', 'Decade', 'Confusion Matrix')
plt.title("2A. Plot of estimated trajectory, raw measurement, and true trajectory") plt.ylabel("y") plt.xlabel("x") plt.plot(estimate[0, :].T, estimate[2, :].T) plt.plot(x_f[:, 0], x_f[:, 2]) plt.plot(x_f_n[:, 0], x_f_n[:, 2]) fig.savefig(os.path.join(baseDir, 'Figures/2A. Estimated measurement and true trajectory.png')) # error rate for t in range(500): if t == 0: error = x_f_n[t] - estimate.T[t] else: error = np.concatenate((error, x_f_n[t] - estimate.T[t]), axis=0) utils.line(np.matrix(np.arange(500)).T, error[:, 0], "time", "error X", "2A. Error of Trajectory X") utils.line(np.matrix(np.arange(500)).T, error[:, 1], "time", "error X velocity", "2A. Error of X Velocity") utils.line(np.matrix(np.arange(500)).T, error[:, 2], "time", "error Y", "2A. Error of Trajectory Y") utils.line(np.matrix(np.arange(500)).T, error[:, 3], "time", "error Y velocity", "2A. Error of Y Velocity") fig = plt.figure() plt.title("2A. Plot of estimated velocity x, raw measurement, and true velocity") plt.ylabel("x velocity") plt.xlabel("time") plt.scatter(np.matrix(np.arange(500)).T, estimate[1, :].T, c='red') plt.scatter(np.matrix(np.arange(500)).T, x_f[:, 1], c='blue') plt.scatter(np.matrix(np.arange(500)).T, x_f_n[:, 1], c='green') fig.savefig(os.path.join(baseDir, 'Figures/2A. Estimated velocity x and true velocity.png')) fig = plt.figure() plt.title("2A. Plot of estimated velocity y, raw measurement, and true velocity")