def make_domain_match_dict(self):
    """Build a nested dict keyed on the dot-separated parts of each config name."""
    d = dict()
    for name, cfg in self.cfgs.items():
        ns = name.split('.')
        utils.list_to_dict(ns, d, cfg)
    return d

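# `utils.list_to_dict` is not shown in this snippet. Given the call above
# (a key path, an accumulator dict, and a value), a minimal sketch of a
# plausible implementation follows; this is an assumption, not the
# project's confirmed code:
def list_to_dict(keys, d, value):
    # Walk/create nested dicts along `keys` and store `value` at the leaf,
    # e.g. (['a', 'b'], {}, cfg) -> {'a': {'b': cfg}}.
    node = d
    for key in keys[:-1]:
        node = node.setdefault(key, {})
    node[keys[-1]] = value
    return d
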
def get_connections(request):
    response = {"status": 0, "errors": None, "connections": []}
    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username,
                               request.LANGUAGE_CODE)
        response["connections"] = list_to_dict(c.get_connections())
    except RestException as e:
        response.update(handle_rest_exception(e, _("Could not get connections.")))

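# The Sqoop views in this section all pass the client's results through
# list_to_dict before JSON-encoding the response. Despite the name, each
# call site stores the result under a list-valued response key, so a
# plausible sketch (assuming each client object exposes a to_dict()
# serializer, which these snippets do not confirm) is:
def list_to_dict(items):
    # Serialize each Sqoop client object into a plain, JSON-encodable dict.
    return [item.to_dict() for item in items]
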
def product_menu(self, category_value):
    """Display the product menu and wait for user input to continue."""
    products = self.product_manager.get_product_list_by_category_db(
        self.db_manager, 10, category_value)
    products = utils.list_to_dict(products)
    view = ViewProduct(products)
    value = ""
    expression = [str(x) for x in range(1, len(products) + 1)]
    ViewProduct.print_select()
    while value not in expression:
        view.print_menu()
        value = input()
    product = Product(**products[int(value)])
    product_barcode = product.barcode
    if product.nutriscore_grade == 'a':
        ViewProduct.print_nutriscore_a()
        self.category_menu()
    else:
        substitute = utils.find_substitute(product_barcode, category_value,
                                           self.db_manager, self.product_manager)
        self.substitute_menu(substitute, product)

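# Here utils.list_to_dict appears to number the fetched products so that
# menu input "1".."n" can index them directly; category_menu below relies
# on the same behavior. A sketch under that assumption:
def list_to_dict(items):
    # Key each item by its 1-based menu position, e.g. [a, b] -> {1: a, 2: b}.
    return {i: item for i, item in enumerate(items, start=1)}
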
def get_links(request):
    response = {'status': 0, 'errors': None, 'links': []}
    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username,
                               request.LANGUAGE_CODE)
        response['links'] = list_to_dict(c.get_links())
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not get links.')))

def get_connectors(request):
    response = {
        'status': 0,
        'errors': None,
        'connectors': []
    }
    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username,
                               request.LANGUAGE_CODE,
                               ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
        response['connectors'] = list_to_dict(c.get_connectors())
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not get connectors.')))

def get_links(request):
    response = {
        'status': 0,
        'errors': None,
        'links': []
    }
    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username,
                               request.LANGUAGE_CODE)
        response['links'] = list_to_dict(c.get_links())
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not get links.')))

def get_submissions(request):
    response = {'status': 0, 'errors': None, 'submissions': []}
    status = request.GET.get('status', 'submissions').split(',')  # parsed but unused in this snippet
    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username,
                               request.LANGUAGE_CODE)
        submissions = c.get_submissions()
        response['submissions'] = list_to_dict(submissions)
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not get submissions.')))

def get_jobs(request):
    response = {'status': 0, 'errors': None, 'jobs': []}
    try:
        c = client.SqoopClient(
            conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE,
            ssl_cert_ca_verify=conf.SSL_CERT_CA_VERIFY.get())
        jobs = c.get_jobs()
        response['jobs'] = list_to_dict(jobs)
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not get jobs.')))

def test_inorder_traversal(self):
    """Test the inorder traversal print."""
    size = 20
    seq = utils.random_seq(size)
    utils.list_to_bst(seq, self.bst)
    seq_dict = utils.list_to_dict(seq)
    sorted_keys = sorted(seq_dict.keys())
    res = [str(seq_dict[i]) for i in sorted_keys]
    with patch('sys.stdout', new=StringIO()) as fake_out:
        self.bst.inorder_traversal_print()
        printed_values = fake_out.getvalue().strip("\n")
    list_data = printed_values.split("\n")
    self.assertEqual(list_data, res)

def get_submissions(request):
    response = {
        'status': 0,
        'errors': None,
        'submissions': []
    }
    status = request.GET.get('status', 'all').split(',')  # parsed but unused in this snippet
    try:
        c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username,
                               request.LANGUAGE_CODE)
        submissions = c.get_submissions()
        response['submissions'] = list_to_dict(submissions)
    except RestException as e:
        response.update(handle_rest_exception(e, _('Could not get submissions.')))

def test_inorder_traversal(self):
    """Test the inorder traversal print."""
    seq = [3, 10, 4, 14, 1, 8, 2, 9, 12, 18, 16, 7, 0, 11, 17, 5, 13, 6, 19, 15]
    utils.list_to_bst(seq, self.bst)
    seq_dict = utils.list_to_dict(seq)  # done to get the index for each value
    sorted_keys = sorted(seq_dict.keys())  # keys in order, values from the seq list
    # Use the sorted keys to get the values for the keys in order.
    res = [str(seq_dict[i]) for i in sorted_keys]
    with patch('sys.stdout', new=StringIO()) as fake_out:
        self.bst.inorder_traversal_print()
        printed_values = fake_out.getvalue().strip("\n")
    list_data = printed_values.split("\n")
    self.assertEqual(list_data, res)

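# Per the comments in the two tests above, utils.list_to_dict maps each
# value in the sequence to its index. A minimal sketch of that assumed
# behavior:
def list_to_dict(seq):
    # e.g. [3, 10, 4] -> {3: 0, 10: 1, 4: 2}
    return {value: index for index, value in enumerate(seq)}
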
def __init__(self, root, transform=None, target_transform=None):
    classes, class_to_idx = self._find_classes(root)
    samples = _make_dataset(root, class_to_idx, IMG_EXTENSIONS)
    cooccurring_tracks_file = os.path.join(root, "cooccurring_tracks.txt")
    with open(cooccurring_tracks_file) as file:
        self.cooccurring_tracks = [[int(n) for n in line.split(',')]
                                   for line in file]
    if len(samples) == 0:
        raise RuntimeError("Found 0 files in subfolders of: " + root + "\n"
                           "Supported extensions are: " + ",".join(IMG_EXTENSIONS))

    self.root = root
    self.classes = classes
    self.class_to_idx = class_to_idx
    self.samples = samples
    self.track_targets = [s[1] for s in samples]
    # self.gt_targets = [s[2] for s in samples]

    track_to_gt_list = utils.read_file_to_list(os.path.join(root, 'track_gt.txt'))
    track_to_gt_dict = utils.list_to_dict(track_to_gt_list)

    # Remap ground-truth labels to contiguous indices.
    gtclass_to_idx = {}
    gt_idx = 0
    gt_targets = []
    for track_id in self.track_targets:
        if track_to_gt_dict[track_id] not in gtclass_to_idx:
            gtclass_to_idx[track_to_gt_dict[track_id]] = gt_idx
            gt_idx += 1
        label = gtclass_to_idx[track_to_gt_dict[track_id]]
        gt_targets.append(label)
    self.gt_targets = gt_targets

    # Index sample positions by track id. track_targets must be an array
    # here: comparing a plain list against a scalar with == would not give
    # an element-wise mask.
    track_targets_arr = np.asarray(self.track_targets)
    track_idx_to_sample_idx = {}
    for track_idx in np.unique(track_targets_arr):
        track_idx_to_sample_idx[track_idx] = np.where(track_targets_arr == track_idx)[0]
    self.track_idx_to_sample_idx = track_idx_to_sample_idx

    self.transform = transform
    self.target_transform = target_transform

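# track_gt.txt is read into a list of records and then turned into a
# {track_id: gt_label} lookup; annotate_video below uses the same helper
# for bbx_gt.txt. Assuming read_file_to_list yields (key, value) pairs
# (not confirmed by these snippets), list_to_dict could be:
def list_to_dict(pairs):
    # Build a lookup table from a list of (key, value) records.
    return {key: value for key, value in pairs}
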
def category_menu(self):
    """Display the category menu and wait for user input to continue."""
    categories = config.value['CATEGORIES']
    categories = utils.list_to_dict(categories)
    view = ViewCategory(categories)
    expression = [str(x) for x in range(1, len(categories) + 1)]
    value = ""
    ViewCategory.print_select()
    while value not in expression:
        view.print_menu()
        value = input()
    category = categories[int(value)]
    self.product_menu(category)

def extract_metatiles(state_graph_files, unique_metatiles_file, metatile_coords_dict_file):
    all_metatiles = []
    unique_metatiles = []
    metatile_coords_dict = {}

    for state_graph_file in state_graph_files:
        # Load in the state graph
        state_graph = nx.read_gpickle(state_graph_file)

        # Extract game and level from state graph filename
        level_info = parse_state_graph_filename(state_graph_file)
        game, level = level_info['game'], level_info['level']

        # Generate level object from file
        level_obj = Level.generate_level_from_file(game, level)

        # Create dictionaries for level platform coords, bonus coords, and goal coords
        platform_coords_dict = utils.list_to_dict(level_obj.get_platform_coords())
        bonus_coords_dict = utils.list_to_dict(level_obj.get_bonus_coords())
        goal_coords_dict = utils.list_to_dict(level_obj.get_goal_coords())
        one_way_platform_coords_dict = utils.list_to_dict(level_obj.get_one_way_platform_coords())
        hazard_coords_dict = utils.list_to_dict(level_obj.get_hazard_coords())
        wall_coords_dict = utils.list_to_dict(level_obj.get_wall_coords())
        permeable_wall_coords_dict = utils.list_to_dict(level_obj.get_permeable_wall_coords())

        # Extract metatiles from level
        all_possible_coords = level_obj.get_all_possible_coords()
        metatile_coord_states_map = get_metatile_coord_states_map(state_graph, all_possible_coords)

        for metatile_coord in all_possible_coords:
            new_metatile = construct_metatile(
                metatile_coord, game=game, level=level,
                level_start_coord=level_obj.get_start_coord(),
                level_goal_coords_dict=goal_coords_dict,
                level_platform_coords_dict=platform_coords_dict,
                level_bonus_coords_dict=bonus_coords_dict,
                level_one_way_platform_coords_dict=one_way_platform_coords_dict,
                level_hazard_coords_dict=hazard_coords_dict,
                level_wall_coords_dict=wall_coords_dict,
                level_permeable_wall_coords_dict=permeable_wall_coords_dict,
                state_graph=state_graph,
                metatile_coord_states_map=metatile_coord_states_map)

            # Add new_metatile to list of all_metatiles
            all_metatiles.append(new_metatile)

            # Update unique_metatiles list
            if new_metatile not in unique_metatiles:
                unique_metatiles.append(new_metatile)  # add new metatile
            else:
                metatile_idx = unique_metatiles.index(new_metatile)
                existing_metatile = unique_metatiles.pop(metatile_idx)  # remove old metatile
                new_metatile = existing_metatile.merge_games_and_levels(new_metatile)  # merge metatile games and levels
                unique_metatiles.append(new_metatile)  # add new (merged) metatile

            # Create {metatile: coords} dictionary
            if metatile_coords_dict_file is not None:
                # only one state graph file given => only one game per metatile
                new_metatile_str = new_metatile.to_str()
                if metatile_coords_dict.get(new_metatile_str) is None:
                    metatile_coords_dict[new_metatile_str] = [metatile_coord]
                else:
                    metatile_coords_dict[new_metatile_str].append(metatile_coord)

    # Save unique_metatiles to file
    utils.write_pickle(unique_metatiles_file, unique_metatiles)

    # Create {unique_metatile: coords} dictionary
    if metatile_coords_dict_file is not None:
        unique_metatile_coords_dict = {}
        ordered_metatiles = []
        ordered_coords_lists = []

        for metatile_str, coords in metatile_coords_dict.items():
            ordered_metatiles.append(Metatile.from_str(metatile_str))
            ordered_coords_lists.append(coords)

        for unique_metatile in unique_metatiles:
            unique_metatile_str = unique_metatile.to_str()
            unique_metatile_coords_dict[unique_metatile_str] = []
            for idx in utils.get_all_indices(unique_metatile, ordered_metatiles):
                unique_metatile_coords_dict[unique_metatile_str] += ordered_coords_lists[idx]

        utils.write_pickle(metatile_coords_dict_file, unique_metatile_coords_dict)  # save to file

    return all_metatiles, unique_metatiles

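# The coordinate lists above are converted to dicts, presumably so that
# metatile construction can test coordinate membership in O(1) instead of
# scanning a list. A sketch of that assumed helper:
def list_to_dict(items):
    # Index each coordinate by its position, e.g. [c0, c1] -> {c0: 0, c1: 1};
    # only the keys matter for membership tests.
    return {item: index for index, item in enumerate(items)}
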
def get_id(self):
    album_track_dict = list_to_dict(
        list(filter(None, self.parsed_url.path.rsplit("/"))))
    print(album_track_dict)  # debug output
    return album_track_dict

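# Given the filtered URL path segments, list_to_dict here presumably pairs
# up alternating elements; this is a hypothetical reading of the call
# above, not the project's confirmed behavior:
def list_to_dict(segments):
    # e.g. ['album', '42', 'track', '7'] -> {'album': '42', 'track': '7'}
    return dict(zip(segments[::2], segments[1::2]))
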
def annotate_video(movie_file_path: str, dataset_path: str, output_path: str,
                   model: nn.Module, device, max_frame: int = 100000,
                   tracker_max_age: int = 10,
                   plotter: utils.plotter_utils.VisdomPlotter = None,
                   name: str = '', compute_track_mean: bool = False):
    filename = os.path.join(dataset_path, 'bbx.txt')
    print('Getting annotations from {}'.format(filename))
    bbx_list = utils.read_file_to_list(filename)
    if bbx_list:
        bounding_boxes_list = bbx_list
    else:
        bounding_boxes_list = get_bounding_boxes(movie_file_path, max_frame=max_frame,
                                                 tracker_max_age=tracker_max_age)

    print('Extracting ROI of the video.')
    cropped_image_list = get_cropped_images(movie_file_path, bounding_boxes_list,
                                            max_frame=max_frame)
    track_dict = get_track_dict(bounding_boxes_list)
    frame_dict = get_frame_dict(bounding_boxes_list)
    bbx_dict = get_bbx_dict(bounding_boxes_list)

    # Data transform
    data_transform = transforms.Compose([
        transforms.ToTensor()
    ])
    dataset = NumpyDataset(cropped_image_list, transform=data_transform)
    dataloader = torch.utils.data.DataLoader(dataset, num_workers=2, batch_size=100)

    print('Extracting features.')
    model = model.to(device)
    features = ml_utils.extract_features(dataloader, model, device)

    cluster_techniques_list = ['kmeans', 'spectral', 'hac']
    tsne_features, tsne_chosen_samples = projection_utils.tsne_projection(features)
    pca_features, pca_chosen_samples = projection_utils.pca_projection(features)

    # Frame level clustering
    print('Performing frame level clustering.')
    for cluster_method in cluster_techniques_list:
        cluster_name = '{}_frame_level_{}'.format(name, cluster_method)
        predictions, data_dict = clustering.cluster_techniques(features, cluster_method,
                                                               max_clusters=10)
        write_video(movie_file_path, output_path, predictions, frame_dict,
                    name=cluster_name, max_frame=max_frame)
        plotter.scatter_plot(cluster_name + '_tsne', tsne_features,
                             predictions[tsne_chosen_samples])
        plotter.scatter_plot(cluster_name + '_pca', pca_features,
                             predictions[pca_chosen_samples])

    # Add ground truth if it exists
    gt_file_path = os.path.join(dataset_path, 'bbx_gt.txt')
    if os.path.isfile(gt_file_path):
        print('Creating ground truth video and plots.')
        bbx_to_gt_list = utils.read_file_to_list(gt_file_path)
        bbx_to_gt_dict = utils.list_to_dict(bbx_to_gt_list)
        groundtruth = []
        gt_to_idx_dict = {}
        bbx_count = 0
        for bbx in bounding_boxes_list:
            bbx_idx = bbx[2]
            gt = bbx_to_gt_dict[bbx_idx]
            if gt not in gt_to_idx_dict:
                gt_to_idx_dict[gt] = bbx_count
                bbx_count += 1
            label = gt_to_idx_dict[gt]
            groundtruth.append(label)
        groundtruth = np.array(groundtruth)

        gt_name = '{}_gt'.format(name)
        write_video(movie_file_path, output_path, groundtruth, frame_dict,
                    name=gt_name, max_frame=max_frame)
        plotter.scatter_plot(gt_name + '_tsne', tsne_features,
                             groundtruth[tsne_chosen_samples])
        plotter.scatter_plot(gt_name + '_pca', pca_features,
                             groundtruth[pca_chosen_samples])

    # Track level clustering
    if compute_track_mean:
        print('Performing track level clustering.')
        mean_features = []
        track_to_idx_dict = {}
        for idx, track_idx in enumerate(track_dict.keys()):
            feature_track = features[track_dict[track_idx]]
            mean_features.append(np.mean(feature_track, axis=0))
            track_to_idx_dict[track_idx] = idx
        mean_features = np.asarray(mean_features)

        for cluster_method in cluster_techniques_list:
            cluster_name = '{}_track_level_{}'.format(name, cluster_method)
            mean_predictions, data_dict = clustering.cluster_techniques(
                mean_features, cluster_method, max_clusters=10)
            predictions = []
            for bbx_idx in bbx_dict.keys():
                track_idx = track_to_idx_dict[bbx_dict[bbx_idx][0]]
                predictions.append(mean_predictions[track_idx])
            predictions = np.array(predictions)
            write_video(movie_file_path, output_path, predictions, frame_dict,
                        name=cluster_name, max_frame=max_frame)
            plotter.scatter_plot(cluster_name + '_tsne', tsne_features,
                                 predictions[tsne_chosen_samples])
            plotter.scatter_plot(cluster_name + '_pca', pca_features,
                                 predictions[pca_chosen_samples])

#!/usr/bin/python
# coding: utf8
# Author: chenyunyun<*****@*****.**>
from utils import dict_merge, list_to_dict


RESPONSE_CALLBACK = dict_merge(
    list_to_dict(
        'set', lambda r: bool(int(r[0]))
    ),
    list_to_dict(
        'get', lambda r: r[0]
    ),
)


class Response(object):
    def __init__(self, command_name, code, body=None):
        self.command_name = command_name
        self.code = code
        self.body = body

    @property
    def ok(self):
        return self.code == 'ok'

    @property
    def not_found(self):
        return self.code == 'not_found'

    @property

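# For reference, the two helpers imported above are not shown in this
# snippet. Minimal sketches of plausible implementations, assuming
# list_to_dict pairs up alternating positional arguments and dict_merge
# folds several dicts into one (assumptions, not the library's confirmed API):
def list_to_dict(*args):
    # e.g. list_to_dict('get', f) -> {'get': f}
    return dict(zip(args[::2], args[1::2]))

def dict_merge(*dicts):
    # Fold the given dicts left-to-right into a single dict.
    merged = {}
    for d in dicts:
        merged.update(d)
    return merged
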