def load_feature_maps(self, video_name, vidHeight=None, vidWidth=None):
    sts_saliency_maps = os.listdir(self.dynDir + video_name[:-4])
    sorted_sts_saliency_maps = sorted_nicely(sts_saliency_maps)
    self.num_sts = len(sorted_sts_saliency_maps)
    face1_maps = glob.glob(self.facemapDir + video_name[:-4] + '/*_speaker.png')
    sorted_face1_maps = sorted_nicely(face1_maps)
    self.num_speak = len(sorted_face1_maps)
    face2_maps = glob.glob(self.facemapDir + video_name[:-4] + '/*_nonspeaker.png')
    sorted_face2_maps = sorted_nicely(face2_maps)
    self.num_nspeak = len(sorted_face2_maps)
    # Center-bias prior: a 2D Gaussian centered on the frame.
    mu = np.array([vidWidth / 2, vidHeight / 2])
    wy = vidHeight / 12
    wx = vidWidth / 12
    sigma = [wx, wy]
    F = mkGaussian(mu, sigma, 0, vidWidth, vidHeight).T
    center_bias_map = Feat_map(feat_map=F / np.sum(F), name='CB')
    uniform_map = Feat_map(feat_map=np.ones(center_bias_map.shape) / np.prod(center_bias_map.shape),
                           name='Uniform')
    self.sts_names = sorted_sts_saliency_maps
    self.face1_names = sorted_face1_maps
    self.face2_names = sorted_face2_maps
    self.cb = center_bias_map
    self.uniform = uniform_map
    self.video_name = video_name[:-4]
    self.vidHeight = vidHeight
    self.vidWidth = vidWidth
    return (sorted_sts_saliency_maps, sorted_face1_maps, sorted_face2_maps,
            center_bias_map, uniform_map)
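# mkGaussian and Feat_map are not defined in this snippet. Below is a minimal
# sketch of what they plausibly look like, inferred only from how they are
# called above (mkGaussian(mu, sigma, theta, width, height) returning a 2D
# array, and Feat_map(feat_map=..., name=...) exposing a .shape attribute).
# These are assumptions, not the original implementations:
import numpy as np

def mkGaussian(mu, sigma, theta, width, height):
    # Axis-aligned 2D Gaussian over a width x height grid (theta unused here).
    xs, ys = np.meshgrid(np.arange(width), np.arange(height))  # shape (H, W)
    g = np.exp(-(((xs - mu[0]) / sigma[0]) ** 2 +
                 ((ys - mu[1]) / sigma[1]) ** 2) / 2.0)
    return g.T  # (W, H); the caller transposes again, yielding (H, W)

class Feat_map:
    # Thin wrapper pairing a (normalized) feature map with a name.
    def __init__(self, feat_map, name):
        self.feat_map = feat_map
        self.name = name

    @property
    def shape(self):
        return self.feat_map.shape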
def convert_pdf_to_thumbnail(thumbnail_path, thumbnail_size, thumbnail_type):
    thumbnail = '"' + thumbnail_path + '/page_%d_' + thumbnail_type + '.jpg"'
    # This command converts the pdf into a thumbnail for each slide and stores
    # them in thumbnail_path.
    try:
        thumbnail_process = subprocess.Popen(
            'convert "' + glob.glob(thumbnail_path + '/*.pdf')[0] + '" -resize ' +
            thumbnail_size + ' -normalize -auto-level -quality 100 -background white ' +
            thumbnail,
            stdout=subprocess.PIPE, shell=True)
        thumbnail_process.wait()
    except Exception as e:
        logger.exception(
            "Thumbnail generation failed for %s with error %s" % (thumbnail_path, str(e)))
    thumbnail_list = glob.glob(thumbnail_path + '/*' + thumbnail_type + '.jpg')
    # Sort the list of thumbnail paths.
    if thumbnail_list:
        return utils.sorted_nicely(thumbnail_list)
    else:
        return []
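# For reference, the assembled ImageMagick command looks like this for a
# hypothetical deck at /data/deck/slides.pdf with thumbnail_size='320x240'
# and thumbnail_type='small' (paths are illustrative only); ImageMagick
# substitutes %d with the zero-based page index, producing one JPEG per page:
#
#   convert "/data/deck/slides.pdf" -resize 320x240 -normalize -auto-level \
#       -quality 100 -background white "/data/deck/page_%d_small.jpg"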
def get_model_info_as_str():
    rtn = []
    d = vars(FLAGS)
    for k in sorted_nicely(d.keys()):
        v = d[k]
        s = '{0:26} : {1}'.format(k, v)
        rtn.append(s)
    rtn.append('{0:26} : {1}'.format('ts', get_ts()))
    return '\n'.join(rtn)
def get_model_info_as_command():
    rtn = []
    d = vars(FLAGS)
    for k in sorted_nicely(d.keys()):
        v = d[k]
        s = '--{}={}'.format(k, v)
        rtn.append(s)
    return 'python {} {}'.format(join(get_our_dir(), 'main.py'), ' '.join(rtn))
def iterate_get_graphs(dir):
    graphs = []
    for file in sorted_nicely(glob(dir + '/*.gexf')):
        gid = int(basename(file).split('.')[0])
        g = nx.read_gexf(file)
        g.graph['gid'] = gid
        graphs.append(g)
        if not nx.is_connected(g):
            print('{} not connected'.format(gid))
    return graphs
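# sorted_nicely, used throughout these snippets, is a "natural" (human) sort:
# it orders embedded integers numerically, so '2.gexf' precedes '10.gexf',
# which plain lexicographic sort would not guarantee. A minimal sketch of the
# common recipe (the original utils implementation may differ):
import re

def sorted_nicely(iterable):
    def alphanum_key(s):
        # Split into alternating non-digit/digit runs; compare digits as ints.
        return [int(tok) if tok.isdigit() else tok.lower()
                for tok in re.split(r'(\d+)', str(s))]
    return sorted(iterable, key=alphanum_key)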
def get_model_info_as_str(model_info_table=None):
    rtn = []
    d = FLAGS.flag_values_dict()
    for k in sorted_nicely(d.keys()):
        v = d[k]
        s = '{0:26} : {1}'.format(k, v)
        rtn.append(s)
        if model_info_table:
            model_info_table.append([k, '**{}**'.format(v)])
    rtn.append('{0:26} : {1}'.format('ts', get_ts()))
    return '\n'.join(rtn)
def get_pogo_versions(self):
    versions = []
    for device, serial in self.devices.items():
        cmd = "adb -s %s shell dumpsys package com.nianticlabs.pokemongo | grep versionName" % serial
        ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
        version = ps.communicate()[0].decode("utf-8").strip().replace("versionName=", "")
        if version != "0.149.1":
            version += "(outdated)"
        versions.append("%s: %s : %s" % (device, serial, version))
    return "\n".join(sorted_nicely(versions))
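# For illustration, the return value is a newline-joined, naturally sorted
# list of "device: serial : version" rows. Device names, serials, and versions
# below are hypothetical; note that natural sort places atv2 before atv10:
#
#   atv2: 192.168.1.12:5555 : 0.147.1(outdated)
#   atv10: 192.168.1.30:5555 : 0.149.1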
def clean_up():
    rp = get_result_path()
    for file in sorted_nicely(glob('{}/{}'.format(rp, f))):
        bnf = basename(file)
        print_info(file, bnf)
        t = prompt('Delete? [y/n]', ['y', 'n'])
        if t == 'y':
            exec('rm -rf {}'.format(file))
        elif t == 'n':
            print('Skip')
        else:
            assert False
    print('Done')
def iterate_get_graphs(dir):
    if logging_enabled == True:
        print("- Entered data::iterate_get_graphs Global Method")
    graphs = []
    for file in sorted_nicely(glob(dir + '\\*.gexf')):
        gid = int(basename(file).split('.')[0])
        g = nx.read_gexf(file)
        g.graph['gid'] = gid
        graphs.append(g)
        if not nx.is_connected(g):
            print('info: {} not connected'.format(gid))
    return graphs
def get_flags_with_prefix_as_list(prefix):
    rtn = []
    d = vars(FLAGS)
    i_check = 1  # one-based
    for k in sorted_nicely(d.keys()):
        v = d[k]
        sp = k.split(prefix)
        if len(sp) == 2 and sp[0] == '' and sp[1].startswith('_'):
            id = int(sp[1][1:])
            if i_check != id:
                raise ValueError('Wrong flag format {}={} '
                                 '(should start from _1)'.format(k, v))
            rtn.append(v)
            i_check += 1
    return rtn
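# Example of the flag convention get_flags_with_prefix_as_list expects, using
# a hypothetical prefix 'layer': flags must be named <prefix>_1, <prefix>_2,
# ... in a contiguous one-based sequence, and their values come back in order:
#
#   --layer_1=conv --layer_2=pool --layer_3=dense
#   get_flags_with_prefix_as_list('layer')  # -> ['conv', 'pool', 'dense']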
def Comic(archive_path, user=None, page=0):
    """Return an oc with all pages in archive_path.

    If page > 0, return pages [page - Prefs['resume_length']:].
    """
    oc = ObjectContainer(title2=unicode(os.path.basename(archive_path)), no_cache=True)
    try:
        archive = archives.get_archive(archive_path)
    except archives.ArchiveError as e:
        Log.Error(e)
        return error_message('bad archive', 'unable to open archive: {}'.format(archive_path))
    for f in utils.sorted_nicely(archive.namelist()):
        page_title, ext = utils.splitext(f)
        if not ext or ext.lower() not in utils.IMAGE_FORMATS:
            continue
        decoration = None
        if page > 0:
            m = utils.PAGE_NUM_REGEX.search(f)
            if m:
                page_num = int(m.group(1))
                if page_num < page - int(Prefs['resume_length']):
                    continue
                if page_num <= page:
                    decoration = '>'
        page_title = utils.basename(page_title)
        if decoration is not None:
            page_title = '{} {}'.format(decoration, page_title)
        if type(page_title) != unicode:
            try:
                page_title = page_title.decode('cp437')
            except Exception:
                try:
                    page_title = unicode(page_title, errors='replace')
                except Exception:
                    pass
        oc.add(CreatePhotoObject(
            media_key=Callback(GetImage,
                               archive_path=String.Encode(archive_path),
                               filename=String.Encode(f),
                               user=user,
                               extension=ext.lstrip('.'),
                               time=int(time.time()) if bool(Prefs['prevent_caching']) else 0),
            rating_key=hashlib.sha1('{}{}{}'.format(archive_path, f, user)).hexdigest(),
            title=page_title,
            thumb=utils.thumb_transcode(Callback(get_thumb,
                                                 archive_path=archive_path,
                                                 filename=f))))
    return oc
def Comic(archive_path, user=None, page=0):
    """Return an oc with all pages in archive_path.

    If page > 0, return pages [page - Prefs['resume_length']:].
    """
    oc = ObjectContainer(title2=unicode(os.path.basename(archive_path)), no_cache=True)
    try:
        archive = archives.get_archive(archive_path)
    except archives.ArchiveError as e:
        Log.Error(e)
        return error_message('bad archive', 'unable to open archive: {}'.format(archive_path))
    for f in utils.sorted_nicely(archive.namelist()):
        page_title, ext = utils.splitext(f)
        if not ext or ext not in utils.IMAGE_FORMATS:
            continue
        decoration = None
        if page > 0:
            m = utils.PAGE_NUM_REGEX.search(f)
            if m:
                page_num = int(m.group(1))
                if page_num < page - int(Prefs['resume_length']):
                    continue
                if page_num <= page:
                    decoration = '>'
        page_title = utils.basename(page_title)
        if decoration is not None:
            page_title = '{} {}'.format(decoration, page_title)
        if type(page_title) != unicode:
            try:
                page_title = page_title.decode('cp437')
            except Exception:
                try:
                    page_title = unicode(page_title, errors='replace')
                except Exception:
                    pass
        oc.add(CreatePhotoObject(
            media_key=Callback(GetImage,
                               archive_path=String.Encode(archive_path),
                               filename=String.Encode(f),
                               user=user,
                               extension=ext.lstrip('.'),
                               time=int(time.time()) if bool(Prefs['prevent_caching']) else 0),
            rating_key=hashlib.sha1('{}{}{}'.format(archive_path, f, user)).hexdigest(),
            title=page_title,
            thumb=utils.thumb_transcode(Callback(get_thumb,
                                                 archive_path=archive_path,
                                                 filename=f))))
    return oc
def iterate_get_graphs(dir, check_connected=True, natts=(), eatts=()):
    graphs = []
    for file in sorted_nicely(glob(join(dir, '*.gexf'))):
        gid = int(basename(file).split('.')[0])
        g = nx.read_gexf(file)
        g.graph['gid'] = gid
        if not nx.is_connected(g):
            msg = '{} not connected'.format(gid)
            if check_connected:
                raise ValueError(msg)
            else:
                print(msg)
        # Assumes the default node mapping of convert_node_labels_to_integers.
        nlist = sorted(g.nodes())
        g.graph['node_label_mapping'] = dict(
            zip(nlist, range(0, g.number_of_nodes())))
        g = nx.convert_node_labels_to_integers(g, ordering="sorted")
        # lnids = sorted_nicely(g.nodes())  # list of (sorted) node ids
        # Must use sorted_nicely because otherwise this may result in:
        # ['0', '1', '10', '2', '3', '4', '5', '6', '7', '8', '9'].
        # Be very cautious when sorting a list of strings
        # that are supposed to be integers.
        for i, (n, ndata) in enumerate(sorted(g.nodes(data=True))):
            assert_valid_nid(n, g)
            assert i == n
            _remove_entries_from_dict(ndata, natts)
        for i, (n1, n2, edata) in enumerate(sorted(g.edges(data=True))):
            assert_valid_nid(n1, g)
            assert_valid_nid(n2, g)
            _remove_entries_from_dict(edata, eatts)
        graphs.append(RegularGraph(g))
    if not graphs:
        raise ValueError('Loaded 0 graphs from {}\n'
                         'Please download the gexf-formatted dataset'
                         ' from Google Drive and extract it under:\n{}'.format(
                             dir, get_data_path()))
    return graphs
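# _remove_entries_from_dict and assert_valid_nid are not shown in this
# snippet. Plausible minimal versions, inferred from how they are called
# above (assumptions, not the original code):
def _remove_entries_from_dict(d, keys):
    # Drop the listed keys (e.g. unwanted node/edge attributes) in place.
    for key in keys:
        d.pop(key, None)

def assert_valid_nid(nid, g):
    # After convert_node_labels_to_integers, node ids must be ints in [0, n).
    assert isinstance(nid, int) and 0 <= nid < g.number_of_nodes()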
def main():
    dirin = get_data_path() + '/{}/graph'.format(conf.infolder)
    k = float('inf')
    lesseqk = []
    glabel_map = read_graph_labels()
    info_map = {}
    disconnected = []
    files = glob(dirin + '/*.gexf')
    if conf.need_sort_:
        files = sorted_nicely(files)
    for i, file in enumerate(files):
        g = nx.read_gexf(file)
        gid = get_file_base_id(file)
        print(i, gid, g.number_of_nodes())
        if g.number_of_nodes() <= k:
            if not nx.is_connected(g):
                print(gid, 'is not connected')
                gsize = g.number_of_nodes()
                # Keep only the largest connected component.
                # (connected_component_subgraphs was removed in networkx 2.4.)
                g = max(nx.connected_component_subgraphs(g), key=len)
                grmd = gsize - g.number_of_nodes()
                assert grmd > 0
                g_info = 'rm_{}_nodes'.format(grmd)
                disconnected.append(g)
            else:
                g_info = ''
                lesseqk.append(g)
            info_map[gid] = g_info
            g.graph['gid'] = gid
            g.graph['label'] = glabel_map[gid]
            for node, d in g.nodes(data=True):
                type = d['node_class']
                if conf.has_node_type:
                    d.pop('node_class')
                    d['type'] = type
            for edge in g.edges_iter(data=True):  # edges_iter: networkx 1.x API
                del edge[2]['weight']
    print(len(lesseqk))
    gen_dataset(lesseqk)
    gen_dataset(disconnected)
    save_glabels_as_txt(get_data_path() + '/{}/glabels'.format(conf.outfolder), glabel_map)
    save_glabels_as_txt(get_data_path() + '/{}/info'.format(conf.outfolder), info_map)
try:
    state_dict = torch.load(opts.checkpoint)
    trainer.gen.load_state_dict(state_dict["2"])
except:
    sys.exit("Cannot load the checkpoints")

# Send the trainer to cuda
trainer.cuda()
trainer.eval()

# Set param new_size
new_size = config["new_size"]

# Define the list of non-flooded images
list_non_flooded = glob.glob(opts.input + "*")
list_non_flooded = sorted_nicely(list_non_flooded)

# Define list of masks:
list_masks = glob.glob(opts.mask_dir + "*")
list_masks = sorted_nicely(list_masks)

if len(list_non_flooded) != len(list_masks):
    sys.exit("Image list and mask list differ in length")

# Assert there are some elements inside
if len(list_non_flooded) == 0:
    sys.exit("Image list is empty. Please ensure opts.input ends with a /")
    class_names=class_names)
smartmov.load_models('unet',
                     model_unet=MODELS_UNET_DIR + "unet_hall.h5",
                     shape_unet=s,
                     timestep=TIMESTEP)

#%% Load images
IMAGES_DIR = os.path.join(
    DATASET_DIR, "PETS2006_organized2/input/"
)  # Directory of images to evaluate (must match the U-Net loaded above)
GT_DIR = os.path.join(
    DATASET_DIR, "PETS2006_organized2/groundtruth/"
)  # Directory of the ground truth corresponding to these images

input_list = sorted_nicely(
    glob.glob(IMAGES_DIR + "*.jpg"))  # Ordered list of images to evaluate
gt_list = sorted_nicely(
    glob.glob(GT_DIR + "*.png"))  # Ordered list of ground-truth images

#%% Build the ground truth used to evaluate class correspondence
pets_img = [15, 113, 16, 7]
pets_nb = [1, 3, 2, 1]
nb_occurence_gt_pets = []
for i in range(len(pets_img)):
    for j in range(pets_img[i]):
        nb_occurence_gt_pets.append(([1, 2], [pets_nb[i], 0]))
gt_classes = nb_occurence_gt_pets

#%% Create the video
def __reboot_all(self):
    outputs = []
    for device, serial in self.devices.items():
        output = self.__reboot(device, serial)
        outputs.append("%s : %s : %s" % (device, serial, output))
    return "\n".join(sorted_nicely(outputs))
#%% Convert the Mask-RCNN to inference mode so predictions can be made
smartmov.convert_rcnn()  # Takes a while

#%% Save the Mask-RCNN model
smartmov.save(models_to_save='rcnn',
              dir_rcnn=os.path.join(MODELS_MASKRCNN_DIR, "mask_rcnn_example.h5"))

#%% Prediction
IMAGES_DIR = os.path.join(
    ROOT_DIR, "dataset_test/input/"
)  # Directory of images to predict (must match the U-Net loaded above)

input_list = sorted_nicely(
    glob.glob(IMAGES_DIR + "*.jpg"))  # Ordered list of images to evaluate

im = []
for f in input_list:
    im.append(plt.imread(f))  # Load the images
im = np.array(im)

pred = smartmov.predict(
    im, models_to_use='all'
)  # pred will contain (global prediction, number of detected objects)
pred_unet = smartmov.predict(
    im, models_to_use='unet')  # pred_unet will contain only a binary mask
pred_rcnn = smartmov.predict(
    im, models_to_use='rcnn')  # pred_rcnn will contain the Mask-RCNN prediction
    lat_0=44.5,
    lon_0=-116.55,
    llcrnrlon=-125.0,
    llcrnrlat=44.5,
    urcrnrlon=-116.55,
    urcrnrlat=49.5,
    resolution='i',
    ax=ax,
)
m.drawmapboundary(fill_color='#ffffff', linewidth=0)  # A6CAE0
m.fillcontinents(color='Grey', alpha=0.3)
m.drawcoastlines(linewidth=0.1, color='white')
m.drawcounties()

# Plot the routes on the map.
lab_codes = sorted_nicely(list(lab_stats.keys()))
for i, code in enumerate(lab_codes):
    stats = lab_stats[code]
    lab = labs.loc[labs['recipient_code'] == code]
    lab_specific_transfers = data.loc[data['recipient_code'] == code]
    for route in lab_specific_transfers['route']:
        path = polyline.decode(route)
        lat = [x[0] for x in path]
        long = [x[1] for x in path]
        plt.plot(
            long,
            lat,
            '-',
            color=stats['color'],
            alpha=0.6,
            linewidth=2,