class Topology:
    """
    Provides methods to work on topology related features

    Attributes:
        G: graph of the network
        T: minimum spanning tree of the network
        paths: collection of paths
        hosts: information about hosts
        flows: information about flows
    """

    def __init__(self):
        """
        Initialize the topology with an empty graph and set of paths
        """
        self.G = nx.Graph()
        self.paths = Paths(self.G)
        self.reset()
        self.hosts = dict()
        self.flows = dict()

    def reset(self):
        """
        Recompute minimum spanning tree and reset paths
        """
        self.T = nx.minimum_spanning_tree(self.G)
        self.paths.reset()
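# --- Usage sketch (not from the original source) ---
# A minimal, hedged example of driving Topology above. It assumes only the
# interface visible in the class: networkx imported as nx and Paths wrapping
# the graph. Node names and weights are made up for illustration.
topo = Topology()
topo.G.add_edge("s1", "s2", weight=1)
topo.G.add_edge("s2", "s3", weight=2)
topo.G.add_edge("s1", "s3", weight=10)
topo.reset()  # recompute T; the MST keeps s1-s2 and s2-s3, drops s1-s3
print(sorted(topo.T.edges()))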
def __init__(self, path=None, create=False):
    def pool_realpath(p):
        return join(realpath(p), ".pool")

    def get_default_path(create):
        path_cwd = os.getcwd()
        path_env = os.environ.get("POOL_DIR") or path_cwd
        if create:
            # When creating, avoid a location that already holds a pool.
            if isdir(pool_realpath(path_env)):
                return path_cwd
            else:
                return path_env
        else:
            # When opening, prefer an existing pool in the working directory.
            if isdir(pool_realpath(path_cwd)):
                return path_cwd
            else:
                return path_env

    if path is None:
        path = get_default_path(create)
    path = pool_realpath(path)
    Paths.__init__(self, path)
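# --- Usage sketch (not from the original source) ---
# Illustrates the resolution order implemented above; the class name "Pool"
# and the paths are hypothetical. POOL_DIR wins over the working directory
# unless the isdir() checks redirect the choice.
import os

os.environ.pop("POOL_DIR", None)
pool = Pool()                     # falls back to <cwd>/.pool
os.environ["POOL_DIR"] = "/srv/pools"
pool = Pool(create=True)          # targets /srv/pools/.pool while no pool exists there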
def load_database_file(self):
    Paths.create_paths()
    try:
        with open(Paths.DATABASE_FILE, "rb") as f:
            self.DEVICES = pickle.load(f)
    except FileNotFoundError:
        # Create an empty database file if none exists
        with open(Paths.DATABASE_FILE, "wb") as f:
            pickle.dump([], f)
        self.DEVICES = []
def __init__(self, material, thickness, flipped=False):
    super().__init__()
    self._cuts = Paths()
    self._lines = Paths()
    self._regions = []
    self._material = material
    self._thickness = thickness
    self._flipped = flipped
def load_log_filename(self):
    Paths.create_paths()
    try:
        with open(Paths.LOG_PATHNAME_FILE, "rb") as f:
            return f.read().decode("UTF-8")
    except FileNotFoundError:
        return None
def export2txt_words4exam(words4exam: List[str]):
    """TODO: Save the word list so it can be used to analyze how to avoid
    repeating IT terms across consecutive days."""
    dirpath = Paths().DIR_words4analyze
    # Create the output directory if it does not exist yet
    make_newdir(dirpath)
    # Save the word list under today's date
    newfile = Paths().gen_FILE_WORD_LIST
    shutil.copy(Paths().PATH_template_1st, dirpath + newfile)
def read_password_hash(self):
    Paths.create_paths()
    try:
        with open(Paths.PASSWORD_FILE, "rb") as f:
            return f.read()
    except FileNotFoundError:
        if self.LOGGER is not None:
            self.LOGGER.log("Password file doesn't exist")
        return None
def upload2slack(dirpath: str, newfile: str):
    """Send the generated file to the designated Slack channel."""
    # Validate inputs, falling back to the default exam-paper location
    if dirpath is None:
        dirpath = Paths().DIR_exam_papers
    if newfile is None:
        newfile = Paths().FILE_EXAM_PAPER
    # Look for today's exam file; exit with an error if it is missing
    if not os.path.exists(dirpath + newfile):
        print(" Today's file not found: " + dirpath + newfile)
        sys.exit(1)
    URL_UPLOAD = "https://slack.com/api/files.upload"
    with open(dirpath + newfile, 'rb') as f:
        files = {'file': f.read()}
    p = {
        'token': env.TOKEN,
        'channels': env.CHANNEL_ID,
        'filename': newfile,
        'filetype': 'md',
        'initial_comment': "―【説明】―――――――――――――――――"
                           "\n *添付ファイルをダウンロード後、以下を行いアップロードして下さい。*"
                           "\n 1. 解答欄に記述"
                           "\n 2. 記述後、ファイルを保存"
                           "\n 3. ファイル名の変更(\"~_name.md\" ← name を変更する)"
                           "\n"
                           "\n―【解答方法】―――――――――――――――"
                           "\n *合計 10P に達するように、解答用紙記載の用語リストを用いて文章を作成せよ。*"
                           "\n ・参考リンクに記載のサイト等を利用し、自身で意味を調べて解答すること。"
                           "\n ・1つの文章は、句点までを1文とみなす。"
                           "\n ・1文あたり何語使用するかで加点が異なる。"
                           "\n - 1語のみ : 1P UP↑"
                           "\n - 2語 : 3P UP↑"
                           "\n - 3語以上 : 5P UP↑"
                           "\n ・用語リスト内から選択する際、1語以上であれば使用語数に制限はない。"
                           "\n ・各文章間で用語が重複している場合、その文章は無効とする。"
                           "\n ",
        'title': "解答用紙_IT用語テスト",
    }
    r = requests.post(url=URL_UPLOAD, params=p, files=files)
    if r.status_code == 200:
        print(' Uploaded.')
    else:
        print(' Upload failed: ', r)
def __init__(self, name, dir_path=None):
    self._logger = Logger("Fixture {0}".format(name))
    path_name = Paths().get_fixture_path(name, only_name=True)
    if dir_path:
        conf_rel_path = Paths().fixture_conf_file_rel_pattern.format(path_name)
        self._conf_path = os.path.join(dir_path, conf_rel_path)
    else:
        self._conf_path = Paths().get_fixture_path(name)
    self.model = self._load_model(name)
    self.history = grid_history.GriddingHistory(self)
def __init__(self):
    self.paths = Paths()
    self.data_conf = ConfigParser.SafeConfigParser()
    self.read()
def generate_and_save_image(plots_data, iter_no, image_label, scatter_size=0.5, log=False):
    if log:
        print('------------------------------------------------------------')
        print('%s: step %i: started generation' % (exp_name, iter_no))

    figure = viz_utils.get_figure(plots_data, scatter_size)
    if log:
        print('%s: step %i: got figure' % (exp_name, iter_no))

    figure_name = '%s-%05d.png' % (image_label, iter_no)
    figure_path = Paths.get_result_path(figure_name)
    figure.savefig(figure_path)
    plt.close(figure)

    # Reload the saved figure and convert it to a CHW array with a leading
    # batch dimension, dropping the alpha channel.
    img = np.array(im.imread(figure_path), dtype=np.uint8)
    img = img[:, :, :-1]
    img = img.transpose(2, 0, 1)
    img = np.expand_dims(img, 0)

    if log:
        print('%s: step %i: visualization saved' % (exp_name, iter_no))
        print('------------------------------------------------------------')
    return img, iter_no
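# --- Usage sketch (not from the original source) ---
# The array returned above is NCHW uint8, which matches what a TensorBoard
# SummaryWriter expects from add_images. plots_data, viz_utils, and exp_name
# are assumed to come from the surrounding module.
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter()
img, step = generate_and_save_image(plots_data, iter_no=10, image_label='gen')
writer.add_images('generated', img, step, dataformats='NCHW')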
def visualize_embeddings(node, split, threshold, iter_no, phase=None):
    with tr.no_grad():
        if split == 'train':
            data = dl_set[node.id].data[split]
        elif split == 'test':
            data = x_seed
        Z = node.post_gmm_encode(data)
        labels = node.gmm_predict_test(Z, threshold).tolist()

    # Project the embeddings to 2D for plotting.
    pca_z = PCA(n_components=2)
    z_transformed = pca_z.fit_transform(Z)

    color = ['r', 'b', 'g']
    colors = [color[int(x)] for x in labels]

    b = 20
    fig = plt.figure(figsize=(6.5, 6.5))
    ax = fig.add_subplot(111)
    ax.set_xlim(-b, b)
    ax.set_ylim(-b, b)
    ax.scatter(z_transformed[:, 0], z_transformed[:, 1], s=0.5, c=colors)

    node.trainer.writer[split].add_figure(
        node.name + '_' + phase + '_plots', fig, iter_no)
    path = Paths.get_result_path(
        node.name + '_' + split + '_embedding_plots/' + phase + '_plot_%03d' % iter_no)
    fig.savefig(path)
    plt.close(fig)
def update_password_hash(self, cleartext_password):
    # Generate the hash
    salt_handler = SaltHandler()
    password_handler = PasswordHandler(salt_handler, cleartext_password)
    key = password_handler.generate()
    # Write the hash to the file
    Paths.create_paths()
    with open(Paths.PASSWORD_FILE, "wb") as f:
        f.write(key)
    # Log feedback
    if self.LOGGER is not None:
        self.LOGGER.log("Password was updated")
def __init__(self, quiet):
    from os.path import isfile

    # Get a name for the log file
    log_file = self.load_log_filename()
    if log_file is None:
        # If no name is stored on the disk, generate a new one.
        log_file = Paths.LOG_DIR + self.generate_log_name()

    # Ensure that the log file exists
    if not isfile(log_file):
        Paths.create_paths()
        open(log_file, "wb").close()  # Initialize empty file

    self.LOG_FILE = log_file
    self.dump_log_filename()
    self.QUIET = quiet
def plot_mean_axis_distribution(node, split, iter_no, phase):
    mean0 = node.kmeans.means[0]
    mean1 = node.kmeans.means[1]
    # Unit vector along the axis joining the two cluster means.
    direction = (mean1 - mean0) / np.linalg.norm(mean1 - mean0)

    if split == 'train':
        data = dl_set[node.id].data['train']
    elif split == 'test':
        data = x_seed
    Z = node.post_gmm_encode(data)

    # Project every embedding onto the mean-to-mean axis.
    projection = np.zeros(Z.shape)
    for j in range(Z.shape[0]):
        projection[j] = mean0 + direction * np.dot(Z[j] - mean0, direction)

    for i in range(projection.shape[1]):
        plot_data_tensorboard = projection[:, i]
        plot_data = [projection[:, i], mean0[i], mean1[i]]
        plt.hist(plot_data, color=['g', 'r', 'b'])
        # plt.hist(plot_data_tensorboard, bins='auto', color=['g'])
        fig_mean_axis_histogram = plt.gcf()
        node.trainer.writer[split].add_histogram(
            node.name + '_' + phase + '_mean_axis_' + str(i),
            plot_data_tensorboard, iter_no)
        # node.trainer.writer[split].add_image(node.name + '_mean_axis_' + str(i), fig_mean_axis_histogram, iter_no)
        path_mean_axis_hist = Paths.get_result_path(
            node.name + '_' + split + '_mean_axis_histogram/' + phase + '%03d_%01d' % (iter_no, i))
        fig_mean_axis_histogram.savefig(path_mean_axis_hist)
        plt.close(fig_mean_axis_histogram)
def load_node(node_name, tag=None, iter=None):
    filename = node_name
    if tag is not None:
        filename += '_' + str(tag)
    if iter is not None:
        filename += ('_%05d' % iter)
    filepath = os.path.join(Paths.weight_dir_path(''), filename)
    gnode = GNode.load(filepath, Model=ImgGAN)
    return gnode
def listen(self):
    # Initialize pyudev monitor
    ctx = Context()
    mon = Monitor.from_netlink(ctx)
    mon.start()

    # Start listening and send all new connections to another thread
    LOGGER.log("Listening for USB devices")
    try:
        for dev in iter(mon.poll, None):
            connection_thread = Thread(target=Listener.connection, args=[self, dev])
            connection_thread.daemon = True
            connection_thread.start()
    except KeyboardInterrupt:
        LOGGER.log("Exited by user!")
        Paths.delete_tmp_dir()
        exit()
def save_node(node, tag=None, iter=None):
    # type: (GNode, str, int) -> None
    filename = node.name
    if tag is not None:
        filename += '_' + str(tag)
    if iter is not None:
        filename += ('_%05d' % iter)
    filename = filename + '.pt'
    filepath = os.path.join(Paths.weight_dir_path(''), filename)
    node.save(filepath)
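# --- Usage sketch (not from the original source) ---
# Round trip for the save_node/load_node pair. Note the asymmetry in the
# functions above: save_node appends '.pt' while load_node does not, so this
# sketch assumes GNode.load resolves the suffix itself. The node name, tag,
# and iteration are illustrative.
save_node(node, tag='best', iter=1200)             # writes node1_best_01200.pt
node2 = load_node('node1', tag='best', iter=1200)  # resolves node1_best_01200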
def export2md_paper(words4exam: List[str]):
    dirpath = Paths().DIR_exam_papers
    # Create the output directory if it does not exist yet
    make_newdir(dirpath)
    # Copy the template and save it as a separate file under today's date
    newfile = Paths().gen_FILE_EXAM_PAPER()
    shutil.copy(Paths().PATH_template_1st, dirpath + newfile)
    # Write the words received as the argument into the word-list section
    with open(dirpath + newfile, 'a', encoding='utf-8') as fst:
        for word in words4exam:
            fst.write('- ' + word + '\n')
        with open(Paths().PATH_template_2nd, 'r', encoding='utf-8') as snd:
            read_data = snd.read()
        fst.write(read_data)
    print(' Exported.')
    upload2slack(dirpath, newfile)
def add_path(self, aper, *path):
    if self._expansion > 0.0:
        if isinstance(aper, tuple):
            s = gerbertools.Shape(1e6)
            s.append_int(aper)
            s = s.offset(self._expansion, True)
            assert len(s) == 1
            aper = tuple(s.get_int(0))
        else:
            aper += from_mm(self._expansion)
    paths = self._paths.get(aper, None)

    # Due to roundoff error during rotation, some almost-identical
    # (actually identical in the gerber file) apertures can appear
    # for region apertures. To avoid this, look for apertures that
    # are "close enough".
    if paths is None and isinstance(aper, tuple):
        for ap2 in self._paths:
            if not isinstance(ap2, tuple):
                continue
            if len(aper) != len(ap2):
                continue
            err = 0
            for c1, c2 in zip(aper, ap2):
                err += (c1[0] - c2[0])**2
                err += (c1[1] - c2[1])**2
                if err > 100:
                    break
            else:
                aper = ap2
                paths = self._paths[aper]
                break
    if paths is None:
        paths = Paths()
        self._paths[aper] = paths
    paths.add(*path)
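# --- Worked example (not from the original source) ---
# The "close enough" test above sums squared integer-coordinate errors and
# rejects a candidate as soon as the total exceeds 100. A self-contained
# version of that metric, with made-up coordinates:
def aperture_close(a, b, tol=100):
    """True if the accumulated squared coordinate error stays within tol."""
    if len(a) != len(b):
        return False
    err = 0
    for (x1, y1), (x2, y2) in zip(a, b):
        err += (x1 - x2) ** 2 + (y1 - y2) ** 2
        if err > tol:
            return False
    return True

print(aperture_close([(0, 0), (1000, 3)], [(1, -2), (998, 5)]))   # True  (err = 13)
print(aperture_close([(0, 0), (1000, 3)], [(50, 0), (1000, 3)]))  # False (err = 2500)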
def fill_model(self, model):
    fixture_name = model['fixture']
    if fixture_name in self:
        fixture = self[fixture_name]
        model['im-original-scale'] = fixture.model.scale
        model['fixture-file'] = fixture.path
    else:
        model['im-original-scale'] = 1.0
        model['im-scale'] = 1.0
        model['fixture-file'] = Paths().get_fixture_path(model['fixture'], only_name=True)
def update(self):
    directory = Paths().fixtures
    extension = ".config"
    list_fixtures = map(lambda x: x.split(extension, 1)[0],
                        [fixture for fixture in os.listdir(directory)
                         if fixture.lower().endswith(extension)])
    self._fixtures = dict()
    for f in list_fixtures:
        if f.lower() != "fixture":
            fixture = FixtureSettings(f, directory)
            self._fixtures[fixture.model.name] = fixture
def v_slide(params):
    """Scan one vertical strip of the slide and queue tissue tiles."""
    paths = Paths()
    try:
        scn_file = OpenSlide(paths.slice_80)
    except OpenSlideUnsupportedFormatError:
        logging.error("OpenSlideUnsupportedFormatError!")
        return
    except OpenSlideError:
        logging.error("OpenSlideError!")
        return
    try:
        start_point = params["start_point"]
        x0 = start_point[0]
        y0 = start_point[1]
        bound_y = params["bound_y"]
        tile_path = params["tile_path"]
        save_tiles = params["save_tiles"]
        q = params["queue"]
        AVG_THRESHOLD = 170
        pid = os.getpid()
        data = {}
        while y0 < bound_y:
            img = scn_file.read_region((x0, y0), 0, (299, 299))
            green_c_avg = np.average(np.array(img)[:, :, 1])
            # Keep only tiles whose green-channel average suggests tissue.
            if green_c_avg < AVG_THRESHOLD:
                suffix = "_" + str(x0) + "_" + str(y0) + ".png"
                file_name = "scn80" + suffix
                arr = np.array(img)[:, :, 0:3]  # drop the alpha channel
                data['pred'] = arr
                data['xlabel'] = np.array([x0])
                data['ylabel'] = np.array([y0])
                q.put(dict(data))
                if save_tiles:
                    img.save(os.path.join(tile_path, file_name))
            y0 += 150
        return pid
    finally:
        scn_file.close()
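# --- Usage sketch (not from the original source) ---
# One hypothetical way to fan v_slide out over vertical strips with a
# multiprocessing pool. The strip geometry (299-wide columns) and bound_y
# are assumptions; only the params keys are taken from the function above.
import multiprocessing as mp

if __name__ == '__main__':
    manager = mp.Manager()
    q = manager.Queue()
    jobs = [{'start_point': (x0, 0), 'bound_y': 10000, 'tile_path': './tiles',
             'save_tiles': False, 'queue': q} for x0 in range(0, 2990, 299)]
    with mp.Pool(4) as pool:
        pool.map(v_slide, jobs)
    while not q.empty():
        tile = q.get()  # {'pred': 299x299x3 array, 'xlabel': [x0], 'ylabel': [y0]}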
def __init__(self, conf, actions_options, time_units, edit):
    wx.Dialog.__init__(self, None, title=_('Add action'), size=(330, 290))
    panel = wx.Panel(self)
    self.conf = conf
    self.actions_options = actions_options

    list_actions = []
    for i in self.actions_options:
        list_actions.append(i[0])

    wx.StaticText(panel, label=_('action'), pos=(10, 10))
    self.action_select = wx.ComboBox(panel, choices=list_actions, style=wx.CB_READONLY,
                                     size=(310, 32), pos=(10, 35))
    self.action_select.Bind(wx.EVT_COMBOBOX, self.onSelect)

    wx.StaticText(panel, label=_('data'), pos=(10, 70))
    self.data = wx.TextCtrl(panel, size=(310, 32), pos=(10, 95))

    wx.StaticText(panel, label=_('repeat after'), pos=(10, 130))
    self.repeat = wx.TextCtrl(panel, size=(150, 32), pos=(10, 155))
    self.repeat.Disable()
    self.repeat_unit = wx.ComboBox(panel, choices=time_units, style=wx.CB_READONLY,
                                   size=(150, 32), pos=(170, 155))
    self.repeat_unit.Bind(wx.EVT_COMBOBOX, self.onSelectUnit)
    self.repeat_unit.SetValue(_('no repeat'))

    if edit != 0:
        self.action_select.SetValue(list_actions[edit[1]])
        self.data.SetValue(edit[2])
        if edit[3] != 0.0:
            self.repeat.SetValue(str(edit[3]))
            self.repeat.Enable()
            self.repeat_unit.SetValue(time_units[edit[4]])

    cancelBtn = wx.Button(panel, wx.ID_CANCEL, pos=(70, 205))
    okBtn = wx.Button(panel, wx.ID_OK, pos=(180, 205))

    paths = Paths()
    self.home = paths.home
    self.currentpath = paths.currentpath
def z_histogram_plot(node, split, iter_no, phase):
    with tr.no_grad():
        if split == 'train':
            data = dl_set[node.id].data[split]
        elif split == 'test':
            data = x_seed
        Z = node.post_gmm_encode(data)

    # One histogram per embedding dimension.
    for i in range(Z.shape[1]):
        plot_data = Z[:, i]
        plt.hist(plot_data)
        fig_histogram = plt.gcf()
        node.trainer.writer[split].add_histogram(
            node.name + '_' + phase + '_embedding_' + str(i), plot_data, iter_no)
        path_embedding_hist = Paths.get_result_path(
            node.name + '_' + split + '_embedding_histogram/' + phase + 'embedding_%03d_%01d' % (iter_no, i))
        fig_histogram.savefig(path_embedding_hist)
        plt.close(fig_histogram)
def __init__(self, language):
    paths = Paths()
    locale_dir = paths.currentpath + '/locale'
    gettext.install('openplotter', locale_dir, unicode=False)
    # Build a translation for every supported language, then install the
    # requested one (same behaviour as the original unrolled code).
    translations = {
        lang: gettext.translation('openplotter', locale_dir, languages=[lang])
        for lang in ('en', 'ca', 'es', 'fr', 'nl', 'de')
    }
    if language in translations:
        translations[language].install()
def main():
    args = parse_args()
    paths = Paths()
    checkpoints_path = str(paths.CHECKPOINTS_PATH)
    logging_path = str(paths.LOG_PATH)

    callbacks = [PrintCallback()]
    checkpoint_callback = ModelCheckpoint(filepath=checkpoints_path + '/{epoch}-{val_acc:.3f}',
                                          save_top_k=True,
                                          verbose=True,
                                          monitor='val_acc',
                                          mode='max',
                                          prefix='')
    early_stop_callback = EarlyStopping(monitor='val_acc',
                                        mode='max',
                                        verbose=False,
                                        strict=False,
                                        min_delta=0.0,
                                        patience=2)
    gpus = gpu_count()
    log_save_interval = args.log_save_interval
    logger = TensorBoardLogger(save_dir=logging_path, name='tuna-log')
    logger.log_hyperparams(args)
    max_epochs = args.epochs
    model = LeNet(hparams=args, paths=paths)

    trainer = Trainer(
        callbacks=callbacks,
        checkpoint_callback=checkpoint_callback,
        early_stop_callback=early_stop_callback,
        fast_dev_run=True,
        gpus=gpus,
        log_save_interval=log_save_interval,
        logger=logger,
        max_epochs=max_epochs,
        min_epochs=1,
        show_progress_bar=True,
        weights_summary='full',
    )
    trainer.fit(model)
def get_marker_path(self):
    paths = Paths()
    if self.model.orentation_mark_path:
        marker_paths = (self.model.orentation_mark_path,
                        os.path.join(paths.images, os.path.basename(self.model.orentation_mark_path)),
                        paths.marker)
    else:
        marker_paths = (paths.marker,)

    # Return the first candidate path that can actually be opened.
    for path in marker_paths:
        try:
            with open(path, 'rb') as _:
                self._logger.info("Using marker at '{0}'".format(path))
                return path
        except IOError:
            self._logger.warning("The designated orientation marker file does not exist ({0})".format(path))
    return None
def _configure(self):
    #################################
    # Global logging level
    #################################
    p = self.p
    u.verbose.set_level(p.verbose_level)

    #################################
    # Global data type switch
    #################################
    self.data_type = p.data_type
    assert p.data_type in ['single', 'double']
    self.FType = np.dtype(
        'f' + str(np.dtype(np.typeDict[p.data_type]).itemsize)).type
    self.CType = np.dtype(
        'c' + str(2 * np.dtype(np.typeDict[p.data_type]).itemsize)).type
    logger.info(_('Data type', self.data_type))

    #################################
    # Prepare interaction server
    #################################
    if parallel.master:
        # Create the interaction server
        self.interactor = interaction.Server(p.interaction)

        # Start the thread
        self.interactor.activate()

        # Register self as an accessible object for the client
        self.interactor.objects['Ptycho'] = self

    # Check if there is already a runtime container
    if not hasattr(self, 'runtime'):
        self.runtime = u.Param()

    # Generate all the paths
    self.paths = Paths(self.p.paths, self.runtime)
class Crawl:
    def __init__(self, root="."):
        self.root = os.path.abspath(root)
        self.paths = Paths(root=self.root)
        self.extensions = Extensions()
        self.aliases = {}

    def prepend_paths(self, *paths):
        new = Paths(paths)
        new.extend(self.paths)
        self.paths = new

    def prepend_path(self, *paths):
        self.prepend_paths(*paths)

    def append_paths(self, *paths):
        for path in paths:
            self.paths.append(path)

    def append_path(self, *paths):
        self.append_paths(*paths)

    def remove_path(self, path):
        if path in self.paths:
            self.paths.remove(path)

    def prepend_extensions(self, *extensions):
        new = Extensions(extensions)
        new.extend(self.extensions)
        self.extensions = new

    def prepend_extension(self, *extensions):
        self.prepend_extensions(*extensions)

    def append_extensions(self, *extensions):
        for extension in extensions:
            self.extensions.append(extension)

    def append_extension(self, *extensions):
        self.append_extensions(*extensions)

    def remove_extension(self, extension):
        if extension in self.extensions:
            self.extensions.remove(extension)

    def alias_extension(self, new_extension, old_extension):
        new_extension = self.extensions.normalize_element(new_extension)
        self.aliases[new_extension] = self.extensions.normalize_element(old_extension)

    def unalias_extension(self, extension):
        del self.aliases[self.extensions.normalize_element(extension)]

    def find(self, *args, **kwargs):
        return self.index().find(*args, **kwargs)

    def index(self):
        return Index(self.root, self.paths, self.extensions, self.aliases)

    def entries(self, *args):
        return self.index().entries(*args)

    def stat(self, *args):
        return self.index().stat(*args)
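# --- Usage sketch (not from the original source) ---
# Drives the Crawl class above, assuming Paths, Extensions, and Index behave
# as the method bodies suggest. Directory and extension names are made up.
crawl = Crawl(root="assets")
crawl.append_path("javascripts")         # searched after existing paths
crawl.prepend_path("vendor")             # searched before existing paths
crawl.append_extension(".js")
crawl.alias_extension(".coffee", ".js")  # resolve .coffee lookups like .js
match = crawl.find("application")        # builds a fresh Index and queries it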
def dump_log_filename(self):
    Paths.create_paths()
    with open(Paths.LOG_PATHNAME_FILE, "wb") as f:
        f.write(self.LOG_FILE.encode("UTF-8"))
def __init__(self, path=None):
    path = join(dirname(realpath(path)), ".deck")
    Paths.__init__(self, path)
def __init__(self, path=None):
    if path is None:
        path = self.PATH
    Paths.__init__(self, path)
def plot_cluster_graphs(node, split, threshold, iter_no, phase):
    no_of_classes = H.no_of_classes
    with tr.no_grad():
        if split == 'train':
            data = dl_set[node.id].data[split]
            labels = dl_set[node.id].labels[split]
        elif split == 'test':
            data = x_seed
            labels = l_seed
        Z = node.post_gmm_encode(data)
        if split == 'train':
            p = node.kmeans.pred
        elif split == 'test':
            p = node.gmm_predict_test(Z, threshold)

    # Plot the count of unassigned vs assigned labels
    # (purple -- unassigned, green -- assigned).
    unassigned_labels = [0 for i in range(no_of_classes)]
    assigned_labels = [0 for i in range(no_of_classes)]
    for i in range(len(p)):
        if p[i] == 2:
            unassigned_labels[labels[i]] += 1
        else:
            assigned_labels[labels[i]] += 1

    barWidth = 0.3
    r1 = np.arange(len(unassigned_labels))
    r2 = [x + barWidth for x in r1]

    plt.bar(r1, unassigned_labels, width=barWidth, color='purple', edgecolor='black', capsize=7)
    plt.bar(r2, assigned_labels, width=barWidth, color='green', edgecolor='black', capsize=7)
    plt.xticks([r + barWidth for r in range(len(unassigned_labels))],
               [str(i) for i in range(no_of_classes)])
    plt.ylabel('count')
    fig_assigned = plt.gcf()
    node.trainer.writer[split].add_figure(
        node.name + '_' + phase + '_assigned_labels_count', fig_assigned, iter_no)
    path_assign = Paths.get_result_path(
        node.name + '_' + split + '_assigned/' + phase + 'assigned_%03d' % (iter_no))
    fig_assigned.savefig(path_assign)
    plt.close(fig_assigned)

    # Plot the percentage of assigned labels in cluster 0 and cluster 1
    # (red -- cluster 0, blue -- cluster 1).
    l_seed_ch0 = labels[np.where(p == 0)]
    l_seed_ch1 = labels[np.where(p == 1)]
    count_ch0 = [0 for i in range(no_of_classes)]
    count_ch1 = [0 for i in range(no_of_classes)]
    prob_ch0 = [0 for i in range(no_of_classes)]
    prob_ch1 = [0 for i in range(no_of_classes)]
    for i in l_seed_ch0:
        count_ch0[i] += 1
    for i in l_seed_ch1:
        count_ch1[i] += 1
    for i in range(no_of_classes):
        if (count_ch0[i] + count_ch1[i]) != 0:
            prob_ch0[i] = count_ch0[i] * 1.0 / (count_ch0[i] + count_ch1[i])
            prob_ch1[i] = count_ch1[i] * 1.0 / (count_ch0[i] + count_ch1[i])
        else:
            prob_ch0[i] = 0
            prob_ch1[i] = 0

    plt.bar(r1, prob_ch0, width=barWidth, color='red', edgecolor='black', capsize=7)
    plt.bar(r2, prob_ch1, width=barWidth, color='blue', edgecolor='black', capsize=7)
    plt.xticks([r + barWidth for r in range(len(prob_ch0))],
               [str(i) for i in range(no_of_classes)])
    plt.ylabel('percentage')
    fig_confidence = plt.gcf()
    node.trainer.writer[split].add_figure(
        node.name + '_' + phase + '_confidence', fig_confidence, iter_no)
    path_confidence = Paths.get_result_path(
        node.name + '_' + split + '_confidence/' + phase + 'confidence_%03d' % (iter_no))
    fig_confidence.savefig(path_confidence)
    plt.close(fig_confidence)

    # Count the points that exceed the threshold of phase 1 part 2.
    aboveThresholdLabels_ch0 = [0 for i in range(no_of_classes)]
    aboveThresholdLabels_ch1 = [0 for i in range(no_of_classes)]
    for i in range(len(p)):
        if p[i] == 0:
            if distance.mahalanobis(Z[i], node.kmeans.means[0], node.kmeans.covs[0]) > threshold:
                aboveThresholdLabels_ch0[labels[i]] += 1
        elif p[i] == 1:
            if distance.mahalanobis(Z[i], node.kmeans.means[1], node.kmeans.covs[1]) > threshold:
                aboveThresholdLabels_ch1[labels[i]] += 1

    plt.bar(r1, aboveThresholdLabels_ch0, width=barWidth, color='red', edgecolor='black', capsize=7)
    plt.bar(r2, aboveThresholdLabels_ch1, width=barWidth, color='blue', edgecolor='black', capsize=7)
    plt.xticks([r + barWidth for r in range(len(aboveThresholdLabels_ch0))],
               [str(i) for i in range(no_of_classes)])
    plt.ylabel('count')
    fig_above_threshold = plt.gcf()
    node.trainer.writer[split].add_figure(
        node.name + '_' + phase + '_above_threshold', fig_above_threshold, iter_no)
    path_above_threshold = Paths.get_result_path(
        node.name + '_' + split + '_above_threshold/' + phase + '%03d' % (iter_no))
    fig_above_threshold.savefig(path_above_threshold)
    plt.close(fig_above_threshold)
def __init__(self, backup_root=None):
    if backup_root is None:
        backup_root = '/'
    Paths.__init__(self, join(backup_root, self.PATH))