def generate_configs_pubmed(expdir, dataname, model_name, gpu):
    """Generate configs for all."""
    # create experiment dir
    config_dir = os.path.join(expdir, dataname, 'configs')
    utils.makedir(config_dir)

    # default setting
    default_config_path = 'configs/default.yaml'
    with open(default_config_path, 'r') as stream:
        default_config = utils._ordered_load(stream)

    # read config for specific task
    specific_config = read_specific_config(model_name)

    # generate config for each task: replace the 'cg' placeholders in the
    # default paths with the requested model name
    task_config = default_config.copy()
    task_config['gpu'] = gpu
    task_config['task_name'] = task_config['task_name'].replace('cg', model_name)
    task_config['model_path'] = task_config['model_path'].replace('cg', model_name)
    task_config['saved_params'] = task_config['saved_params'].replace('cg', model_name)
    task_config['ev_eval_script_path'] = task_config['ev_eval_script_path'].replace(
        'cg', model_name)

    # for raw text
    predict_test_config = task_config.copy()
    gen_predict_config_pubmed(predict_test_config, specific_config, config_dir,
                              expdir, dataname)

    print('Generate configs: Done!')
    return
def run(ts_path, output_path, savefigure, faster=False):
    x = np.loadtxt(ts_path, delimiter=',')
    alphas = np.arange(0.1, 0.8, 0.2)
    output_path = join(output_path, 'hypergraph_parcellation')
    makedir(output_path)

    hypergraph_list = []
    for alpha in alphas:
        alpha = round(alpha, 2)
        print('Computing a HyperGraph at sparsity level ' + str(alpha))
        output_file = join(output_path, '_hypergraph_' + str(alpha) + '.txt')
        if not path.exists(output_file):
            hypergraph = compute_hypergraph_elastic_net(
                time_series=x, alpha=alpha,
                savefigure=savefigure + str(alpha) + '.png')
            np.savetxt(output_file, hypergraph, delimiter=',', fmt='%i')
        else:
            hypergraph = np.loadtxt(output_file, delimiter=',')
        hypergraph_list.append(hypergraph)

    np_hypergraph_list = np.asarray(hypergraph_list).astype(np.int8)
    print(np_hypergraph_list.shape)
    median_h = np.median(np_hypergraph_list, axis=0)

    figure = plt.figure(figsize=(6, 6))
    plotting.plot_matrix(median_h, figure=figure, vmax=1., vmin=-1.)
    figure.savefig(savefigure + 'median.png', dpi=200)
    plt.close(figure)
    return hypergraph_list
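# Usage sketch for `run` (illustrative only, not from the original source): writes a
# small synthetic time-series CSV and computes hypergraphs over it. All paths below
# are made up; `run` additionally needs the project's `makedir` and
# `compute_hypergraph_elastic_net` helpers, `join`/`path` from os.path, and
# nilearn.plotting in scope.
def _demo_run():
    rng = np.random.default_rng(0)
    # fake time series: 100 timepoints x 20 regions
    np.savetxt('/tmp/example_ts.txt', rng.standard_normal((100, 20)), delimiter=',')
    hypergraphs = run('/tmp/example_ts.txt', '/tmp/parcellation_demo',
                      savefigure='/tmp/parcellation_demo/hypergraph_')
    print('computed %d hypergraphs' % len(hypergraphs))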
def plot_pie(target, prefix, path_save, class_map=None, verbose=False):
    """
    Generate a pie chart of activity class distributions

    :param target: a list of activity labels corresponding to activity data segments
    :param prefix: data split, can be train, val or test
    :param path_save: path for saving the activity distribution pie chart
    :param class_map: a list of activity class names
    :param verbose: print the class distribution if True
    :return:
    """
    if not os.path.exists(path_save):
        makedir(path_save)
    if not class_map:
        class_map = [str(idx) for idx in range(len(set(target)))]
    color_map = sns.color_palette("husl", n_colors=len(class_map))  # a list of RGB tuples

    target_dict = {
        label: np.sum(target == label_idx)
        for label_idx, label in enumerate(class_map)
    }
    target_count = list(target_dict.values())
    if verbose:
        print(f"[-] {prefix} target distribution: {target_dict}")
        print("--" * 50)

    fig, ax = plt.subplots()
    ax.axis("equal")
    explode = tuple(np.ones(len(class_map)) * 0.05)
    patches, texts, autotexts = ax.pie(
        target_count,
        explode=explode,
        labels=class_map,
        autopct="%1.1f%%",
        shadow=False,
        startangle=0,
        colors=color_map,
        wedgeprops={"linewidth": 1, "edgecolor": "k"},
    )
    box = ax.get_position()
    ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
    # ax.set_title(dataset)
    ax.legend(loc="center left", bbox_to_anchor=(1.2, 0.5))
    plt.tight_layout()
    # plt.show()
    save_name = os.path.join(path_save, prefix + ".png")
    fig.savefig(save_name, bbox_inches="tight")
    plt.close()
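# Usage sketch for plot_pie (illustrative, not from the original source): plots the
# class balance of a fake label vector. Class names and output path are made up;
# numpy is assumed to be imported as np at module level.
def _demo_plot_pie():
    example_target = np.random.randint(0, 3, size=200)  # 200 fake activity labels
    plot_pie(example_target, prefix="train", path_save="demo_figures",
             class_map=["walking", "sitting", "standing"], verbose=True)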
def plot_segment(data, target, index, prefix, path_save, num_class,
                 target_pred=None, class_map=None):
    """
    Plot a data segment with corresponding activity label

    :param data: data segment
    :param target: ground-truth activity label corresponding to data segment
    :param index: index of segment in dataset
    :param prefix: data split, can be train, val or test
    :param path_save: path for saving the generated plot
    :param num_class: number of activity classes
    :param target_pred: predicted activity label corresponding to data segment
    :param class_map: a list of activity class names
    :return:
    """
    if not os.path.exists(path_save):
        makedir(path_save)
    if not class_map:
        class_map = [str(idx) for idx in range(num_class)]

    gt = int(target)
    title_color = "black"
    if target_pred is not None:
        pred = int(target_pred)
        msg = f"#{int(index)} ground-truth:{class_map[gt]} prediction:{class_map[pred]}"
        title_color = "green" if gt == pred else "red"
    else:
        # must be an f-string, otherwise the placeholders are printed literally
        msg = f"#{int(index)} ground-truth:{class_map[gt]} "

    fig, ax = plt.subplots(figsize=(5, 2))
    ax.plot(data.numpy())
    ax.set_xlim(0, data.shape[0])
    ax.set_ylim(-5, 5)
    ax.set_title(msg, color=title_color)
    plt.tight_layout()

    save_name = os.path.join(
        path_save,
        prefix + "_" + class_map[int(target)] + "_" + str(int(index)) + ".png",
    )
    fig.savefig(save_name, bbox_inches="tight")
    plt.close()
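# Usage sketch for plot_segment (illustrative, not from the original source):
# `data` is expected to be a torch tensor shaped (sequence_length, num_channels),
# since the function calls data.numpy(). All values below are made up.
def _demo_plot_segment():
    import torch  # assumed available; plot_segment relies on tensor input
    segment = torch.randn(128, 3)  # fake 128-step, 3-channel sensor window
    plot_segment(segment, target=1, index=0, prefix="val",
                 path_save="demo_figures/segments", num_class=3,
                 target_pred=2, class_map=["walking", "sitting", "standing"])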
def __init__(self, dataroot, videolist, video_len, input_shape, every_nth, crop):
    self.dataroot = dataroot
    with open(videolist, 'r') as f:
        self.lines = f.readlines()
    self.video_len = video_len
    self.every_nth = every_nth
    self.crop = crop
    self.classes = [
        'boxing', 'handwaving', 'handclapping', 'running', 'jogging', 'walking'
    ]
    self.image_size = input_shape.width
    self.lengths = []
    self.cases = []
    self.cacheroot = os.path.join(self.dataroot, 'npy_%s' % self.image_size)
    makedir(self.cacheroot)
    cache = os.path.join(
        self.cacheroot,
        'cache_%s.db' % videolist.split('/')[-1].split('_')[0])
    if cache is not None and os.path.exists(cache):
        # pickle caches must be read in binary mode under Python 3
        with open(cache, 'rb') as f:
            self.cases, self.lengths = pickle.load(f)
    else:
        for idx, line in enumerate(
                tqdm.tqdm(self.lines, desc="Counting total number of frames")):
            video_name, start_idx, end_idx = line.split()
            start_idx, end_idx = int(start_idx), int(end_idx)
            # keep only clips long enough to sample video_len frames at the
            # requested stride
            if end_idx - start_idx > video_len * every_nth:
                self.lengths.append(end_idx - start_idx + 1)
                self.cases.append(line)
                video_path = os.path.join(self.dataroot,
                                          video_name + '_uncomp.avi')
                video = self.load_video(video_path, start_idx - 1, end_idx - 1)
                np.save(
                    os.path.join(
                        self.cacheroot,
                        video_name + '_%d_%d.npy' % (start_idx, end_idx)),
                    video)
        if cache is not None:
            # ... and written in binary mode as well
            with open(cache, 'wb') as f:
                pickle.dump((self.cases, self.lengths), f)
    self.cumsum = np.cumsum([0] + self.lengths)
    # Python 3 print function (the original used a Python 2 print statement)
    print("Total number of frames {}".format(np.sum(self.lengths)))
def generate_configs(taskdir, task, gpu):
    """Generate configs for all."""
    # create experiment dir
    config_dir = os.path.join(taskdir, 'configs')
    utils.makedir(config_dir)

    # default setting
    default_config_path = 'configs/default.yaml'
    with open(default_config_path, 'r') as stream:
        default_config = utils._ordered_load(stream)

    # read config for specific task
    specific_config = read_specific_config(task)

    # generate config for each task: replace the 'cg' placeholders in the
    # default paths with the task name
    task_config = default_config.copy()
    task_config['gpu'] = gpu
    task_config['task_name'] = task_config['task_name'].replace('cg', task)
    task_config['model_path'] = task_config['model_path'].replace('cg', task)
    task_config['saved_params'] = task_config['saved_params'].replace('cg', task)
    task_config['ev_eval_script_path'] = task_config['ev_eval_script_path'].replace(
        'cg', task)

    # predict config
    predict_dev_config = task_config.copy()
    gen_predict_config(predict_dev_config, specific_config, 'dev', config_dir,
                       task, taskdir)

    predict_test_config = task_config.copy()
    gen_predict_config(predict_test_config, specific_config, 'test', config_dir,
                       task, taskdir)

    # for raw text
    predict_test_config = task_config.copy()
    gen_predict_config(predict_test_config, specific_config, 'raw-text', config_dir,
                       task, taskdir)

    print('Generate configs: Done!')
    return
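# Usage sketch for generate_configs (illustrative, not from the original source):
# rewrites the 'cg' placeholders in configs/default.yaml and emits per-split
# predict configs under <taskdir>/configs. The task name and GPU id are made up.
def _demo_generate_configs():
    generate_configs('experiments/ge11', task='ge11', gpu=0)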
def __init__(
    self,
    model,
    dataset,
    input_dim,
    hidden_dim,
    filter_num,
    filter_size,
    enc_num_layers,
    enc_is_bidirectional,
    dropout,
    dropout_rnn,
    dropout_cls,
    activation,
    sa_div,
    num_class,
    train_mode,
    experiment,
):
    super(AttendDiscriminate, self).__init__()

    self.experiment = f"train_{experiment}" if train_mode else experiment
    self.model = model
    self.dataset = dataset
    self.hidden_dim = hidden_dim
    print(paint(f"[STEP 3] Creating {self.model} HAR model ..."))

    self.fe = FeatureExtractor(
        input_dim,
        hidden_dim,
        filter_num,
        filter_size,
        enc_num_layers,
        enc_is_bidirectional,
        dropout,
        dropout_rnn,
        activation,
        sa_div,
    )
    self.dropout = nn.Dropout(dropout_cls)
    self.classifier = Classifier(hidden_dim, num_class)
    self.register_buffer("centers",
                         torch.randn(num_class, self.hidden_dim).cuda())

    # do not create log directories if we are only testing the models module
    if experiment != "test_models":
        if train_mode:
            makedir(self.path_checkpoints)
            makedir(self.path_logs)
        makedir(self.path_visuals)
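# Instantiation sketch for AttendDiscriminate (illustrative, not from the original
# source): every hyperparameter value below is made up; the real values come from
# the experiment's config. Using experiment="test_models" skips directory creation,
# but a CUDA device is still required because the class buffer is created on GPU.
def _demo_build_model():
    model = AttendDiscriminate(
        model="AttendDiscriminate", dataset="demo_har",
        input_dim=113, hidden_dim=128, filter_num=64, filter_size=5,
        enc_num_layers=2, enc_is_bidirectional=False,
        dropout=0.5, dropout_rnn=0.25, dropout_cls=0.5,
        activation="ReLU", sa_div=1, num_class=18,
        train_mode=False, experiment="test_models",
    )
    return model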
def plot_confusion(y_true, y_pred, path_save, epoch, normalize=True,
                   cmap=plt.cm.Blues, class_map=None):
    """
    Plot the confusion matrix

    :param y_true: a list of ground-truth activity labels
    :param y_pred: a list of predicted activity labels
    :param path_save: path for saving the generated confusion matrix
    :param epoch: epoch corresponding to the generated confusion matrix
    :param normalize: normalize the values
    :param cmap: colormap for the confusion matrix
    :param class_map: a list of activity class names
    :return:
    """
    if not os.path.exists(path_save):
        makedir(path_save)

    # Compute confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    # Only use the labels that appear in the data
    if not class_map:
        class_map = [str(idx) for idx in range(len(set(y_true)))]
    if normalize:
        cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]

    fig, ax = plt.subplots(figsize=(6, 6))
    im = ax.imshow(cm, interpolation="nearest", cmap=cmap)
    ax.set(
        xticks=np.arange(cm.shape[1]),
        yticks=np.arange(cm.shape[0]),
        xticklabels=class_map,
        yticklabels=class_map,
        title=f"Epoch {epoch}",  # must be an f-string to interpolate the epoch
        ylabel="True label",
        xlabel="Predicted label",
    )
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right",
             rotation_mode="anchor")

    fmt = ".1f" if normalize else "d"
    thresh = cm.max() / 2.0
    for i in range(cm.shape[0]):
        for j in range(cm.shape[1]):
            ax.text(
                j, i, format(cm[i, j], fmt),
                ha="center", va="center",
                color="white" if cm[i, j] > thresh else "black",
            )
    # work around matplotlib clipping the first/last heatmap rows
    high, low = ax.get_ylim()
    ax.set_ylim(high + 0.5, low - 0.5)
    fig.tight_layout()
    plt.savefig(os.path.join(path_save, "cm_" + str(epoch) + ".png"),
                bbox_inches="tight")
    # plt.show()
    plt.close()
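# Usage sketch for plot_confusion (illustrative, not from the original source):
# draws a normalized confusion matrix for fake predictions at a fake epoch.
# Assumes sklearn's confusion_matrix is imported at module level, as the function
# requires it.
def _demo_plot_confusion():
    y_true = np.random.randint(0, 3, size=500)
    y_pred = np.random.randint(0, 3, size=500)
    plot_confusion(y_true, y_pred, path_save="demo_figures/cm", epoch=10,
                   class_map=["walking", "sitting", "standing"])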
# NOTE: this excerpt starts mid-statement: the keyword arguments below are the tail
# of an image-cleaning call (guarded by an `if` on fmri_cleaned_path whose opening
# lines are not part of this excerpt), and the surrounding per-subject loop that
# defines `subject` and `hypergraphs_list` is likewise elided.
        detrend=True,
        standardize=True,
        low_pass=0.08,
        high_pass=0.009,
        t_r=2,
        confounds=confunds_path,
        ensure_finite=True,
        mask_img=nmi_brain_mask_path)
    nib.save(image_cleaned, fmri_cleaned_path)
else:
    print('Image cleaned found')
    image_cleaned = nib.load(fmri_cleaned_path)

folder_output = join(preprocessing_path, subject, 'parcellation_from_lasso')
time_series_path = join(folder_output, 'time_series.txt')
makedir(folder_output)
if not path.exists(time_series_path):
    time_series = np.transpose(
        np.asarray(change_resolution(image_cleaned.get_data(), gm_data)))
    np.savetxt(time_series_path, time_series, delimiter=',', fmt='%10.2f')
    print('Time series Shape: ' + str(time_series.shape))

hypergraphs = run(time_series_path, folder_output,
                  savefigure=join(folder_output, 'hypergraph_'), faster=True)
hypergraphs_list.append(hypergraphs)

np_hypergraph = np.asarray(hypergraphs_list).astype(np.int8)
print(np_hypergraph.shape)
# note: despite the name, this aggregates the hypergraphs with a mean, not a median
median_hypergraph = np.mean(np.mean(np_hypergraph, axis=1), axis=0)
figure = plt.figure(figsize=(6, 6))
plotting.plot_matrix(median_hypergraph, figure=figure, reorder=False, cmap='Greys')