def spot_it(request):
    """Start a Spot It game from a submitted word list, or show the options form."""
    if request.method == 'POST':
        form = WordListForm(request.POST)
        if form.is_valid():
            words = form.cleaned_data.get("words")
            qs_lemmas = models.Lemma.objects.filter(lemma__in=words)
            qs_only_lemmas = qs_lemmas.distinct().values_list('lemma', flat=True)
            # Submitted words that have no matching lemma in the database
            not_found_words = [w for w in list_diff(qs_only_lemmas, words) if w != '']
            dict_lemmas = [lemma.to_dict() for lemma in qs_lemmas[:57]]
            return render(request, 'parser_tool/spot_it_start_game.html',
                          {"data": json.dumps(dict_lemmas),
                           "not_found_words": not_found_words})
        else:
            logger.warning(f"errors: {form.errors}")
            return render(request, 'parser_tool/spot_it_options.html', {'form': form})
    form = WordListForm()
    return render(request, 'parser_tool/spot_it_options.html', {'form': form})
def print_interpretation(interpretations, feature_name, metric_parameters, model_parameters):
    """Visualize the interpretations.

    Args:
        - interpretations: interpretations of each patient
        - feature_name: temporal and static feature names (y-axis of the heatmap)
        - metric_parameters: parameters for the problem and labels
        - model_parameters: parameters for the predictor model (concatenation)

    Returns:
        - figs: feature and temporal importance heatmap for each patient
    """
    label_name = metric_parameters['label_name']

    # Define feature names, accounting for concatenated static/time inputs
    temporal_features = feature_name['temporal']
    static_features = feature_name['static']
    if model_parameters['static_mode'] == 'concatenate':
        temporal_features = temporal_features + static_features
    if model_parameters['time_mode'] == 'concatenate':
        temporal_features = temporal_features + ['time']
    if label_name[0] in temporal_features:
        temporal_features = list_diff(temporal_features, label_name)

    figs = []
    # Generate one heatmap per patient
    for i in range(interpretations.shape[0]):
        fig = plt.figure(figsize=(8, 10))
        plt.imshow(np.transpose(interpretations[i, :, :]), cmap='Greys_r')
        plt.xticks(np.arange(interpretations.shape[1]))
        plt.yticks(np.arange(interpretations.shape[2]), temporal_features)
        plt.colorbar()
        plt.clim(0, 1)
        plt.xlabel('Sequence Length', fontsize=10)
        plt.ylabel('Features', fontsize=10)
        plt.title('Feature and temporal importance for patient ID: ' + str(i + 1),
                  fontsize=10)
        # Set the facecolor before showing, so it is visible in the rendered figure
        fig.patch.set_facecolor('#f0f2f6')
        plt.show()
        figs.append(fig)

    return figs
def validate(self, value):
    """Check that value contains at least min_words words found in the database."""
    super().validate(value)
    # Check which words exist in the database
    value = [w for w in value if w != '']
    qs_lemmas = Lemma.objects.filter(
        lemma__in=value).distinct().values_list('lemma', flat=True)
    logger.info(
        f"WordListField validate: {qs_lemmas} length={len(qs_lemmas)}")
    actual_words = len(qs_lemmas)
    if actual_words < self.min_words:
        not_found_words = list_diff(qs_lemmas, value)
        invalid_message = self.error_messages['invalid'].format(
            min_words=self.min_words,
            actual_words=actual_words,
            not_found_words="Not Found Words: " + ', '.join(not_found_words)
            if not_found_words else "")
        raise ValidationError(invalid_message, code='invalid')
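# For context: validate() above presupposes a field class exposing `min_words`
# and an 'invalid' error template with the placeholders used in the format()
# call. A minimal sketch of what that declaration might look like -- the base
# class, the default value, and the to_python() splitting are assumptions, not
# the project's actual code:
from django import forms

class WordListField(forms.Field):
    default_error_messages = {
        'invalid': ('Expected at least {min_words} recognized words, '
                    'found {actual_words}. {not_found_words}'),
    }

    def __init__(self, *, min_words=1, **kwargs):
        self.min_words = min_words
        super().__init__(**kwargs)

    def to_python(self, value):
        # Split the raw submitted text into a list of words.
        return [] if value in self.empty_values else value.split()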
def start_game_in_editor(self):
    if settings.ip != '127.0.0.1':
        print("can not start the game in a remote machine")
        exit(0)
    unreal_pids_before_launch = utils.find_process_id_by_name("UE4Editor.exe")
    subprocess.Popen(self.cmd, shell=True)
    time.sleep(2)
    unreal_pids_after_launch = utils.find_process_id_by_name("UE4Editor.exe")

    # diff_proc holds the difference between the pre-launch UE4 processes
    # and the one that is about to be launched
    diff_proc = []
    # wait till there is exactly one new UE4Editor process; the process list
    # must be refreshed inside the loop, otherwise the diff never changes
    while len(diff_proc) != 1:
        time.sleep(3)
        unreal_pids_after_launch = utils.find_process_id_by_name("UE4Editor.exe")
        diff_proc = utils.list_diff(unreal_pids_after_launch,
                                    unreal_pids_before_launch)
    settings.game_proc_pid = diff_proc[0]

    # time.sleep(30)
    client = airsim.MultirotorClient(settings.ip)
    connection_established = False
    connection_ctr = 0  # number of connection attempts so far
    # wait till connected to the multirotor
    time.sleep(1)
    while not connection_established:
        try:
            # os.system(self.press_play_file)
            time.sleep(2)
            connection_established = client.confirmConnection()
        except Exception:
            if (connection_ctr >= settings.connection_count_threshold and
                    msgs.restart_game_count >= settings.restart_game_from_scratch_count_threshold):
                print("couldn't connect to the UE4Editor multirotor after multiple tries")
                print("memory utilization:" + str(psutil.virtual_memory()[2]) + "%")
                exit(0)
            if connection_ctr == settings.connection_count_threshold:
                self.restart_game()
            print("connection not established yet")
            time.sleep(5)
            connection_ctr += 1
            client = airsim.MultirotorClient(settings.ip)
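# utils.find_process_id_by_name() is not shown above; a plausible sketch using
# psutil (an assumption -- the project's helper may match names differently):
import psutil

def find_process_id_by_name(name):
    # Return the PIDs of all running processes whose name matches `name`.
    return [p.pid for p in psutil.process_iter(['name'])
            if p.info['name'] == name]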
def simulate_graph(graph):
    """Generate dynamics on graph."""
    # create system
    J = np.copy(nx.to_numpy_matrix(graph))
    np.fill_diagonal(J, -1)

    D = np.zeros((J.shape[0],))
    E = np.zeros((J.shape[0],))
    I = np.ones((J.shape[0],))

    # add input to nodes of zero in-degree
    zero_indgr = []
    for i, row in enumerate(J.T):
        inp = np.sum(row)
        inp += 1  # compensate for self-inhibition on the diagonal
        if inp == 0:
            zero_indgr.append(i)
    D[zero_indgr] = 1
    E[zero_indgr] = 1
    print('>', '{}/{} nodes with zero indegree'.format(len(zero_indgr), len(graph.nodes())))

    # simulate system
    syst = SDESystem(J, D, E, I)
    syst, mat, sol = analyze_system(syst, filter_trivial_ss=False)

    # plot results
    fig = plt.figure(figsize=(30, 15))
    gs = mpl.gridspec.GridSpec(1, 2, width_ratios=[1, 2])

    if mat is not None:
        # only keep correlations between nodes of non-zero in-degree
        mat = extract_sub_matrix(mat, zero_indgr)
        node_inds = list_diff(range(J.shape[0]), zero_indgr)
        used_nodes = np.array(graph.nodes())[node_inds]
        plot_corr_mat(mat, plt.subplot(gs[0]),
                      show_values=False, labels=used_nodes)

    plot_system_evolution(sol, plt.subplot(gs[1]), show_legend=False)
    save_figure('images/peak_network_simulation.pdf', bbox_inches='tight', dpi=300)
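# extract_sub_matrix() is not shown above; from its use it presumably drops the
# rows and columns belonging to the zero-indegree nodes. A plausible sketch
# under that assumption (the real helper may differ):
import numpy as np

def extract_sub_matrix(mat, indices):
    # Keep only the rows/columns whose index is not in `indices`.
    keep = [i for i in range(np.asarray(mat).shape[0]) if i not in set(indices)]
    return np.asarray(mat)[np.ix_(keep, keep)]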
def fit(self, dataset):
    """Fit the ensemble models for uncertainty estimation.

    Args:
        - dataset: temporal, static, label, time, treatment information

    Returns:
        - self.ensemble_model: trained ensemble model (stored on the instance)
    """
    # Define model parameters
    self.model_parameters = {
        "h_dim": self.h_dim,
        "n_head": self.n_head,
        "n_layer": self.n_layer,
        "batch_size": self.batch_size,
        "epoch": self.epoch,
        "learning_rate": self.learning_rate,
        "model_type": self.model_type,
        "static_mode": self.static_mode,
        "time_mode": self.time_mode,
        "verbose": self.verbose,
    }

    # Exclude the already-trained model type from the ensemble candidates
    if self.model_parameters["model_type"] in self.ensemble_model_type:
        self.ensemble_model_type = list_diff(
            self.ensemble_model_type, [self.model_parameters["model_type"]])

    # Initialize the ensemble with the currently trained model
    self.ensemble_model = [self.predictor_model]

    # Train one predictor per remaining model type and add it to the ensemble
    for each_model_type in self.ensemble_model_type:
        self.model_parameters["model_type"] = each_model_type
        pred_class = prediction(each_model_type, self.model_parameters, self.task)
        pred_class.fit(dataset)
        self.ensemble_model = self.ensemble_model + [pred_class]

    return
def greedy_feature_selection(self, dataset, feature_selection_model):
    """Select a subset of the features in a greedy way.

    Args:
        - dataset: dataset with subset of temporal features
        - feature_selection_model: 'addition' or 'deletion'

    Returns:
        - selected_feature_index: selected feature index
    """
    assert feature_selection_model in ['addition', 'deletion']
    if self.model_parameters['static_mode'] != 'concatenate':
        assert self.feature_type == 'temporal'

    # Save original data
    if self.feature_type == 'temporal':
        ori_temporal_feature = dataset.temporal_feature.copy()
        no, seq_len, dim = ori_temporal_feature.shape
    elif self.feature_type == 'static':
        ori_static_feature = dataset.static_feature.copy()
        no, dim = ori_static_feature.shape

    ## Initialization
    # Entire feature set
    feature_set = [i for i in range(dim)]
    # Save results in dictionary
    result = dict()

    # Greedily evaluate the importance of each feature
    for f in tqdm(feature_set):
        # For the addition option, evaluate the feature on its own
        if feature_selection_model == 'addition':
            temp_feature = [f]
        # For the deletion option, remove the feature from the entire feature set
        elif feature_selection_model == 'deletion':
            temp_feature = list_diff(feature_set, [f])

        # Restrict the data to the subset of features
        if self.feature_type == 'temporal':
            dataset.temporal_feature = ori_temporal_feature[:, :, temp_feature].copy()
        elif self.feature_type == 'static':
            dataset.static_feature = ori_static_feature[:, temp_feature].copy()

        # Model train and test
        result[f] = self.model_predict_and_evaluate(dataset)

        # For deletion, negate the score: worse performance after removing a
        # feature means that feature is more important
        if feature_selection_model == 'deletion':
            result[f] = -result[f]

    # Select the top feature_number features by result
    if self.metric_name in ['auc', 'apr']:
        selected_feature_index = sorted(result, key=result.get,
                                        reverse=True)[:self.feature_number]
    elif self.metric_name in ['mse', 'mae']:
        selected_feature_index = sorted(result, key=result.get,
                                        reverse=False)[:self.feature_number]

    # Recover the original data
    if self.feature_type == 'temporal':
        dataset.temporal_feature = ori_temporal_feature
    elif self.feature_type == 'static':
        dataset.static_feature = ori_static_feature

    return selected_feature_index
def recursive_feature_selection(self, dataset, feature_selection_model):
    """Select a subset of the features in a recursive way.

    Args:
        - dataset: dataset with subset of temporal features
        - feature_selection_model: 'addition' or 'deletion'

    Returns:
        - curr_set: selected feature index
    """
    assert feature_selection_model in ['addition', 'deletion']
    if self.model_parameters['static_mode'] != 'concatenate':
        assert self.feature_type == 'temporal'

    # Save original data
    if self.feature_type == 'temporal':
        ori_temporal_feature = dataset.temporal_feature.copy()
        no, seq_len, dim = ori_temporal_feature.shape
    elif self.feature_type == 'static':
        ori_static_feature = dataset.static_feature.copy()
        no, dim = ori_static_feature.shape

    ## Initialization
    # Entire feature set
    feature_set = [i for i in range(dim)]

    # Current feature set:
    # deletion starts from the entire feature set,
    if feature_selection_model == 'deletion':
        curr_set = [i for i in range(dim)]
        iterations = dim - self.feature_number
    # addition starts from the empty set
    elif feature_selection_model == 'addition':
        curr_set = list()
        iterations = self.feature_number

    # Iterate until the number of selected features equals feature_number
    for i in tqdm(range(iterations)):
        # Save results in dictionary
        result = dict()

        # For each remaining candidate feature
        for f in tqdm(feature_set):
            # For the addition option, add the feature to the current set
            if feature_selection_model == 'addition':
                temp_feature = curr_set + [f]
            # For the deletion option, remove the feature from the current set
            elif feature_selection_model == 'deletion':
                temp_feature = list_diff(curr_set, [f])

            # Restrict the data to the subset of features
            if self.feature_type == 'temporal':
                dataset.temporal_feature = ori_temporal_feature[:, :, temp_feature].copy()
            elif self.feature_type == 'static':
                dataset.static_feature = ori_static_feature[:, temp_feature].copy()

            # Model training and testing
            result[f] = self.model_predict_and_evaluate(dataset)

        # Find the feature with the best performance
        if self.metric_name in ['auc', 'apr']:
            temp_feature_index = max(result, key=result.get)
        elif self.metric_name in ['mse', 'mae']:
            temp_feature_index = min(result, key=result.get)

        # Recursively update the current feature set
        if feature_selection_model == 'deletion':
            curr_set = list_diff(curr_set, [temp_feature_index])
        if feature_selection_model == 'addition':
            curr_set = curr_set + [temp_feature_index]

        # Remove the selected feature from the entire feature set
        feature_set = list_diff(feature_set, [temp_feature_index])

    # Recover the original data
    if self.feature_type == 'temporal':
        dataset.temporal_feature = ori_temporal_feature
    elif self.feature_type == 'static':
        dataset.static_feature = ori_static_feature

    return curr_set
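# Cost note: greedy_feature_selection() fits and evaluates the model once per
# feature (dim evaluations in total), while recursive_feature_selection()
# re-evaluates every remaining candidate at each of its `iterations` steps,
# i.e. on the order of dim * iterations model fits. The recursive variant is
# therefore far more expensive, but it accounts for interactions with the
# features already selected (or removed).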
def remaining_cities(self):
    """Cities not yet visited on the route."""
    return list_diff(self.all_cities, self.full_route)
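# Every snippet above relies on a `list_diff` helper that is not defined here.
# Most call sites pass a superset first (e.g. all_cities vs. full_route), but
# the two Django snippets diff the found lemmas against the submitted words in
# the opposite order, so the usages are only all consistent with a symmetric
# difference. A minimal sketch under that assumption:
def list_diff(list1, list2):
    # Elements appearing in exactly one of the two lists, preserving
    # first-seen order.
    set1, set2 = set(list1), set(list2)
    return [x for x in list(list1) + list(list2) if (x in set1) != (x in set2)]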