def exportResults(self):
    """Export the solver's tracking results to disk via utils.export_results.

    Does nothing when no solver has been run yet (self.solver is None).
    Side effect: writes result files under self.output_basepath and prints
    a confirmation message.
    """
    if self.solver is not None:
        utils.export_results(pixels=self.solver.pixels,
                             shift_x=self.solver.shift_x,
                             shift_y=self.solver.shift_y,
                             shift_p=self.solver.shift_p,
                             shift_x_y_error=self.solver.shift_x_y_error,
                             box_shift=self.solver.box_shift,
                             fps=self.solver.fps,
                             res=self.solver.res,
                             output_basepath=self.output_basepath,
                             boxes_dict=self.boxes_dict,
                             start_frame=self.solver.start_frame)
        # NOTE(review): original formatting was collapsed; the print is assumed
        # to be inside the `if` (only announce after an actual export) — confirm.
        print("Files exported.")
def evaluate(experiment_dir, args):
    """Evaluate the model stored in the given directory.

    Loads the latest available checkpoint and iterates over the test set.

    Args:
        experiment_dir: The model directory.
        args: Commandline arguments.
    """
    # Cap GPU memory usage so the evaluation job can share the device.
    gpu_opts = tf.GPUOptions(per_process_gpu_memory_fraction=0.9, allow_growth=True)
    session_config = tf.ConfigProto(gpu_options=gpu_opts)
    with tf.Session(config=session_config) as session:
        (test_model, test_data, config,
         means, variances, stand) = create_and_restore_test_model(session, experiment_dir, args)
        print("Evaluating test set ...")
        eval_result = evaluate_model(session, test_model, test_data, means, variances, stand)

        if args.export:
            # Export the results into a csv file that can be submitted.
            csv_name = "predictions_in{}_out{}.csv".format(config['source_seq_len'],
                                                           config['target_seq_len'])
            export_results(eval_result, os.path.join(experiment_dir, csv_name))
            # Export a zip file containing the code that generated the results.
            export_code(glob.glob('./*.py', recursive=False),
                        os.path.join(experiment_dir, 'code.zip'))

        if args.visualize:
            # Visualize the seed sequence and the prediction for some random
            # samples in the test set (fixed seed for reproducibility).
            visualizer = Visualizer(SMPLForwardKinematics())
            rng = np.random.RandomState(42)
            idxs = rng.randint(0, len(eval_result), size=10)
            ordered_keys = sorted(eval_result.keys())
            for sample_key in (ordered_keys[i] for i in idxs):
                visualizer.visualize(eval_result[sample_key][1],
                                     eval_result[sample_key][0],
                                     title=sample_key)
# Base-MLP on data set 1: fit on the training set, predict on the test set,
# and export predictions alongside the ground-truth targets.
from sklearn.neural_network import MLPClassifier

from utils import split_feats_targs, capture_features, capture_targets, export_results

# Training set (features + targets) and the two test-set variants.
train_features, train_targets = split_feats_targs('train_1.csv')
test_features = capture_features('test_no_label_1.csv', False)  # file has no target column
actual_targets = capture_targets('test_with_label_1.csv')       # ground truth for scoring

# Fit the baseline MLP and collect its test-set predictions.
mlp = MLPClassifier(activation='logistic', solver='sgd')
fitted_mlp = mlp.fit(train_features, train_targets)
predicted_targets = [label for label in fitted_mlp.predict(test_features)]

export_results(actual_targets, predicted_targets, 'Base-MLP-DS1.csv')
from sklearn.model_selection import GridSearchCV
# FIX/NOTE(review): the names below are used in this chunk but were not
# imported here; unless they are imported earlier in the file this raises
# NameError at runtime. Re-importing is harmless if they already exist.
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import Perceptron
from sklearn.tree import DecisionTreeClassifier
from utils import split_feats_targs, capture_features, capture_targets, export_results

""" Store the necessary training/test set values into variables """
(train_features, train_targets) = split_feats_targs('train_1.csv')  # pass training set with targets
test_features = capture_features('test_no_label_1.csv', False)  # pass test set without targets
actual_targets = capture_targets('test_with_label_1.csv')  # pass test set with targets

""" Run GNB model """
fitted_gnb = GaussianNB().fit(train_features, train_targets)  # fit model with training set values
predicted_targets = list(fitted_gnb.predict(test_features))  # get predictions from model and record them
export_results(actual_targets, predicted_targets, 'GNB-DS1.csv')

""" Run PER model """
fitted_per = Perceptron().fit(train_features, train_targets)  # fit model with training set values
predicted_targets = list(fitted_per.predict(test_features))  # get predictions from model and record them
export_results(actual_targets, predicted_targets, 'PER-DS1.csv')

""" Run BaseDT model """
fitted_baseDT = DecisionTreeClassifier(criterion='entropy').fit(train_features, train_targets)  # fit model with training set values
predicted_targets = list(fitted_baseDT.predict(test_features))  # get predictions from model and record them
export_results(actual_targets, predicted_targets, 'Base-DT-DS1.csv')
"""
Parameter options to tune:
• splitting criterion: gini and entropy
• maximum depth of the tree: 10 and no maximum
• minimum number of samples to split an internal node: experiment with values of your choice
• minimum impurity decrease: experiment with values of your choice
• class weight: None and balanced
"""
print("Finding best hyperparameters for DT....")
# FIX: the original grid was [0.0, 1e-250, 1e-900]. The literal 1e-900
# underflows to 0.0 in double precision, so the grid effectively contained
# 0.0 twice, and 1e-250 is far below any real impurity change — all three
# candidates behaved identically. Use distinguishable values instead.
best_dt = GridSearchCV(
    DecisionTreeClassifier(),
    {
        'criterion': ['gini', 'entropy'],
        'max_depth': [10, None],
        'min_samples_split': [2, 3, 5],
        'min_impurity_decrease': [0.0, 1e-4, 1e-3],
        'class_weight': [None, 'balanced'],
    },
    return_train_score=False,
    n_jobs=-1)  # use all CPU cores for the search
best_dt.fit(val_features, val_targets)
best_params = best_dt.best_params_  # records best found params from gridsearch
print("Best hyperparameters for DT:")
print(best_params)
print("\n")
# Re-fit a fresh tree on the training set using the best parameters found.
best_dt = DecisionTreeClassifier(criterion=best_params['criterion'],
                                 max_depth=best_params['max_depth'],
                                 min_samples_split=best_params['min_samples_split'],
                                 min_impurity_decrease=best_params['min_impurity_decrease'],
                                 class_weight=best_params['class_weight'])
fitted_dt = best_dt.fit(train_features, train_targets)  # fits model with training set values
predicted_targets = list(fitted_dt.predict(test_features))  # gets predictions from model and record them
export_results(actual_targets, predicted_targets, 'Best-DT-DS2.csv')
# Baseline Perceptron on data set 2: fit, predict, and export results.
from sklearn.linear_model import Perceptron

from utils import split_feats_targs, capture_features, capture_targets, export_results

# Training set (features + targets) and the two test-set variants.
train_features, train_targets = split_feats_targs('train_2.csv')
test_features = capture_features('test_no_label_2.csv', False)  # file has no target column
actual_targets = capture_targets('test_with_label_2.csv')       # ground truth for scoring

perceptron = Perceptron()
fitted_per = perceptron.fit(train_features, train_targets)
predicted_targets = [label for label in fitted_per.predict(test_features)]

export_results(actual_targets, predicted_targets, 'PER-DS2.csv')
# Build the requested frozen-BERT model variant; the variant name comes
# from the run configuration.
if config.model_name == 'frozen-bert-gmax':
    model = models.create_bert_GloMaxPoo(config)
elif config.model_name == 'frozen-bert-crnn-gmaxpool':
    model = models.create_bert_CRnnMax(config)
elif config.model_name == 'frozen-bert-rnnatt':
    model = models.create_bert_rnn_att(config)
elif config.model_name == 'frozen-bert-pos-fuse-rnnatt':
    model = models.create_bert_pos_fuse(config)
else:
    # Unknown model name: report and abort the run.
    print('Model type not found.')
    exit()

# Train the model, export predictions/results, and plot the training history.
model, history = train(config, model, X_train, y_train, X_devel, y_devel, y_train_df)
utils.export_results(model, config, X_train, X_devel, X_test, y_train, y_devel, y_test)
utils.visualise_training(config, history)

# Extract per-split features from the trained model and score them with an SVM.
features_dic = features.output_model_features(config, task_name, model, data_dict, df)
features.score_features_with_SVM(
    config=config,
    experiment_name=config.experiment_name,
    task_name=task_name,
    features_dic=features_dic,
    labels_df_cat=labels_df_cat,
    label=label)
# NOTE(review): the call below is truncated in this chunk; its remaining
# arguments continue beyond the visible source.
features.feature_fusion_and_scoring(config=config,
Store the necessary training/test set values into variables """
# NOTE(review): the line above is the tail of a section-header string whose
# opening triple quotes lie outside this chunk.
(train_features, train_targets) = split_feats_targs('<demo_training_set_file_name>')
test_features = capture_features(
    '<demo_test_set_file_name>',
    False)  # pass False if test set has no targets, otherwise pass True
actual_targets = capture_targets('<demo_test_set_w_targets_file_name>')

""" Run GNB model """
fitted_gnb = GaussianNB().fit(
    train_features, train_targets)  # fit model with training set values
predicted_targets = list(fitted_gnb.predict(
    test_features))  # get predictions from model and record them
export_results(actual_targets, predicted_targets, '<demo_output_file_name>')

""" Run PER model """
fitted_per = Perceptron().fit(
    train_features, train_targets)  # fit model with training set values
predicted_targets = list(fitted_per.predict(
    test_features))  # get predictions from model and record them
export_results(actual_targets, predicted_targets, '<demo_output_file_name>')

""" Run BaseDT model """
fitted_baseDT = DecisionTreeClassifier(criterion='entropy').fit(
    train_features, train_targets)  # fit model with training set values
predicted_targets = list(fitted_baseDT.predict(
    test_features))  # get predictions from model and record them
# NOTE(review): the matching export_results call for the BaseDT predictions
# appears to lie outside this chunk.
from utils import load_data_dir, export_results
from solutions import PracticeSolution
""" Google Hashcode 2020 - Practice Round! """

data_set = []
solution_set = []

# Load our data set and format it properly using the keymap below.
for raw in load_data_dir('data/practice', extension='.in', delimiter=' '):
    name, header, values = raw[0], raw[1], raw[2]
    data_set.append({
        'set_name': name,
        'max_weight': int(header[0]),
        'set_size': int(header[1]),
        'values': [(index, int(value)) for index, value in enumerate(values)],
    })

# Run our solution code (only on the "also_big" inputs).
for entry in data_set:
    if 'also_big' not in entry['set_name']:
        continue
    solver = PracticeSolution(entry)
    solution_set.append(solver.fast_solution())

# Check each file in the solution set and their scores.
for s in solution_set:
    print(f"Name: {s['set_name']}, Score: {s['score'] / s['max_weight']}")

# Write our results to file.
export_results('results', solution_set)
# Baseline Gaussian Naive Bayes on data set 1: fit, predict, and export.
from sklearn.naive_bayes import GaussianNB

from utils import split_feats_targs, capture_features, capture_targets, export_results

train_features, train_targets = split_feats_targs('train_1.csv')  # training set includes targets
test_features = capture_features('test_no_label_1.csv', False)    # test set without targets
actual_targets = capture_targets('test_with_label_1.csv')         # ground truth for the test set

gnb = GaussianNB()
fitted_gnb = gnb.fit(train_features, train_targets)
predicted_targets = [label for label in fitted_gnb.predict(test_features)]

export_results(actual_targets, predicted_targets, 'GNB-DS1.csv')
# NOTE(review): this chunk begins mid-function — the statements below,
# including the bare `return`, belong to a trial-running function whose
# `def` line (and loop structure, if any) is outside the visible source;
# the flat indentation here is nominal.
timer = core.Clock()
# Block until the participant presses one of the two response keys.
keys = event.waitKeys(keyList=["d", "k"])
response_time = timer.getTime()
key_pressed = keys[0]
# "d" is correct when the center stimulus color equals the LEFT stimulus word.
# NOTE(review): comparing against the *left* word (not the center word) looks
# deliberate for this task layout, but confirm against the experiment design.
correct_key = "d" if trial["center_stimulus_color"] == trial["left_stimulus_word"] else "k"
trail_result = {  # NOTE(review): "trail" is a typo for "trial" (harmless local name)
    "correct": key_pressed == correct_key,
    "response_time": response_time,
    # A trial is congruent when the center word is shown in its own color.
    "congruent": trial["center_stimulus_color"] == trial["center_stimulus_word"]
}
trial_results.append(trail_result)
event.clearEvents()  # drop any stray key events before the next trial
window.flip(clearBuffer=True)
return trial_results

if __name__ == "__main__":
    # Full session flow: intro, practice block, main experiment, results export.
    participant_info = get_participant_info()
    window = create_window()
    write_on_screen("introduction")
    write_on_screen("start_practice")
    start_practice_trials()
    write_on_screen("start_experiment")
    trial_results = start_experiment()
    path_to_results = export_results(trial_results, participant_info,
                                     config["dir_to_store_results"])
    write_on_screen("experiment_done", string_to_insert=path_to_results)
def main(Param):
    """Run the pipeline: optional BERT fine-tuning, Dutch-BERT embedding
    extraction for each data split, and optional RNN-attention fusion training.

    Args:
        Param: simpletransformers-style training-args dict; 'best_model_dir'
            is read after fine-tuning to reload the best checkpoint
            (presumably where checkpoints are saved — confirm).
    """
    train_df, val_df, test_df = read_text_file()
    if args.fine_tuning:
        # Fine-tune a 3-class classification model with class weighting,
        # then reload the best checkpoint for evaluation and embedding extraction.
        weights_list = class_weights(train_df)
        model = ClassificationModel(args.model_type,
                                    args.model_name,
                                    num_labels=3,
                                    weight=weights_list,
                                    use_cuda=CUDA,
                                    args=Param)
        model.train_model(train_df, eval_df=val_df, f1=f1, uar=uar, verbose=False)
        model = ClassificationModel(args.model_type,
                                    Param['best_model_dir'],
                                    num_labels=3,
                                    use_cuda=CUDA)
        evaluate(model, train_df, val_df, test_df)
        extract_dutch_bert_embedding(train_df, 'train', Param['best_model_dir'])
        extract_dutch_bert_embedding(val_df, 'devel', Param['best_model_dir'])
        extract_dutch_bert_embedding(test_df, 'test', Param['best_model_dir'])
    else:
        # No fine-tuning: extract embeddings from the default pretrained model.
        extract_dutch_bert_embedding(train_df, 'train')
        extract_dutch_bert_embedding(val_df, 'devel')
        extract_dutch_bert_embedding(test_df, 'test')
    if args.fusion_type == 'rnnatt':
        # Train an RNN-attention fusion model on the extracted feature vectors,
        # export its results, and dump per-split model features.
        config = set_config()
        X_train, y_train = load_feature_vectors(train_df, config)
        X_devel, y_devel = load_feature_vectors(val_df, config)
        X_test, _ = load_feature_vectors(test_df, config)
        y_train_df = train_df['label']
        model = models.create_bert_rnn_att(config)
        model, history = train_model(config, model, X_train, y_train, X_devel, y_devel, y_train_df)
        export_results(model, config, X_train, X_devel, y_train, y_devel)
        output_model_features(model,
                              data={
                                  'train': X_train,
                                  'devel': X_devel,
                                  'test': X_test
                              },
                              df={
                                  'train': train_df,
                                  'devel': val_df,
                                  'test': test_df
                              })
        # To be safe — free GPU memory before returning.
        del model
        torch.cuda.empty_cache()