def unify_dir(in_path, out_path):
    """Merge every annotated dataset file in *in_path* into one dataset.

    Parameters
    ----------
    in_path : str
        Directory whose regular files are annotated dataset files.
    out_path : str
        Unused here; kept for interface compatibility with callers.
        # NOTE(review): presumably a destination path — confirm with callers.

    Returns
    -------
    The dataset produced by successively applying `unify` to the
    per-file datasets, in `os.listdir` order.

    Raises
    ------
    IndexError
        If *in_path* contains no regular files.
    """
    all_files = os.listdir(in_path)
    # Build full paths with os.path.join: the original used plain string
    # concatenation (in_path + f), which produces broken paths whenever
    # in_path lacks a trailing separator.
    dataset_paths = [os.path.join(in_path, f)
                     for f in all_files
                     if os.path.isfile(os.path.join(in_path, f))]
    print(dataset_paths)  # debug output kept from the original
    # Seed with the first file, then fold the rest in via unify().
    new_dataset = dataset.annotated_to_dataset(dataset_paths[0])
    for path in dataset_paths[1:]:
        partial_dataset = dataset.annotated_to_dataset(path)
        new_dataset = unify(new_dataset, partial_dataset)
    return new_dataset
# NOTE(review): looks like the tail of a function truncated above this
# chunk — cf_matrix is not defined at this point; confirm against the
# full file before moving or removing it.
print(cf_matrix)


def show_error(dataset, clf):
    """Print every sample of *dataset* that *clf* misclassifies.

    For each sample where the prediction differs from the true label,
    prints: index, true label, predicted label, annotation entry
    (presumably the person's id — TODO confirm).

    Parameters
    ----------
    dataset
        Object exposing `X`, `y` and `anno` (project type).
    clf
        Classifier exposing `predict` (scikit-learn-style interface).
    """
    y_true, y_pred = dataset.y, clf.predict(dataset.X)
    result = (y_pred == y_true)
    for i, correct in enumerate(result):
        if not correct:
            # Same space-separated line as the original str()+" " chain.
            print(" ".join(str(v) for v in
                           (i, y_true[i], y_pred[i], dataset.anno[i])))


def parse_args(args):
    """Return True when the first CLI argument is a nonzero integer.

    Parameters
    ----------
    args : list[str]
        Typically ``sys.argv``; ``args[1]``, when present, must parse
        as an int (a non-numeric value raises ValueError, as before).

    Returns
    -------
    bool
        False when no argument is given or the argument is "0".
    """
    # Collapses the original random=int(...); random=bool(random) dance.
    return len(args) > 1 and bool(int(args[1]))


if __name__ == "__main__":
    random = parse_args(sys.argv)
    if random:
        in_path = "../af/cascade4/full_dataset"  # alt: "../af/result/full_dataset"
        # NOTE(review): rebinds the module name `dataset` to the loaded
        # dataset object — deliberate in the original, kept as-is.
        dataset = dataset.annotated_to_dataset(in_path)  # alt: labeled_to_dataset(in_path)
        random_eval(dataset)
    else:
        train_path = "../af/cascade4/full_dataset_train"
        test_path = "../af/cascade4/full_dataset_test"
        determistic_eval(train_path, test_path, False)