def parse_args(): """This function parses and return arguments passed in """ descr = 'Plot the Bifurcation Diagram of Logistic, Cubic, and Sine Maps' examples = ''' %(prog)s -r 1:4 %(prog)s -r 4:6.5 --map=cubic %(prog)s --map=sine -s 200 -n 200 %(prog)s -r 3.:4. -s 500 -n 600 %(prog)s -r 3.5:3.6 -y .3:.6 -s 800 -n 1000''' parser = argparser(descr, examples) # By default, make 300 iterations (n) and do no plot the first 200 ones (s) # By default select the Logistic Equation parser.add_argument( "-r", "--rate", action="store", dest="r", help="range of the growth rate parameter (default: the entire range)") parser.add_argument( "-y", "--people", action="store", dest="y", help="normalized range of the population (default: the entire range)") parser.add_argument( "-s", "--skip", action="store", dest="s", type=int, default=200, help="skip plotting the first 's' iterations (default: %(default)s)") parser.add_argument("-n", "--steps", action="store", dest="n", type=int, default=100, help="number of iterations (default: %(default)s)") parser.add_argument( "-m", "--map", action="store", dest="map_name", default="logistic", choices=["logistic", "cubic", "sine"], help="select the desired map (logistic, cubic, or sine)") return parser.parse_args()
def parse_args(): """This function parses and return arguments passed in """ descr = 'Plot of Logistic Equation Time Series' examples = ''' # time series with a stable fixed point %(prog)s -0 0.4 -r 3.2 -n 50 %(prog)s -0 0.4 -1 0.45 -r 3.2 -n 50 # chaotic results (randon output) %(prog)s --x0 0.2 --x1 0.2000001 -r 4.0 -n 50 %(prog)s -0 0.2 -r 3.6 -n 5000 --dots-only %(prog)s -0 0.9 -r 4.5 -n 50 --map=cubic %(prog)s -0 0.4 -r 0.8 -n 50 --map=sine''' parser = argparser(descr, examples) # By default select the Logistic Equation parser.add_argument( "-0", "--x0", action="store", dest="x0", type=float, required=True, help="1st initial condition") parser.add_argument( "-1", "--x1", action="store", dest="x1", type=float, help="2nd initial condition (optional)") parser.add_argument( "-d", "--dots-only", action="store_true", dest="dotsonly", help="do not connect the dots with lines (default: %(default)s)") parser.add_argument( "-r", "--rate", action="store", dest="r", type=float, required=True, help="growth rate parameter") parser.add_argument( "-s", "--skip", action="store", dest="s", type=int, default=0, help="skip plotting the first 's' iterations") parser.add_argument( "-n", "--steps", action="store", dest="n", type=int, required=True, help="number of iterations") parser.add_argument( "-m", "--map", action="store", dest="map_name", default="logistic", choices = ["logistic", "cubic", "sine"], help = "select the desired map (logistic, cubic, or sine)") return parser.parse_args()
def main():
    args = argparser()["image"]
    # Load the image, detect edges, and show the intermediate results.
    image, orig, ratio = loadimg(args)
    gray, edged = detectedge(image)
    show(image)
    show(edged)
    # Find the document contour, warp it to a top-down view, and save.
    image, screenCnt = findcontour(image, edged)
    show(image)
    warped = warping(orig, screenCnt, ratio)
    showcomp(orig, warped)
    save(args, warped)
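# A minimal sketch of what the `detectedge` step above might do, assuming the
# classic OpenCV document-scanner recipe. The helper's real body is not shown;
# the blur kernel and Canny thresholds here are illustrative assumptions.
import cv2

def detectedge(image):
    """Hypothetical helper: return a grayscale copy and its Canny edge map."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)  # drop color information
    gray = cv2.GaussianBlur(gray, (5, 5), 0)        # suppress high-frequency noise
    edged = cv2.Canny(gray, 75, 200)                # keep strong gradients only
    return gray, edged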
def parse_args(): """This function parses and return arguments passed in""" descr = ("Plot m mod n for 0 <= m,n <= SIZE.\n" "Optionally save the plot to a PNG file") parser = argparser(descr) parser.add_argument( "-s", "--size", action="store", dest="size", type=int, default=500, help="range of the mod values to be plotted (default: %(default)s)") parser.add_argument("-f", "--outfile", action="store", dest="outfile", help="name of the output PNG file") return parser.parse_args()
def parse_args(): """This function parses and return arguments passed in """ descr = 'Plot the Bifurcation Diagram of Logistic, Cubic, and Sine Maps' examples = ''' %(prog)s -r 1:4 %(prog)s -r 4:6.5 --map=cubic %(prog)s --map=sine -s 200 -n 200 %(prog)s -r 3.:4. -s 500 -n 600 %(prog)s -r 3.5:3.6 -y .3:.6 -s 800 -n 1000''' parser = argparser(descr, examples) # By default, make 300 iterations (n) and do no plot the first 200 ones (s) # By default select the Logistic Equation parser.add_argument( "-r", "--rate", action="store", dest="r", help="range of the growth rate parameter (default: the entire range)") parser.add_argument( "-y", "--people", action="store", dest="y", help="normalized range of the population (default: the entire range)") parser.add_argument( "-s", "--skip", action="store", dest="s", type=int, default=200, help="skip plotting the first 's' iterations (default: %(default)s)") parser.add_argument( "-n", "--steps", action="store", dest="n", type=int, default=100, help="number of iterations (default: %(default)s)") parser.add_argument( "-m", "--map", action="store", dest="map_name", default="logistic", choices=["logistic", "cubic", "sine"], help="select the desired map (logistic, cubic, or sine)") return parser.parse_args()
def parse_args(): """This function parses and return arguments passed in """ descr = 'Plot of the Final State Diagram' examples = ''' %(prog)s -r 3.492 %(prog)s -r 3.614 -s 200 -n 300 %(prog)s -0 0.4 -r 3.2 -s 10 -n 50 %(prog)s -0 0.8 -r 6.2 -n 20 --map=cubic''' parser = argparser(descr, examples) # By default, make 3000 iterations (n) and # do no plot the first 2000 ones (s) # By default select the Logistic Equation parser.add_argument( "-0", "--x0", action="store", dest="x0", type=float, default=.5, help="initial condition (default: %(default)s)") parser.add_argument( "-r", "--rate", action="store", dest="r", type=float, required=True, help="growth rate parameter") parser.add_argument( "-s", "--skip", action="store", dest="s", type=int, default=2000, help="skip plotting the first 's' iterations (default: %(default)s)") parser.add_argument( "-n", "--steps", dest="n", type=int, default=1000, action="store", help="number of iterations (default: %(default)s)") parser.add_argument( "-m", "--map", action="store", dest="map_name", default = "logistic", choices = ["logistic", "cubic", "sine"], help = "select the desired map (logistic, cubic, or sine)") return parser.parse_args()
import torch.nn.functional as F
from torch import autograd
from torch.utils.data import Dataset, DataLoader, TensorDataset
from torch.optim.lr_scheduler import StepLR, MultiStepLR
from datetime import datetime  # needed by datetime.now() below
import setproctitle            # needed by setproctitle.setproctitle() below

import data_load, BCP, utils

if __name__ == "__main__":
    args = utils.argparser(data='mnist', epochs=60, warmup=1, rampup=20,
                           batch_size=50, epsilon=1.58, epsilon_infty=0.1,
                           epsilon_train=1.58, epsilon_train_infty=0.1,
                           augmentation=False, lr=0.0003,
                           lr_scheduler='multistep', wd_list=[21, 30, 40],
                           gamma=0.1, opt_iter=10)
    print(datetime.now())
    print(args)
    print('saving file to {}'.format(args.prefix))
    setproctitle.setproctitle(args.prefix)
    train_log = open(args.prefix + "_train.log", "w")
    test_log = open(args.prefix + "_test.log", "w")
    # The original snippet breaks off mid-call; the remaining arguments below
    # follow the data_loaders signature used in the companion test scripts.
    train_loader, _ = data_load.data_loaders(args.data, args.batch_size,
                                             args.normalization,
                                             args.augmentation,
                                             args.drop_last, args.shuffle)
import re
import time
from datetime import datetime
import os
import tensorflow as tf

from utils import argparser, logging
from train import train
from test import test

if __name__ == '__main__':
    FLAGS = argparser()
    logging(str(FLAGS), FLAGS)
    logdir = FLAGS.log_dir

    fout = open(FLAGS.out_file, 'w')
    fout.write('global_step,timestamp,TP,FP,TN,FN,Sens,Spec,Prec,Acc,MCC,F1,AUC\n')
    fout.close()

    ### iterate over max_epoch
    for i in range(FLAGS.max_epoch):
        logging("%s: Run epoch %d" % (datetime.now(), i), FLAGS)

        # for training
        FLAGS.is_training = True
        FLAGS.keep_prob = 0.7
        FLAGS.log_dir = os.path.join(logdir, "train")
        train(FLAGS)

        # for test (the original snippet breaks off here; presumably a test
        # pass mirroring the training block, with is_training = False)
### torch modules
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR, MultiStepLR
import torch.nn.functional as F
from torch import autograd
from torch.utils.data import Dataset, DataLoader, TensorDataset
import setproctitle  # needed by setproctitle.setproctitle() below

import utils, data_load, BCP

if __name__ == "__main__":
    args = utils.argparser()
    print(args)
    setproctitle.setproctitle(args.prefix)
    test_log = open(args.prefix + "_test.log", "w")

    _, test_loader = data_load.data_loaders(args.data, args.test_batch_size,
                                            args.normalization,
                                            args.augmentation,
                                            args.drop_last, args.shuffle)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    args.print = False
    t = 100
    aa = torch.load(args.test_pth)['state_dict'][0]
    model_eval = utils.select_model(args.data, args.model)
    model_eval.load_state_dict(aa)
    print('verification testing ...')
    # (fragment: the closing line of an `accepts_range` helper; HTTP status
    # 206 means the server honored a byte-range request)
    return resp.code == 206

def handle_request(url, nprocs, chunk_size, outfile, verify):
    try:
        request = urllib.request.Request(url, method='HEAD')
        response = urllib.request.urlopen(request)
        response.close()
    except Exception as e:
        print(e)
        sys.exit()
    etag = response.headers['Etag']
    if accepts_range(url, response):
        multi_stream_download(url, nprocs, chunk_size, outfile)
    else:
        print('Server does not accept byte range requests. Reverting to a '
              'single stream download.')
        single_stream_download(url, chunk_size, outfile)
    if verify:
        verify_with_etag(etag, outfile)

if __name__ == '__main__':
    args = argparser()
    handle_request(args.url, args.nprocs, args.chunk_size, args.outfile_path,
                   args.verify_with_md5)
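# A minimal sketch of what `verify_with_etag` could do, assuming the common
# case where the ETag is the hex MD5 digest of the object (true for simple
# uploads on many servers, but not guaranteed by HTTP). The real helper is
# not shown above.
import hashlib

def verify_with_etag(etag, outfile):
    """Hypothetical: compare the downloaded file's MD5 against the ETag."""
    md5 = hashlib.md5()
    with open(outfile, 'rb') as f:
        for chunk in iter(lambda: f.read(1 << 20), b''):  # 1 MiB at a time
            md5.update(chunk)
    ok = md5.hexdigest() == etag.strip('"')  # ETag values are often quoted
    print('MD5 verification', 'passed' if ok else 'FAILED')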
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR, MultiStepLR
import torch.nn.functional as F
from torch import autograd
from torch.utils.data import Dataset, DataLoader, TensorDataset
import setproctitle  # needed by setproctitle.setproctitle() below

import utils, data_load, BCP

if __name__ == "__main__":
    args = utils.argparser(data='mnist', epsilon=1.58, epsilon_infty=0.1,
                           epochs=60, rampup=20, wd_list=[21, 30, 40])
    print(args)
    setproctitle.setproctitle(args.prefix)
    test_log = open(args.prefix + "_test.log", "w")

    _, test_loader = data_load.data_loaders(args.data, args.test_batch_size,
                                            args.normalization,
                                            args.augmentation,
                                            args.drop_last, args.shuffle)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    args.print = True
    t = 100
from keras.optimizers import Adagrad
from sklearn.model_selection import StratifiedKFold
import os

from load import load_all
from preprocessing import prepro, weight_computation, get_prepared_data, \
    train_val_splitting
from define_metrics import Metrics_DDI_classification, multi_micro_f1_, \
    get_callbacks_list
from utils import argparser, combine_params
from network_functions import define_network, train_network, get_predictions
from calculate_metrics import get_metrics_cv

if __name__ == '__main__':
    in_path, out_path, out_path_figure = argparser()
    #in_path = '/content/gdrive/My Drive/Master/Subjects/TFM/code/DOC/two_steps/second_step/'

    ## 1. Load embedding, position matrices and word2int_dict of train set,
    #     plus the train and test sets
    emb_matrix, pos_matrix, w2i_dict, VOCAB_SIZE, WV_VECTOR_SIZE, \
        POS_VECTOR_LENGTH, MAX_SENTENCE_LENGTH, X_train, y_train, \
        X_test, y_test = load_all(in_path)

    ## 2. Preprocessing
    label2int = {'INT': 0, 'ADVISE': 1, 'EFFECT': 2, 'MECHANISM': 3}
    X_train_i_pad, X_train_p1_pad, X_train_p2_pad, y_train_1h, y_train_i = \
        prepro(X_train, y_train, w2i_dict, label2int, MAX_SENTENCE_LENGTH)
    X_test_i_pad, X_test_p1_pad, X_test_p2_pad, y_test_1h, y_test_i = \
        prepro(X_test, y_test, w2i_dict, label2int, MAX_SENTENCE_LENGTH,
               test=True, y_trainset=y_train)
    original_train_data = y_train_i, y_train_1h, X_train_i_pad, \
        X_train_p1_pad, X_train_p2_pad
def parse_args(): """This function parses and return arguments passed in """ descr = 'Plot of Logistic Equation Time Series' examples = ''' # time series with a stable fixed point %(prog)s -0 0.4 -r 3.2 -n 50 %(prog)s -0 0.4 -1 0.45 -r 3.2 -n 50 # chaotic results (randon output) %(prog)s --x0 0.2 --x1 0.2000001 -r 4.0 -n 50 %(prog)s -0 0.2 -r 3.6 -n 5000 --dots-only %(prog)s -0 0.9 -r 4.5 -n 50 --map=cubic %(prog)s -0 0.4 -r 0.8 -n 50 --map=sine''' parser = argparser(descr, examples) # By default select the Logistic Equation parser.add_argument("-0", "--x0", action="store", dest="x0", type=float, required=True, help="1st initial condition") parser.add_argument("-1", "--x1", action="store", dest="x1", type=float, help="2nd initial condition (optional)") parser.add_argument( "-d", "--dots-only", action="store_true", dest="dotsonly", help="do not connect the dots with lines (default: %(default)s)") parser.add_argument("-r", "--rate", action="store", dest="r", type=float, required=True, help="growth rate parameter") parser.add_argument("-s", "--skip", action="store", dest="s", type=int, default=0, help="skip plotting the first 's' iterations") parser.add_argument("-n", "--steps", action="store", dest="n", type=int, required=True, help="number of iterations") parser.add_argument( "-m", "--map", action="store", dest="map_name", default="logistic", choices=["logistic", "cubic", "sine"], help="select the desired map (logistic, cubic, or sine)") return parser.parse_args()
@author: antonio
"""
from utils import one_df_per_label, argparser
from parse_ann import parse_ann
import os
from clean_ner_output import clean_ner_output
from obtain_statistics import obtain_statistics
from format_negation_uncertainty import integrate_negation_uncertainty
import time

if __name__ == '__main__':
    path, ndocs = argparser()

    # Create tmp and out directories
    if not os.path.exists(os.path.join(path, 'tmp')):
        os.mkdir(os.path.join(path, 'tmp'))
    if not os.path.exists(os.path.join(path, 'out')):
        os.mkdir(os.path.join(path, 'out'))

    #### 1. Parse NER outputs ####
    # OUTPUT: DataFrame with the raw NER output: detected entities with
    # information about their position and label
    print('Parsing NER outputs...')
    df = parse_ann(path)
    df = df.drop(['annotator', 'bunch', 'mark'], axis=1)
    df['filename'] = df['filename'].apply(lambda x: '.'.join(x.split('.')[:-1]))
    df['offset1'] = df['offset1'].astype(int)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 29 10:37:54 2020

@author: antonio
"""
from sklearn.feature_extraction.text import CountVectorizer

import kmeans_elbow
from utils import argparser, docs2list, print_clusters, save_cluster_info

if __name__ == '__main__':
    ##### Parse arguments
    datapath, mode, k = argparser()
    #datapath = '/home/antonio/Documents/Work/BSC/Projects/COVID-19/annotation-radiologia/data/bunch2'

    ##### Documents to list
    # TODO: further clean plain text
    all_documents, filenames = docs2list(datapath)

    ##### Text 2 Vector (Bag of Words)
    # TODO: substitute BOW with something more sophisticated (Doc2Vec, TF-IDF?)
    vectorizer = CountVectorizer()
    X = vectorizer.fit_transform(all_documents)

    ##### KMeans
    if mode == 'findK':
        kmeans_elbow.elbow(X.toarray())
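# A minimal sketch of the branch the snippet above cuts off before:
# clustering with a user-supplied k once it has been chosen from the elbow
# plot. The real `print_clusters` / `save_cluster_info` helpers are not
# shown, so this sketch uses plain scikit-learn and a hypothetical name.
from sklearn.cluster import KMeans

def cluster_documents(X, k, filenames):
    """Hypothetical: assign each document to one of k clusters."""
    km = KMeans(n_clusters=k, random_state=0, n_init=10)
    labels = km.fit_predict(X)               # accepts the sparse BOW matrix
    for fname, label in zip(filenames, labels):
        print(label, fname)                  # one cluster id per document
    return labels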