def getArgumentParser(ap = ap.ArgumentParser(description = DESCRIPTION)): ap.add_argument("-sn", "--surfacenumber", nargs=1, type=int, metavar=('INT'), required=False) ap.add_argument("-pr", "--permutationrange", nargs=2, type=int, metavar=('INT','INT'), required=False) ap.add_argument("-os", "--outputstats", help = "Calculates the stats without permutation testing, and outputs the tmi.", action = 'store_true') ap.add_argument("--path", nargs=1, type=str, metavar=('STR'), required=True) ap.add_argument("--seed", nargs=1, type=int, metavar=('INT')) return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-i", "--image", nargs=1, help="Input a image (Nifti, MGH, MINC)", metavar=('*'), required=True) ap.add_argument("-n4", "--AntsN4correction", action='store_true', help="Perform ANTS N4 Bias correction.") ap.add_argument( "-t", "--thresholdalgorithm", help= "Method used to set the the lower threshold if thresholds are not supplied (Default is otsu).", choices=[ 'otsu', 'otsu_p', 'li', 'li_p', 'yen', 'yen_p', 'zscore', 'zscore_p' ]) ap.add_argument("-o", "--output", nargs=1, help="Set output") ap.add_argument("-r", "--replace", nargs=1, help="Replaces a image file (creates a backup)") return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION, formatter_class=ap.RawTextHelpFormatter)):
    datatype = ap.add_mutually_exclusive_group(required=True)
    datatype.add_argument("--voxel", help="Voxel input", action="store_true")
    datatype.add_argument("--vertex", help="Vertex input", action="store_true")
    ap.add_argument("-o", "--output", nargs=1, help="[4D_image]", metavar=('*.nii.gz or *.mgh'), required=True)
    ap.add_argument("-i", "--input", nargs='+', help="[3Dimage] ...", metavar=('*.nii.gz or *.mgh'), required=True)
    ap.add_argument("-m", "--mask", nargs=1, help="[3Dimage]", metavar=('*.nii.gz or *.mgh'))
    ap.add_argument("-s", "--scale", action='store_true')
    ap.add_argument("--fastica",
                    help="Independent component analysis. Input the number of components (e.g., --fastica 8 for eight components). Outputs the recovered sources and the component fit for each subject. (It is recommended to scale first.)",
                    nargs=1,
                    metavar=('INT'))
    return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION, formatter_class=ap.RawTextHelpFormatter)):
    # input type
    group = ap.add_mutually_exclusive_group(required=True)
    group.add_argument("-i", "--input",
                       help="Two csv files: [regressor(s)] [covariates]. Please note that if the -r option is used, the regressor(s) will be treated as dependent variable(s).",
                       nargs=2,
                       metavar=('*.csv', '*.csv'))
    group.add_argument("-f", "--file", help="One csv file: [regressor(s)]", nargs=1, metavar=('*.csv'))
    # options
    ap.add_argument("-d", "--demean", help="Demean columns", action="store_true")
    ap.add_argument("-s", "--stddemean", help="Demean and standardize columns", action="store_true")
    # which procedure to use
    proceduretype = ap.add_mutually_exclusive_group(required=True)
    proceduretype.add_argument("-o", "--orthogonalize", help="Orthogonalize the inputs", action="store_true")
    proceduretype.add_argument("-r", "--residuals", help="Residuals after regressing covariates", action="store_true")
    proceduretype.add_argument("-j", "--juststandarize",
                               help="Just demean or standardize the regressors (i.e., no regression or orthogonalization)",
                               action="store_true")
    return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-i", "--image", help="image_?h.mgh OR image_lh.mgh image_rh.mgh", nargs='+', metavar=('*.mgh'), required=True) ap.add_argument("-s", "--surface", help="fsaverage surface (e.g., pial, inflated)", default=['midthickness'], nargs=1) ap.add_argument("--hemi", help="Hemisphere (default is lh)", choices=['lh', 'rh'], default=['lh']) ap.add_argument("-l", "--lower", help="Lower threshold (default is 0.95)", default=['0.95'], nargs=1) ap.add_argument("-u", "--upper", help="Upper threshold (default is 1)", default=['1.00'], nargs=1) return ap
def getArgumentParser(ap=ap.ArgumentParser( description=DESCRIPTION, formatter_class=ap.RawTextHelpFormatter)): ap.add_argument("-i", "--input", help="[1D Subgrouping Variable]", nargs=1, metavar=('*.csv')) return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-i", "--input", nargs=1, help="[max TFCE permuations file]", metavar=('*.csv'), required=True) return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-i", "--input", nargs=2, help="[Surface] [Num_cores]", metavar=('*.mgh', 'INT'), required=True) ap.add_argument("--nosmoothing", help="No smoother", action="store_true") return ap
def getArgumentParser(ap = ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-r", "--range", nargs=2, help="permutation [start] [stop]", required=True) ap.add_argument("-m", "--medtype", nargs=1, help="mediation type [M or Y or I].", choices=['M', 'Y', 'I'], required=True) return ap
def getArgumentParser(ap = ap.ArgumentParser(description = DESCRIPTION, formatter_class=ap.RawTextHelpFormatter)): ap.add_argument("-i", "--input", help="1D Subgrouping Variable] [surface (area or thickness)]", nargs=2, metavar=('*.csv','surface'), required=True) ap.add_argument("-f", "--fwhm", help="FWHM of all surface file (Default: %(default)s))", nargs=1, default=['03B'], metavar=('??B')) return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-i", "--input", nargs=2, metavar=('*.nii.gz', '*.csv'), help="[tfce_image] [perm_tfce_max]", required=True) ap.add_argument("-l", "--outputneglog10", help='Outputs the -log10(FWEp) image', action='store_true') return ap
def getArgumentParser(ap = ap.ArgumentParser(description = DESCRIPTION, formatter_class=ap.RawTextHelpFormatter)): ap.add_argument("-o", "--out", help="Specify output name for 4D volume. Default: %(default)s).", nargs=1, metavar=('*.nii.gz'), default=['all_FA_skeletonised.nii.gz']) ap.add_argument("-n", "--specifiyrawdata", help="Specify which *.npy data to convert. Default: %(default)s).", nargs=1, metavar=('*.npy'), default=['python_temp/raw_nonzero.npy']) return ap
def getArgumentParser(ap = ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-r", "--range", nargs=2, help="permutation [start] [stop]", metavar=('INT','INT'), required=True) ap.add_argument("-v", "--specifyvars", nargs=2, type=int, help="Optional. Specify which regressors are permuted [first] [last]. For one variable, first=last.", metavar=('INT','INT')) return ap
def getArgumentParser(ap = ap.ArgumentParser(description = DESCRIPTION)): group = ap.add_mutually_exclusive_group(required=True) group.add_argument("-i", "--inputtxt", nargs=1, help="input a text file surface values (i.e., a list of cortical thickness values)") group.add_argument("-s", "--inputsurface", nargs=1, help="input a surface", metavar=('*.mgh')) ap.add_argument("-t", "--transpose", action='store_true', help="Transpose input file.") return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-i", "--image", help="stat_image.nii.gz", nargs=1, metavar=('*.nii.gz'), required=True) ap.add_argument("-t", "--threshold", help="1-P(FWE) threshold (default is 0.95)", default=[0.95], nargs=1) return ap
def getArgumentParser(ap=ap.ArgumentParser( description=DESCRIPTION, formatter_class=ap.RawTextHelpFormatter)): ap.add_argument("-i_tmi", "--inputtmi", help="Edit existing *.tmi file.", nargs=1, metavar='*.tmi', required=True) ap.add_argument("-oh", "--history", help="Output tmi file history and exits.", action='store_true') ap.add_argument( "-r", "--revert", help= "Revert tmi to earlier time-point (removed elements cannot be restored!). Make sure to check the history first (-oh) or by using tm_multimodal read-tmi-header. Input the time-point that you wish to revert the tmi file. e.g. -r 5", nargs=1, metavar='int', required=False) ap.add_argument( "-o", "--outputnewtmi", help="Output a new tmi file (instead of editing existing one).", nargs=1, metavar='*.tmi', required=False) return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-i", "--input", help="[surface] (e.g., lh.all.area.00.mgh)", nargs=1, metavar=('*.mgh'), required=True) ap.add_argument("-l", "--label", help="[label file]", nargs=1, metavar=('*.label-????'), required=True) return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-r", "--range", nargs=2, type=int, help="permutation [start] [stop]", metavar=('INT', 'INT'), required=True) ap.add_argument("-s", "--surface", nargs=1, help="surface (area or thickness)", metavar=('STR'), required=True) ap.add_argument( "-v", "--specifyvars", nargs=2, type=int, help= "Optional. Specify which regressors are permuted [first] [last]. For one variable, first=last.", metavar=('INT', 'INT')) ap.add_argument("-e", "--exchangeblock", nargs=1, help="Exchangability blocks", metavar=('*.csv'), required=False) return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-i", "--input", nargs=2, help="[predictor file] [dependent file]", metavar=('*.csv', '*.csv'), required=True) ap.add_argument("-c", "--covariates", nargs=1, help="[covariate file]", metavar=('*.csv')) ap.add_argument("-m", "--medtype", nargs=1, help="Voxel-wise mediation type", choices=['I', 'M', 'Y'], required=True) ap.add_argument("-t", "--tfce", help="H E Connectivity. Default is 2 1 26.", nargs=3, default=[2, 1, 26], metavar=('H', 'E', '[6 or 26]')) return ap
def getArgumentParser(ap=ap.ArgumentParser( description=DESCRIPTION, formatter_class=ap.RawTextHelpFormatter)): ap.add_argument("-t", "--threshold", help="The threshold distance from each vertex for storing", nargs=1, default=[9.0], metavar=('float')) ap.add_argument("-n", "--numcores", help="The number of cores used for parallel processing", nargs=1, metavar=('int'), required=True) return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): group = ap.add_mutually_exclusive_group(required=True) group.add_argument("-l", "--label", metavar=('*_label_surface.mgh'), nargs=1) group.add_argument("-a", "--annot", metavar=('*annot'), nargs=1) ap.add_argument("-i", "--input", nargs=1, help="Load 4D surface files", metavar=('?h.all.???.mgh'), required=True) ap.add_argument("-r", "--range", nargs=2, help="Input range of labels to extract. (e.g.: -r 1 12)", metavar=('INT', 'INT')) return ap
def getArgumentParser(ap=ap.ArgumentParser( description=DESCRIPTION, formatter_class=ap.RawTextHelpFormatter)): datatype = ap.add_mutually_exclusive_group(required=True) datatype.add_argument("--voxel", help="Voxel input", action="store_true") datatype.add_argument("--vertex", help="Vertex input", action="store_true") datatype.add_argument( "--bothhemi", help= "Special case in which vertex images from both hemispheres are input and processed together.", action="store_true") ap.add_argument( "-i", "--input", help="Text file of compoments to remove (as a single column).", nargs=1, metavar=('*[.txt or .csv]'), required=True) ap.add_argument("-o", "--output", help="[Output Image Basename]", nargs=1, required=True) ap.add_argument("--clean", help="Remove the ICA_temp directory.", action='store_true') return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): group = ap.add_mutually_exclusive_group(required=True) group.add_argument("-i", "--input", nargs=2, help="[Predictor(s)] [Covariate(s)] (recommended)", metavar=('*.csv', '*.csv')) group.add_argument("-r", "--regressors", nargs=1, help="Single step regression", metavar=('*.csv')) ap.add_argument( "-i_tmi", "--tmifile", help= "Vertex analysis. Input surface: e.g. --vertex [area or thickness]", nargs=1, metavar=('*.tmi'), required=True) ap.add_argument( "-sa", "--setadjacencyobjs", help= "Specify the adjaceny object to use for each mask. The number of inputs must match the number of masks in the tmi file. Note, the objects start at zero. e.g., -sa 0 1 0 1", nargs='+', type=str, metavar=('int')) ap.add_argument("-n", "--numperm", nargs=1, type=int, help="# of permutations", metavar=('INT'), required=True) parallel = ap.add_mutually_exclusive_group(required=False) parallel.add_argument("-p", "--gnuparallel", nargs=1, type=int, help="Use GNU parallel. Specify number of cores", metavar=('INT')) parallel.add_argument("-c", "--condor", help="Use HTCondor.", action="store_true") parallel.add_argument("-f", "--fslsub", help="Use fsl_sub script.", action="store_true") parallel.add_argument( "-t", "--cmdtext", help="Outputs a text file with one command per line.", action="store_true") return ap
def getArgumentParser(ap=ap.ArgumentParser( description=DESCRIPTION, formatter_class=ap.RawTextHelpFormatter)): ap.add_argument("-i", "--input", nargs=1, metavar=('*.mgh'), required=True) ap.add_argument("-t", "--threshold", nargs=1, required=True) ap.add_argument("-n", "--neg", help='output negative threshold image', required=False, action="store_true") return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): datatype = ap.add_mutually_exclusive_group(required=True) datatype.add_argument("--voxel", help="Voxel analysis", action="store_true") datatype.add_argument( "--vertex", help= "Vertex analysis. Input surface: e.g. --vertex [area or thickness]", nargs=1, metavar=('surface')) ap.add_argument( "-m", "--mediation", help= "Mediation analysis [M or I or Y]. If not specified, then multiple regression is performed.", nargs=1, choices=['I', 'M', 'Y'], metavar=('STR')) ap.add_argument("-n", "--numperm", nargs=1, type=int, help="# of permutations", metavar=('INT'), required=True) ap.add_argument( "-v", "--specifyvars", nargs=2, type=int, help= "Optional for multiple regression. Specify which regressors are permuted [first] [last]. For one variable, first=last.", metavar=('INT', 'INT')) group = ap.add_mutually_exclusive_group(required=False) group.add_argument("-p", "--gnuparallel", nargs=1, type=int, help="Use GNU parallel. Specify number of cores", metavar=('INT')) group.add_argument("-c", "--condor", help="Use HTCondor.", action="store_true") group.add_argument("-f", "--fslsub", help="Use fsl_sub script.", action="store_true") return ap
def getArgumentParser(ap = ap.ArgumentParser(description = DESCRIPTION)): ap.add_argument("-d", "--distance", nargs = 2, help = "[minimum distance(mm)] [maximum distance(mm)]", metavar = ('Float', 'Float'), required = True) ap.add_argument("-s", "--stepsize", nargs = 1, help = "[step size (mm)] default: %(default)s).", metavar = ('Float'), default = [1.0], type=float) ap.add_argument("--surfaces", nargs = 2, help = "[lh.surface] [rh.surface]") return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-l", "--label", help="Default is JHU-ICBM-labels-1mm.nii.gz", default=[ '%s/data/atlases/JHU/JHU-ICBM-labels-1mm.nii.gz' % os.environ.get('FSLDIR') ], nargs=1) ap.add_argument("-r", "--range", help="Label range. Default is 1 48", type=int, default=[1, 48], nargs=2, metavar=('INT', 'INT')) ap.add_argument("-p", "--pca", help="[Number of components] Perform PCA on mean FA ROIs.", type=int, nargs=1, metavar=('INT')) ap.add_argument("-m", "--mask", help="Specify mask", nargs=1) return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-i", "--image", help="stat_image_?h.mgh", nargs=1, metavar=('*.mgh'), required=True) ap.add_argument("--hemi", help="Hemisphere", choices=['lh', 'rh'], required=True) ap.add_argument("-t", "--threshold", help="1-P(FWE) threshold (default is 0.95)", default=['0.95'], nargs=1) return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): group = ap.add_mutually_exclusive_group(required=True) group.add_argument("-i", "--input", nargs=2, help="[Predictor(s)] [Covariate(s)] (recommended)", metavar=('*.csv', '*.csv')) group.add_argument("-r", "--regressors", nargs=1, help="Single step regression", metavar=('*.csv')) group.add_argument( "-m", "--onesample", nargs=1, help="One sample t-test. i.e., test sample mean or intercept.", metavar=('*.csv or none')) ap.add_argument("-f", "--ftest", help=""" Perform mixed-effect model ANOVA on predictors (experimental). The square root of the f-statistic image undergoes TFCE. """, action="store_true") ap.add_argument( "-t", "--tfce", help= "TFCE settings. H (i.e., height raised to power H), E (i.e., extent raised to power E), Connectivity (either 26 or 6 directions). Default: %(default)s).", nargs=3, default=[2, 1, 26], metavar=('H', 'E', '[6 or 26]')) ap.add_argument( "-v", "--voxelregressor", nargs=1, help= "Add a voxel-wise independent regressor (beta feature). A variance inflation factor (VIF) image will also be produced to check for multicollinearity (generally, VIF > 5 suggest problematic collinearity.)", metavar=('*.nii.gz')) return ap
def getArgumentParser(ap=ap.ArgumentParser(description=DESCRIPTION)): ap.add_argument("-i", "--image", help="Clusters Nifti image", nargs=1, required=True, metavar=('*_clusters.nii.gz')) ap.add_argument("-c", "--cluster", help="Which clusters to extract from results file", nargs='+', type=int, metavar=('INT')) ap.add_argument("-d", "--dir", help="python_temp directory", default=['../../python_temp'], nargs=1) return ap
logging.error("Unexpected socket close during broadcasting message to all: %s", e) def on_close(self): del self.users[self.username] self.loggedin_users.discard(self.username) self.broadcast_userlist() class IndexPage(RequestHandler): def get(self): self.render("templates/client.html") if __name__ == '__main__': argparse = argparse.ArgumentParser(__doc__) argparse.add_argument("-p", "--port", type=int, action="store", dest="port", default=8888, help="on which port run wsServer") argparse.add_argument("--loglevel", type=str, action="store", dest="loglevel", default=logging.DEBUG) args = argparse.parse_args() logging.basicConfig(format=u'%(levelname)-8s [%(asctime)s] %(message)s', level=args.loglevel) users = {} rooms = {} user2room = {} loggedin_users = set() application = Application([ (r'/websocket', WebSocketChat, dict(users=users, rooms=rooms, user2room=user2room, loggedin_users=loggedin_users)), (r'/', IndexPage)
import argparse
import os
import re
from counters import *
from counters import modules
import sys
import os.path

argparse = argparse.ArgumentParser()
argparse.add_argument("-d", "--dir", dest="dir")
argparse.add_argument("-e", "--extension", dest="extension")
argparse.add_argument("-v", "--verbose", action="store_true")
args = argparse.parse_args()

global folder
folder = args.dir or "."
global extension
extension = args.extension or "."
global verbose
verbose = args.verbose


def index(source):
    list = []
    for root, dirs, files in os.walk(source):
        relroot = os.path.abspath(os.path.join(source))
        dir = os.path.relpath(root, relroot)
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the         #
# GNU General Public License for more details.                         #
#                                                                      #
# You should have received a copy of the GNU General Public License    #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
#                                                                      #
# Copyright (C) 2014 by Marco Guerrini                                 #
########################################################################
import argparse
from libbackuppro import BackupPro
import libitunitsconversion

# Setting arguments
argparse = argparse.ArgumentParser(prog='backup-pro', description='A program to generate backups')
argparse.add_argument('--version', action='version', version='%(prog)s version 1.0.0')
argparse.add_argument('patch', action='store', type=str, help='patch to backup')
argparse.add_argument('--olddatabase', '-d', action='store',
                      help='old database patch to make an incremental backup')
argparse.add_argument('--split', '-s', action='store',
                      help='into how many bytes to split the backup. You can use the suffixes MB and GB, or CD or DVD for 700MB or 4.7GB')
# argparse.add_argument('--buildiso', '-b', action='store_true', help='whether the software has to build the iso to burn backups on CDs or DVDs')
args = argparse.parse_args()

# Start backup
bp = BackupPro(str(args.patch))
# Get all files
files = bp.getFiles()
# Get all sums
sums = bp.getSums(files)
    def add_entries_to_argparse(self, argparse):
        """
        Adds required entries to an argparse object supplied
        """
        argparse.add_argument("--qsys-args", "-q", metavar="args", default=None, type=str,
                              help="Extra options for the queuing system "
                                   "(e.g. \"-l mem=40 -l vmem=20\" or \"-l intel\").")
        argparse.add_argument("--workdir", "-d", metavar="dir", default=None, type=str,
                              help="Change job execution directory on the node")
        argparse.add_argument("--scratchdir", metavar="dir", default=None, type=str,
                              help="Change the path of the node-local scratch directory")
        argparse.add_argument("--mail", "-m", metavar="user@host", default=None, type=str,
                              help="EMail Address to which messages are sent")
        argparse.add_argument("--wt", metavar="time", default=None,
                              type=utils.interpret_string_as_time_interval,
                              help="Walltime; format: [[[days:]hours:]minutes:]seconds or integer[suffix]")
        argparse.add_argument("--mem", metavar="size", default=None,
                              type=utils.interpret_string_as_file_size,
                              help="Physical memory in the format integer[suffix]")
        argparse.add_argument("--vmem", metavar="size", default=None,
                              type=utils.interpret_string_as_file_size,
                              help="Virtual memory in the format integer[suffix] (Default: What was set for --mem)")
        argparse.add_argument("--np", metavar="#", default=None, type=int,
                              help="Number of processors/threads")
        argparse.add_argument("--name", metavar="jobname", default=None, type=str,
                              help="Name of the Job")
        argparse.add_argument("--priority", "-p", metavar="num", default=None, type=int,
                              help="Priority of the Job. Larger values imply a higher priority. "
                                   "Higher priorities have (some) influence on how early jobs are started. "
                                   "The default is no priority, which is equivalent to 0.",
                              choices=range(-1024, 1023))
        argparse.add_argument("--queue", metavar="queue[@host]", default=None, type=str,
                              help="Select queue to use to run the job")
        argparse.add_argument("--merge-error", action='store_const', default=None, const=True,
                              help="Merge stdout and stderr streams", dest="merge_error")
        argparse.add_argument("--no-merge-error", action='store_const', default=None, const=False,
                              help="Do not merge stdout and stderr streams", dest="merge_error")
        argparse.add_argument("--email", default=None, type=str, nargs='+',
                              help="When to send an email about the job",
                              choices=["begin", "end", "error"])
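
# The class owning add_entries_to_argparse and the utils module referenced by the
# type= arguments are not part of the fragment above, so this is only a hedged usage
# sketch of the calling pattern: pass an already-constructed ArgumentParser into the
# method. QueuingSystemData and its trimmed-down method body are assumptions.
import argparse


class QueuingSystemData:
    def add_entries_to_argparse(self, argparse):
        # simplified stand-in for the full method above
        argparse.add_argument("--np", metavar="#", default=None, type=int,
                              help="Number of processors/threads")
        argparse.add_argument("--name", metavar="jobname", default=None, type=str,
                              help="Name of the Job")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Submit a job")
    QueuingSystemData().add_entries_to_argparse(parser)
    args = parser.parse_args(["--np", "4", "--name", "testjob"])
    print(args.np, args.name)
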
author: lukaimin
update: 20171222
"""
import sys
from itertools import groupby
from operator import itemgetter
from collections import Counter, OrderedDict
from rugis import dbscan, Distance, GPS2GCJ
import json
import argparse as parser

sys.path.append("./scipy")
from sklearn.cluster import MeanShift

parser = parser.ArgumentParser()
parser.add_argument("-a", "--act", type=str, required=True)
parser.add_argument("-d", "--date", type=str, help="run date")
parser.add_argument("-w", "--window", type=int, default=90, help="date window")
args = parser.parse_args()

"""
offline version; after the mapper, you need to group by the key
usage:
1. [calculate all locations]
   cat training_data.TXT | python aoi_loc_cluster_model_local.py --act "m" | sort -n | python aoi_loc_cluster_model_local.py --act "runModel" --date "20160516" --window 90
2. [calculate weekend locations]
   cat training_data.TXT | python aoi_loc_cluster_model_local.py --act "mWeekend" | sort -n | python aoi_loc_cluster_model_local.py --act "runModel" --date "20160516" --window 90
3. [calculate weekday locations]
   cat training_data.TXT | python aoi_loc_cluster_model_local.py --act "mWeekday" | sort -n | python aoi_loc_cluster_model_local.py --act "runModel" --date "20160516" --window 90
import argparse
import os
import os.path
import time

global exts
exts = [".png", ".jpg", ".jpeg"]

argparse = argparse.ArgumentParser()
argparse.add_argument("-r", "--recursive", action="store_true", dest="recursive")
argparse.add_argument("--dry-run", action="store_true", dest="dry")
argparse.add_argument("-d", "--dir", dest="dir")
args = argparse.parse_args()

global recursive
recursive = args.recursive or False
global folder
folder = args.dir or "."
global dry
dry = args.dry or False


def index(source):
    for root, dirs, files in os.walk(source):
        relroot = os.path.abspath(os.path.join(source))
        dir = os.path.relpath(root, relroot)
        for file in files:
            filename = os.path.join(root, file)
#!/usr/bin/python import sys import argparse import random argparse = argparse.ArgumentParser(description="Human Para") argparse.add_argument('--keyword', type=str, required=True) argparse.add_argument('--input', nargs='+', required=True, default=[]) argparse.add_argument('--top', type=int,default=100) args = argparse.parse_args() SENT_MAP = 2 test_set = {} for i in range(10): # For each Fold question_f = open("%s/run-0/fold-%i/test.fullsent" % (args.keyword, i)) keyword_f = open("%s/run-0/fold-%i/test.sent" % (args.keyword, i)) for j, (q, k) in enumerate(zip(question_f, keyword_f)): q, k = (q.strip(), k.strip()) test_set[i,j] = [q,k, {}] for i in range(10): for output_f in args.input: if output_f.endswith("direct"): continue with open(output_f + "/run-0/fold-%i/test/out.txt" % (i)) as of: for j, line in enumerate(of): line = line.strip().split(" |COL| ")[0] test_set[i,j][SENT_MAP][output_f] = line
# Flail is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Flail. If not, see <http://www.gnu.org/licenses/>.

import argparse
import json
import pprint
import re

from netaddr import IPAddress, IPNetwork, IPSet

argparse = argparse.ArgumentParser()
argparse.add_argument('-n', '--networks',
                      help='target network in CIDR format (comma-separated if >1)')
argparse.add_argument('-i', '--input',
                      help='file containing target networks in CIDR format, one per line')
argparse.add_argument('-d', '--domain', help="substring to search in domains")
argparse.add_argument('-c', '--crop', help="path to crop.json")
args = argparse.parse_args()

if args.crop:
    cropfile = args.crop
else:
    cropfile = 'crop.json'

with open(cropfile, 'rb') as f:
    harvest = json.load(f)

if args.networks:
    nets = args.networks.split(',')
import argparse as ap

if __name__ == "__main__":
    parser = ap.ArgumentParser()
    # parser.add_argument("test", help="this is a test")
    parser.add_argument("-v", help="run with debug logging", action="store_true")  # optional
    parser.add_argument("-action",
                        help="specify action to take on EC2 instance: create, stop, start, delete, list, list-running",
                        default="list")
    args = parser.parse_args()
    # print(args.echo)
    print("all arguments={}".format(args))


import argparse

if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("--action", type=str, default="list-all",
                    choices=["list-all", "list-running"], help="legume")
    my_args = ap.parse_args()
    print(my_args.action)

    # todo below
    # list-all is default
    # start / stop - can take optional pem
    # key - can take optional filename
    # default filename needs to be set
    #
    # create - can take # of instances (not yet)
    :param fields: the fields to parse and print from the vcf file.
    """
    # begin parsing (and printing)
    with open(field + ".txt", "w+") as out:
        out.write(header())
        for record in vcf_file:
            for sample in record:
                if sample[field] is None:
                    out.write("{0}\t{1}\t{2}\t{3}\n".format(
                        record.CHROM, record.POS, NA, sample.sample))
                else:
                    out.write("{0}\t{1}\t{2}\t{3}\n".format(
                        record.CHROM, record.POS, str(sample[field]), sample.sample))


if __name__ == '__main__':
    argparse = argparse.ArgumentParser()
    argparse.add_argument('vcf_file', help="the vcf file to parse")
    argparse.add_argument('fields', nargs='+', type=str,
                          help="the sections to parse from the vcf file")
    args = argparse.parse_args()

    # feel free to change this
    NA = './.'

    for field in [field.upper() for field in args.fields]:
        vcf_file = vcf.Reader(open(args.vcf_file, 'r'))
        parse(vcf_file, field)
import imutils
import time
import cv2
from skimage.io import imread
from skimage.feature import hog
from sklearn.externals import joblib
import argparse
from nms import nms
from config import *
from PIL import Image
from resizeimage import resizeimage
import numpy as np

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file")
ap.add_argument("-a", "--min-area", type=int, default=5000, help="minimum area size")
ap.add_argument('-t', "--tempfolder", help="Folder for temp files", required=True)
args = vars(ap.parse_args())

# if the video argument is None, then we are reading from webcam
if args.get("video", None) is None:
    camera = cv2.VideoCapture(0)
    time.sleep(0.25)
# otherwise, we are reading from a video file
else:
    camera = cv2.VideoCapture(args["video"])

# initialize the first frame in the video stream
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program. If not, see <http://www.gnu.org/licenses/>.

import argparse
import sys

if __name__ == '__main__':
    argparse = argparse.ArgumentParser()
    argparse.add_argument('vcf_file', help="the VCF file to rename")
    argparse.add_argument('names', help="file with new names")
    args = argparse.parse_args()

    standard = ["#CHROM", "POS", "ID", "REF", "ALT", "QUAL", "FILTER",
                "INFO", "FORMAT"]

    # get the new names
    with open(args.names, "r") as names:
        for line in names:
            standard.append(line.replace('\n', ''))

    new_header = '\t'.join(standard) + '\n'

    # rewrite the VCF file
    with open(args.vcf_file, "r") as vcf_file:
# map for labels used
label_map = {
    1: "Person",
    2: "Motorcycle",
    3: "Bicycle",
    4: "Car",
    5: "Rickshaw",
    6: "Autorickshaw"
}

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", help="path to the video file", required=True)
ap.add_argument("-a", "--min-area", type=int, default=900, help="minimum area size")
ap.add_argument("-l", "--max-area", type=int, default=100000, help="maximum area size")
ap.add_argument('-t', "--tempfolder", help="Folder for temp files", required=True)
ap.add_argument('-m', "--modelpath", help="Path to model used", required=True)
ap.add_argument("-j", "--json", help="path to the json metadata")

fourcc = cv2.cv.CV_FOURCC('m', 'p', '4', 'v')
video = cv2.VideoWriter('just_classifier_wrt_boxes.avi', fourcc, 25, (640, 480), True)

args = vars(ap.parse_args())

# parse the json metadata
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import gzip
import cPickle as pickle

from sklearn.externals import joblib
from sklearn.ensemble import RandomForestClassifier

if __name__ == '__main__':
    import argparse
    argparse = argparse.ArgumentParser()
    argparse.add_argument('-i', '--indata', type=str, required=True)
    argparse.add_argument('-o', '--output', type=str, required=True)
    args = argparse.parse_args()

    assert os.path.exists(args.indata), '! in data...'

    X, y, labels = pickle.load(gzip.open(args.indata))

    cls = RandomForestClassifier(n_estimators=20)
    cls.fit(X, y)

    joblib.dump((cls, labels), args.output)
        self._hd = HandsDetector(smod)
        self._cd = ChordDetector(cmod)

    def run(self, vf):
        fr = FrameReader(vf)
        for frame in fr.next():
            hands = self._hd.run(frame)
            frameg = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            ch = self._cd.run(frameg, hands.left)
            self.__show(frame, hands.left.box, ch)
            cv2.waitKey(4)

    def __show(self, frame, box, label):
        tmp = frame
        p, q = tuple(box[0]), tuple(box[1])
        cv2.rectangle(tmp, p, q, (255, 0, 0), 2)
        cv2.putText(tmp, label, (p[0] + 10, p[1] - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, 255, 3)
        cv2.imshow("chord", tmp)


if __name__ == "__main__":
    import argparse
    argparse = argparse.ArgumentParser()
    argparse.add_argument("-i", "--infile", type=str, required=True)
    args = argparse.parse_args()

    test = Test()
    test.run(args.infile)
import argparse
import json
import logging
import datetime
import os
from dateutil.parser import *
from datetime import timedelta

import pandas as pd
import numpy as ny

__author__ = "kincy"
__date__ = "$Jan 11, 2011 10:18:04 AM$"

developers = json.load(open('/Users/kincy/Projects/masheryStuff/developers.json'))['kincy']

argparse = argparse.ArgumentParser()
argparse.add_argument("--startdate", type=str, help="Start Date")
argparse.add_argument("--enddate", type=str, help="End Date")
argparse.add_argument('--keys', nargs='+', help='List of keys to include in results, space separated')
argparse.add_argument('--services', nargs='+', help='List of services to include in results, space separated')
argparse.add_argument('--reports', nargs='+', choices=['status', 'errorcodes', 'methods'],
                      help='List of reports to include in results, space separated')
args = argparse.parse_args()

# areaID, uuid = 780, 'ff368e6f-4f41-4d4b-8f9b-10ea76b301f2'  # trainingarea4
# areaID, uuid = 93, '78ff7613-ae2a-4481-827b-9255b8307ebd'  # solutions
areaID, uuid = 295, 'cfea444a-e396-442f-b38b-a2dafc000195'  # nordstrom

tokenPath = "https://api.mashery.com/v3/token"
v3endpoint = "https://api.mashery.com/v3/rest"
endpoint = "https://api.mashery.com"
path = "/v2/rest/" + str(areaID)
import argparse

if __name__ == '__main__':
    argparse = argparse.ArgumentParser(description="Facebook Puzzle: maximize the total robbed amount")
    argparse.add_argument("-i", dest="input", metavar="<g_1,g_2,...,g_n>", required=True)
    args = argparse.parse_args()

    amounts = [int(x) for x in args.input.split(",")]
    mem = [-1, ] * len(amounts)

    for i in range(0, len(amounts)):
        if i == 0:
            mem[0] = amounts[0]
        elif i == 1:
            mem[1] = max(mem[0], amounts[1])
        else:
            mem[i] = max(mem[i - 2] + amounts[i], mem[i - 1])

    robs = []
    i = len(amounts) - 1
    while (i >= 0):
        if (i >= 2):
            if mem[i] == mem[i - 2] + amounts[i]:
                robs.append(i)
                i = i - 2
            else:
                i = i - 1
        elif (i == 1):
            if mem[i] == amounts[i]:
                robs.append(i)
                i = -1
            else:
#!/usr/bin/python
import sys
import argparse
import random
from collections import defaultdict

argparse = argparse.ArgumentParser(description="Human Para")
argparse.add_argument("--unstemmed_question", type=str, required=True)
argparse.add_argument("--keyword", type=str, required=True)
argparse.add_argument("--input", nargs="+", type=str, required=True, default=[])
argparse.add_argument("--top", type=int, default=300)
argparse.add_argument("--nbest", type=int, default=5)
argparse.add_argument("--stem", type=str)
argparse.add_argument("--test_dir", nargs="+", type=str)
args = argparse.parse_args()

if args.test_dir == None:
    args.test_dir = ["test"] * len(args.input)

SENT_MAP = 2
test_set = {}
for i in range(10):  # For each Fold
    question_f = open("%s/run-0/fold-%i/test.sent" % (args.unstemmed_question, i))
    keyword_f = open("%s/run-0/fold-%i/test.sent" % (args.keyword, i))
    for j, (q, k) in enumerate(zip(question_f, keyword_f)):
        q, k = (q.strip(), k.strip())
        test_set[i, j] = [q, k, defaultdict(lambda: [])]

for i in range(10):
import socket
import requests
import socks
import stem.process
import hashlib
import tempfile
import os
import argparse

from stem.descriptor.remote import DescriptorDownloader
from stem.util import term

argparse = argparse.ArgumentParser()
argparse.add_argument("-f", "--file", dest="filepath", help="File path")
argparse.add_argument("-u", "--url", dest="url", help="URL to download through nodes")
args = argparse.parse_args()

global file
file = args.filepath

global file_hash
if not file is None:
    m = hashlib.sha256()
    fis = open(file)
    m.update(fis.read())
    fis.close()
    file_hash = m.hexdigest()
else:
    file_hash = "dc8d3ab6669b0a634de3e48477e7eb1282a770641194de2171ee9f3ec970c088"
        sub_messages.append(sub_message)

    combined_message = MIMEMultipart()
    for msg in sub_messages:
        combined_message.attach(msg)

    return str(combined_message)


def get_userdata(filepath):
    with gzip.open(filepath, 'wb') as g:
        g.writelines(make_mime(['init:x-shellscript']))
    with open(filepath, 'rb') as f:
        return f.read()


argparse = argparse.ArgumentParser()
argparse.add_argument('-o', '--output', metavar='path', default='-')
argparse.add_argument('-z', '--compress', action='store_true')
argparse.add_argument('part', nargs='*', default=['init:x-shellscript'])
args = argparse.parse_args()

mime = make_mime(args.part)

if args.output == '-':
    # ignore compress flag
    print mime
else:
    if args.compress:
        with gzip.open(args.output, 'wb') as g:
            g.writelines(mime)
    else:
        with open(args.output, 'wb') as f:
            f.writelines(mime)
    csv_file.next()  # read the first line and discard

    if args.to_stdout:
        out = sys.stdout
    else:
        out = open(os.path.splitext(csv_name)[0] + ".vcf", "w+")

    out.write(header())
    write_fields(csv_file, out)

    os.remove(csv_name)
    out.close()


if __name__ == '__main__':
    argparse = argparse.ArgumentParser()
    argparse.add_argument("files", nargs='+', help="the xls(x) files to process")
    argparse.add_argument("--stdout", action="store_true", dest="to_stdout",
                          help="output goes to stdout instead of a file")
    args = argparse.parse_args()

    # Constants
    FORMAT = "GT:COV:QS:TS:CM:PM:OAS:EAS"

    # fields from the csv
    cols = ["CHR", "CO", "REF", "VAR", "COV", "QS", "ZYG", "GENE", "TRANS",
            "CM", "PM", "DB", "OAS", "EAS"]
    FIELDS = dict(zip(cols, range(len(cols))))

    for xls in args.files:
        process_xls(xls)
        except Exception as e:
            logging.info('Error: delete did not work.. on file {}'.format(fileItem['name']))
            logging.info('Error: response text {}'.format(r.text))
            raise e


if __name__ == "__main__":
    argParser = argparse.ArgumentParser(description='Process a csv file for rollback. '
                                        'The csv file should have the following columns '
                                        '(some are not used and can be left empty, but structure is important here):\n'
                                        '1. Date\n'
                                        '2. User name\n'
                                        '3. User email (not used)\n'
                                        '4. IP Address (not used)\n'
                                        '5. Action (not used)\n'
                                        '6. item name (file or folder name)\n'
                                        '7. Size (not used)\n'
                                        '8. Parent folder\n'
                                        '9. Change details\n')
    argParser.add_argument('--csv', dest='csv_file', required=True, help='csv file to process')
    args = argParser.parse_args()

    print "CSVFile Processing {}".format(args.csv_file)
    if (args.csv_file):
        restore = Restore(shouldAuthenticate=True)
        restore.rollbackFilesInCSV(args.csv_file)
    else:
        print "Error: Requires csv_file to process"
        argParser.print_help()