Code example #1
File: report2es.py Project: 971sec/multiscanner
def parse_args():
    """
    Parses arguments
    """
    import argparse
    # argparse stuff
    parser = argparse.ArgumentParser(description="Scan files and store results in elastic search")
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.add_argument('Report', help="Report file with one json report per line")
    return parser.parse_args()
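For quick testing, a parser like this can be exercised with an explicit argument list instead of sys.argv. A minimal sketch (the report filename is made up):

import argparse

parser = argparse.ArgumentParser(description="Scan files and store results in elastic search")
parser.add_argument("-v", "--verbose", action="store_true")
parser.add_argument('Report', help="Report file with one json report per line")
args = parser.parse_args(["-v", "reports.jsonl"])  # hypothetical argv for testing
print(args.verbose, args.Report)  # True reports.jsonl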
Code example #2
def parse_args():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--name", help="attribute name")
    parser.add_argument("--formula", help="formula to compute new attribute")
    parser.add_argument("input_mesh", help="input mesh")
    parser.add_argument("output_mesh", help="output mesh")
    return parser.parse_args()
Code example #3
File: argdoc.py Project: chriskiehl/ArgDoc
def main():
	parser = argparse.ArgumentParser(
		description='argdoc turns all argparse arguments into beautiful, end-user friendly documentation',
		formatter_class=argparse.RawTextHelpFormatter)
	
	parser.add_argument(
		'filename', 
		help="Name of the file for which you want to create documentation")
	
	parser.add_argument(
		'-f', '--format', 
		help="Format of the outputted documentation.\nOptions: ['md', 'html']" + 
			"\nDefault: 'html'",
		choices=['md', 'html'],
		default="html",
		metavar="")

	parser.add_argument(
		'-n', '--noob', 
		help=("Set whether or not to include the beginner instructions in the Docs"
			'\n(See templates for example of beginner info.)'),
		action="store_false", 
		default=True)

	parser.add_argument(
		'-q', '--quiet',
		help="Supresses success message",
		action='store_false')

	args = parser.parse_args()
	parse_pyfile(args.filename, format=args.format, noob=args.noob)
Code example #4
File: examine.py Project: nivvedan/narratr
def main():
	parser = argparse.ArgumentParser()
	parser.add_argument('-v', '--verbose', action="store_true", default=False)
	parser.add_argument('source', action="store")
	args = parser.parse_args(sys.argv[1:])

	global verbose
	verbose = args.verbose

	source = args.source

	show_tokens(source)
	ast, symtab = show_ast(source)
	show_code(ast, symtab)
Code example #5
File: new.py Project: waymanls/auditmusic
def main():
    parser = argparse.ArgumentParser(description='Music Scanner')
    parser.add_argument('-d','--directory', help='Directory', required=True)
    args = parser.parse_args()
    x = 0
    L = []
    badalbums = []

    for dirName,subdirList,fileList in os.walk(args.directory):
        #Prune bad directories from list
        if re.search(r'([Ss]ingles|lost\+found|System\ Volume\ Information|.*RECYCLE?)',dirName):
            pass
        else:
            try:
                # Separate out Artist Name from Album Title
                em = re.search(r'^.*/(newmusic|music|MUSIC|Music)/(.*)_-_(.*)/?',dirName)
                # Prune off extra "/". if group 2 contains a "/" character, don't print
                if re.search(r'/',em.group(2)):
                    pass
                else:
                    #print em.group(1) ,"~~~", em.group(2)
                    for fname in fileList:
                        # Get actual music files, not other files
                        if re.search(r'\.(flac$|wav$|mp3$|m4a$|mp4$|wma$)',fname):
                            L.append(fname)
                            x = x+1
                    if x == 0:
                        pass
                    else:
                        pass
                        # Print out total files contained in Album
                        #print x , "songs in", em.group(1) , em.group(2)
                        # Do you want to print this data to stdout or write it to a file? 
                        group2 = re.sub(r"_",' ', em.group(2))
                        group3 = re.sub(r"_",' ', em.group(3))
                        if re.search(r'/',group3):
                            group3 = group3.rstrip('/')
                        #print group2,group3
                        # Function that compares my albums to musicBrainz goes here!
                        foundtracks = querymusicbrainz.gather(group2,group3)
                        if int(x) != int(foundtracks):
                          print(bcolors.WARNING + "You're missing some tracks bro!" + bcolors.ENDC)
                        print(x, "songs in", dirName, foundtracks, "in MusicBrainz")
                    L = []
                    x = 0
            except AttributeError:
                print "Cannot parse ", dirName
                badalbums.append(dirName)
Code example #6
File: main.py Project: olls/maze-interpreter-v2
def get_args():
    parser = argparse.ArgumentParser(
        description='A Maze interpreter (http://esolangs.org/wiki/Maze)')
    parser.add_argument('file', type=open,
        help='the program to run')
    parser.add_argument('-d', '--debug', action='store_true',
        help='display the maze during interpretation.')
    parser.add_argument('-l', '--log-length', default=10, type=int,
        help='Max length of debug log.')
    parser.add_argument('-c', '--no-colors', action='store_false',
        help='shows the maze without color when in debug mode.')
    parser.add_argument('-f', '--fps', default=10, type=int,
        help='the fps of the maze when in debug mode.')

    args = parser.parse_args()
    return args
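A side note on the 'file' argument above: type=open makes argparse open the file while parsing. argparse.FileType is the more common spelling, adds mode and encoding control, and reports an unopenable path as a clean usage error. A small sketch under that assumption:

import argparse

p = argparse.ArgumentParser()
p.add_argument('file', type=argparse.FileType('r'), help='the program to run')
# p.parse_args(['missing.maze']) would exit with "can't open 'missing.maze'"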
Code example #7
File: analyze.py Project: rainest/fluff
def args():
    parser = argparse.ArgumentParser(description="Get information needed to \
            parse the log")
    parser.add_argument('script', nargs='?')  # Swallows sys.argv[0]: the script
    # name is included because the full sys.argv is passed to parse_args below
    parser.add_argument('infile', nargs='?', default=sys.stdin)
    parser.add_argument('parser', nargs='?', default='IrssiBitlbee')
    parser.add_argument('viewer', nargs='?', default='Null')
    return parser.parse_args(sys.argv)
Code example #8
def main():
    parser = argparse.ArgumentParser(description = 'Fetch soccer league, team and player data from Wikipedia.')
    parser.add_argument('-L', dest = 'specific_league', type = str,
            default = '', help = 'fetch only one league')
    parser.add_argument('-l', dest = 'fetch_only_leagues', action = 'store_true', help = 'fetch only leagues')
    parser.add_argument('-o', dest = 'output_dir', action = 'store', type = str, default = '', help = 'output directory')

    args = parser.parse_args()
    Globals.setDataDir(args.output_dir)
    if args.fetch_only_leagues:
        Globals.fetchTeams = False
        Globals.dumpTextFiles = True
    try:
        fetchLeagueData(args.specific_league)  # str is already Unicode in Python 3
    except:
        # http://www.doughellmann.com/articles/how-tos/python-exception-handling/index.html
        try:
            raise
        finally:
            try:
                cleanup()
                print(Globals.progress)
            except Exception as e:
                print("Error: couldn't save progress:", str(e), file=sys.stderr)
Code example #9
File: cmd_parser.py Project: fancysimon/flame
def AddBuildArgs(self, parser):
    parser.add_argument("-j", "--jobs", type=int, dest='jobs',
            default=0, help="Number of jobs to run simultaneously.")
    parser.add_argument("-p", "--profile", type=str, dest='profile',
            default='release', help="Build profile: debug or release.")
    parser.add_argument("--generate-scons", dest='generate_scons',
            action="store_true", help="Generate scons file.")
Code example #10
File: gen_mmu.py Project: rsalveti/zephyr
def parse_args():
    global args

    parser = argparse.ArgumentParser(description = __doc__,
                                     formatter_class = argparse.RawDescriptionHelpFormatter)

    parser.add_argument("-e", "--big-endian", action="store_true",
                        help="Target encodes data in big-endian format"
                        "(little endian is the default)")

    parser.add_argument("-i", "--input",
                        help="Input file from which MMU regions are read.")
    parser.add_argument("-o", "--output",
                        help="Output file into which the page tables are written.")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Lists all the relavent data generated.")
    args = parser.parse_args()
Code example #11
File: get_aemet_data.py Project: anrojaslat/aemet
def parse_options():
    parser = argparse.ArgumentParser(
        description=("Download the hourly weather data for all the"
                     " stations available in aemet.es.")
    )
    parser.add_argument('-d', '--debug', action='store_true',
                        help="Enable debug mode.")
    parser.add_argument('-v', '--verbose', default="2",
                        help="Verbosity level. Options: 0=ERROR, 1=WARNING,"
                             " 2=INFO or 3=DEBUG. Default: 2.")
    parser.add_argument('-o', '--output', default=settings.DEFAULT_OUTPUT_DIR,
                        help="Output directory path where files will be"
                             " downloaded. Default: aemet_data.")
    parser.add_argument('-f', '--format', default='txt',
                        help="Store file in the specified format."
                             " Options: csv or txt. Default: txt.")
    return parser.parse_args()
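Since -f only accepts csv or txt, choices=['csv', 'txt'] would let argparse enforce that instead of merely documenting it in the help text. A sketch of that variant, not the original code:

import argparse

p = argparse.ArgumentParser()
p.add_argument('-f', '--format', default='txt', choices=['csv', 'txt'])
# p.parse_args(['-f', 'xml']) would exit with "invalid choice: 'xml'"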
Code example #12
File: scan2es.py Project: kaoscoach/multiscanner
def parse_args():
    """
    Parses arguments
    """
    import argparse
    # argparse stuff
    parser = argparse.ArgumentParser(description="Scan files and store results in elastic search")
    parser.add_argument("-r", "--recursive", action="store_true")
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.add_argument('Files', help="Files and Directories to attach", nargs='+')
    return parser.parse_args()
Code example #13
File: main.py Project: antiface/Celltone
def main():
    import argparse
    parser = argparse.ArgumentParser(description = 'Process Celltone code')
    parser.add_argument('--update', '-u', action = 'store_true',
                        help = 'Allow for dynamic updating of source file during runtime')
    parser.add_argument('--file', '-f', help = 'Output to MIDI file instead of the MIDI device')
    parser.add_argument('--length', '-l', help = 'Stop after <LENGTH> seconds')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-v', action = 'store_true', help = 'verbose')
    group.add_argument('-vv', action = 'store_true', help = 'more verbose')
    group.add_argument('-vvv', action = 'store_true', help = 'even more verbose')
    parser.add_argument('filename', nargs = '?', help = 'if omitted reads from stdin')
    args = parser.parse_args()

    verbosity = 0
    if args.v:
        verbosity = 1
    if args.vv:
        verbosity = 2
    if args.vvv:
        verbosity = 3

    try:
        if not args.filename or args.filename == '-':
            if args.update:
                parser.error('Cannot dynamically update from stdin')
            code = ''.join(sys.stdin.readlines())
        else:
            if not os.path.exists(args.filename):
                parser.error('Error: No such file \'%s\'' % args.filename)
            with open(args.filename) as f:
                code = ''.join(f.readlines())
    except KeyboardInterrupt:
        sys.exit(0)

    ct = Celltone(code, verbosity, args.filename, args.update, args.file, args.length)
    ct.start()
    sys.exit(0)
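The -v/-vv/-vvv group above hand-rolls graded verbosity; argparse's action='count' is a common alternative that folds the three flags into one option. A minimal sketch, not the author's code:

import argparse

p = argparse.ArgumentParser()
p.add_argument('-v', '--verbose', action='count', default=0,
               help='repeat for more verbosity (-v, -vv, -vvv)')
print(p.parse_args(['-vvv']).verbose)  # 3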
Code example #14
def parse_args():
    parser = argparse.ArgumentParser(
        description='DNS for libvirt',
        )
    parser.add_argument(
        '--config',
        metavar='CONFIGFILE',
        help='path to YAML config file',
        )
    parser.add_argument(
        '--server',
        action='store_true', default=False,
        help='Run as the server (http/sql access).',
        )
    parser.add_argument(
        'remainder',
        nargs=argparse.REMAINDER,
        help='Remainder arguments for webpy, ip:port for listen address'
        )
    args = parser.parse_args()
    return args
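For reference, nargs=argparse.REMAINDER collects everything from the first unrecognized argument onward, which is how the webpy ip:port ends up in args.remainder. A minimal illustration (the argv values are made up):

import argparse

p = argparse.ArgumentParser()
p.add_argument('--server', action='store_true')
p.add_argument('remainder', nargs=argparse.REMAINDER)
print(p.parse_args(['--server', '0.0.0.0:8080']).remainder)  # ['0.0.0.0:8080']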
Code example #15
File: argparser.py Project: youtang1993/EnAET
def argparser():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-F', type=str, required=True,
        help='training data path')  #File path for our MAINMAST code
    parser.add_argument(
        '-M',
        type=str,
        default='Train_Model',
        help='model path for evaluation and the path to save model')
    parser.add_argument('--mode',
                        type=int,
                        required=True,
                        help='0:default mode to run\n')
    parser.add_argument('--epochs',
                        default=1024,
                        type=int,
                        help='number of total epochs to run')
    parser.add_argument('--lr',
                        '--learning-rate',
                        default=0.002,
                        type=float,
                        help='initial learning rate')
    parser.add_argument('--lr1',
                        default=0.1,
                        type=float,
                        help='initial learning rate for new optimizer1')
    parser.add_argument('--reg',
                        default=1e-7,
                        type=float,
                        help='initial l2 regularization lambda')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.1,
                        help='LR is multiplied by gamma on schedule.')
    parser.add_argument('--momentum',
                        default=0.9,
                        type=float,
                        metavar='M',
                        help='momentum')
    parser.add_argument('--patience',
                        default=10,
                        type=int,
                        help='patient epoch for updating the lr scheduler')
    parser.add_argument('--weight_decay',
                        '--wd',
                        default=5e-4,
                        type=float,
                        metavar='W',
                        help='weight decay (default: 5e-4)')
    parser.add_argument('--seed', type=int, default=888, help='manual seed')
    parser.add_argument('--batch_size',
                        type=int,
                        default=512,
                        help='batch size')
    parser.add_argument('--choose',
                        type=str,
                        default='0',
                        help='specified gpu')
    parser.add_argument('--num_workers',
                        type=int,
                        default=16,
                        help='number of data loading workers')
    parser.add_argument('--cuda', type=bool, default=True, help='enables cuda')  # caution: type=bool treats any non-empty string as True
    parser.add_argument(
        '--rot',
        type=float,
        default=180,
        help='range of angle for rotation, default:[-180, 180]')
    parser.add_argument('--shear',
                        type=float,
                        default=30,
                        help='range of angle for shearing, default: [-30, 30]')
    parser.add_argument(
        '--translate',
        type=float,
        default=0.2,
        help='range of ratio of translation to the height/width of the image')
    parser.add_argument('--shrink',
                        type=float,
                        default=0.8,
                        help='the lower bound of scaling')
    parser.add_argument('--enlarge',
                        type=float,
                        default=1.2,
                        help='the higher bound of scaling')
    parser.add_argument(
        '--shift',
        type=float,
        default=4,
        help='shift parameter for projective data changing method')
    parser.add_argument('--log_path',
                        type=str,
                        default='train_log',
                        help='training log record path')
    parser.add_argument('--model_path',
                        type=str,
                        default='train_model',
                        help="training result record path")
    parser.add_argument('--type',
                        type=int,
                        default=0,
                        help='specify the model\n'
                        '0: Wide ResNet-28-2\n'
                        '1: Wide ResNet-28-2-Large')

    parser.add_argument('--resume',
                        type=int,
                        default=0,
                        help='reload trained model to continue training')
    parser.add_argument(
        '--KL_Lambda',
        default=1.0,
        type=float,
        help='hyper parameter to control the KL divergence loss term')
    parser.add_argument(
        '--lambda',
        type=float,
        default=10,
        help=
        'warm factor in combined loss of projective AET and classifier loss')
    parser.add_argument(
        '--lambda1',
        type=float,
        default=7.5,
        help='warm factor in combined loss of affine AET and classifier loss')
    parser.add_argument(
        '--lambda2',
        type=float,
        default=5,
        help=
        'warm factor in combined loss of similarity AET and classifier loss')
    parser.add_argument(
        '--lambda3',
        type=float,
        default=2,
        help='warm factor in combined loss of euclidean AET and classifier loss'
    )
    parser.add_argument(
        '--lambda4',
        type=float,
        default=0.5,
        help='warm factor in combined loss of CCBS AET and classifier loss')
    parser.add_argument(
        '--max_lambda',
        type=float,
        default=1.0,
        help='balanced factor in combined loss of AET and classifier loss')
    parser.add_argument(
        '--max_lambda1',
        type=float,
        default=0.75,
        help=
        'balanced factor in combined loss of affine AET and classifier loss')
    parser.add_argument(
        '--max_lambda2',
        type=float,
        default=0.5,
        help=
        'balanced factor in combined loss of similarity AET and classifier loss'
    )
    parser.add_argument(
        '--max_lambda3',
        type=float,
        default=0.2,
        help=
        'balanced factor in combined loss of euclidean AET and classifier loss'
    )
    parser.add_argument(
        '--max_lambda4',
        type=float,
        default=0.05,
        help='balanced factor in combined loss of CCBS AET and classifier loss'
    )
    parser.add_argument('--portion',
                        type=float,
                        default=0.08,
                        help='percentage of data with labels')
    parser.add_argument(
        '--beta',
        type=float,
        default=75,
        help='hyper parameter for the consistency loss in MixMatch part')
    parser.add_argument('--ema_decay',
                        default=0.999,
                        type=float,
                        help='EMA decay hyper-parameter')
    parser.add_argument('--T',
                        default=0.5,
                        type=float,
                        help='Temperature settings applied for the sharpening')
    parser.add_argument('--alpha',
                        default=0.75,
                        type=float,
                        help='Alpha settings for the mixup part')
    parser.add_argument('--mix_iteration',
                        default=1024,
                        type=int,
                        help='Required iteration for mixmatch in each epoch')
    parser.add_argument('--start_epoch',
                        default=0,
                        type=int,
                        help='starting epoch when resuming training')
    parser.add_argument('--dataset',
                        default='cifar10',
                        type=str,
                        help='Choose dataset for training')
    parser.add_argument(
        '--tensorboard',
        default=1,
        type=int,
        help='Use tensorboard to keep results or not. Default: True')
    parser.add_argument(
        '--mix_mode',
        default=0,
        type=int,
        help=
        'Mix up mode: 0:choose default mixmatch mixup mode\n 1: choose mixup with mosaic operations'
    )
    parser.add_argument(
        '--Mixmatch_warm',
        default=16,
        type=int,
        help='Steps that necessary for warming up the enloss term in Mixmatch')
    parser.add_argument(
        '-S',
        default='',
        type=str,
        help='the path to save all the training logs and models')
    parser.add_argument('--use_ema',
                        default=1,
                        type=int,
                        help='Use ema or not during training:default=1')
    args = parser.parse_args()
    params = vars(args)
    return params
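The vars(args) call at the end converts the Namespace into a plain dict, which is what makes an option like --lambda (a Python keyword) usable at all. A minimal illustration:

import argparse

p = argparse.ArgumentParser()
p.add_argument('--lambda', type=float, default=10)
params = vars(p.parse_args([]))
print(params['lambda'])  # 10; args.lambda would be a SyntaxError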
Code example #16
def get_parser():
    parser = argparse.ArgumentParser(
        description='Deep learning multiclass classification')
    parser.add_argument('-dataset', '--datasetPath', help='Path to dataset')

    parser.add_argument('-rate',
                        '--learningRate',
                        help='Set the learning rate')

    parser.add_argument('-epochs', '--epochs', help='Set the epochs')

    parser.add_argument('-batch-size',
                        '--batchSize',
                        help='Set the batch size')

    parser.add_argument('-classes',
                        '--numClasses',
                        help='Set the number of classes')

    parser.add_argument('-zscore', '--zScore', help='Set the Z score')

    parser.add_argument('-decay', '--weightDecay', help='Set the weight decay')

    main(parser)
Code example #17
def dataReceived(data):
    global post_url
    print("1: {}".format(data))
    session = requests.Session()
    session.verify = False
    # session.post('http://{}:3000/users/1/web_requests/7/super_s3cret_mf1_string'.format(post_url), data=data)

def dataReceived2(data):
    global post_url
    print("2: {}".format(data))
    session = requests.Session()
    session.verify = False


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('port', help="port", default=7682, nargs="?")
    parser.add_argument('username', help='username', default='ubnt', nargs="?")
    parser.add_argument('pwd', help='password', default='ubnt', nargs="?")
    parser.add_argument('post_url', help='url to post data changes', nargs="?")
    args = parser.parse_args()
    loop = asyncio.get_event_loop()
    #
    discovery = MFiDiscovery()
    post_url = args.post_url
    asyncio.get_event_loop().run_until_complete(asyncio.sleep(10))
    mymFI = []

    d1 = discovery.devices[0](discovery.devices[0].address, args.port, args.username, args.pwd)
    d1.callback = dataReceived
    d2 = discovery.devices[1](discovery.devices[1].address, args.port, args.username, args.pwd)
    d2.callback = dataReceived2
Code example #18
def parseArguments(args=None):

    # valid datetime args
    def valid_date(s):
        try:
            return datetime.strptime(s, "%d/%m/%Y")

        except ValueError:
            msg = "Not a valid date: '{0}'.".format(s)
            raise argparse.ArgumentTypeError(msg)

    # parse arguments
    parser = argparse.ArgumentParser(description='data-prepare-dem')

    parser.add_argument('dem', action="store")
    parser.add_argument('product', action="store")

    parser.add_argument('-r', '--res', type=int, help='resolution', default=20)

    parser.add_argument('-f', '--file', help='scene filename', default=None)

    parser.add_argument('-p',
                        '--path',
                        help='scene root path',
                        default='/data/raw/fiji')

    parser.add_argument('-s',
                        '--start',
                        help='start date DD/MM/YYYY',
                        default=None,
                        type=valid_date)

    parser.add_argument('-e',
                        '--end',
                        help='end date DD/MM/YYYY',
                        default=None,
                        type=valid_date)

    return parser.parse_args(args)
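As context, argparse calls the type callable on the raw string and turns an ArgumentTypeError into a clean usage error, so valid_date doubles as parser and validator. A minimal sketch (the date is made up):

from datetime import datetime
import argparse

def valid_date(s):
    try:
        return datetime.strptime(s, "%d/%m/%Y")
    except ValueError:
        raise argparse.ArgumentTypeError("Not a valid date: '{0}'.".format(s))

p = argparse.ArgumentParser()
p.add_argument('-s', '--start', type=valid_date)
print(p.parse_args(['-s', '01/02/2020']).start)  # 2020-02-01 00:00:00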
Code example #19
            if dict_conf[option] == -1:
                DebugPrint("skip: %s" % option)
        except:
            print("exception on %s!" % option)
            dict_conf[option] = None
    return dict_conf


# ./dada_dbdisk.py -a ../config/pipeline.conf -b 0 -c 0 -d /beegfs/DENG/docker
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description=
        'To transfer data from shared memory to disk with a docker container')
    parser.add_argument('-a',
                        '--pipeline_conf',
                        type=str,
                        nargs='+',
                        help='The configuration of pipeline')
    parser.add_argument('-b',
                        '--beam',
                        type=int,
                        nargs='+',
                        help='The beam id from 0')
    parser.add_argument('-c',
                        '--part',
                        type=int,
                        nargs='+',
                        help='The part id from 0')
    parser.add_argument('-d',
                        '--directory',
                        type=str,
Code example #20
File: resolver.py Project: jskripchuk/CISC481
def main():
    parser = argparse.ArgumentParser(
        description=
        "Resolution Theorem Prover for CISC481. James Skripchuk 2019")
    parser.add_argument(
        "--file",
        type=str,
        help="A file containing a set of premise clauses and a goal clause.")
    parser.add_argument(
        "--folder",
        type=str,
        help="A folder containing different files of test cases to resolve.")
    parser.add_argument("--method",
                        type=str,
                        help="Search Method. 'bfs' or 'dfs'. Defaults to dfs.")
    parser.add_argument(
        "--max_depth",
        type=int,
        help=
        "How long to search until terminating early. Used to prevent infinite loops. Defaults to 300."
    )
    parser.add_argument("--test_cases",
                        action="store_true",
                        help="Run through the provided test cases.")
    parser.add_argument("--helper_function_test",
                        action="store_true",
                        help="Run through tests on helper functions.")

    args = parser.parse_args()

    method = "dfs"
    max_depth = 300
    folder = ""

    if args.helper_function_test:
        helper_function_test()

    if args.method:
        method = args.method

    if method != "dfs" and method != "bfs":
        print(
            "Search method must be 'dfs' or 'bfs'. Run 'python3 resolver.py' -h for details"
        )
        quit()

    if args.max_depth:
        max_depth = args.max_depth

    if args.folder:
        folder = args.folder

    if args.test_cases:
        folder = "./test_cases"

    if folder:
        test_resolve_folder(folder, method, max_depth)
    elif args.file:
        resolve_file(args.file, method, max_depth)

    if not folder and not args.file and not args.test_cases:
        print(
            "No file or folder specified. Use --folder, --file, or --test_cases arguments. Run 'python3 resolver.py -h' for details."
        )
Code example #21
def main():
    parser = argparse.ArgumentParser(
        description="Twitter sentiment analysis classification")
    parser.add_argument(
        "--probability_threshold",
        type=float,
        default=None,
        help=
        "an numeric between 0.5 and 1 that will be used as a threshold to classify the tweet. If probability is lower than it, then the twitter is classified as neutral (polarity=2)"
    )
    parser.add_argument(
        "--test",
        dest="test",
        action="store_true",
        help=
        "run the classification for the test file available and save it to /test folder"
    )
    parser.add_argument("--search_keyword",
                        type=str,
                        help="a word used to search Twitter")
    parser.add_argument(
        "--fetch_size",
        type=int,
        default=100,
        help=
        "an integer with the amount of tweets to fetch during each run (default is 100)"
    )

    args = parser.parse_args()
    if "probability_threshold" in args:
        probability_threshold = args.probability_threshold
    else:
        probability_threshold = 0

    if "test" in args:
        if args.test == True:
            inbound_dataset = load_test_dataset()
            outbound_dataset = classify_tweets(inbound_dataset,
                                               probability_threshold)
            Y_test = outbound_dataset["polarity"]
            predicted = outbound_dataset["predicted"]
            test_folder = os.path.join(os.getcwd(), 'test_model')
            if not os.path.exists(test_folder):
                os.makedirs(test_folder)

            # Save evaluations
            sys.stdout = open(
                os.path.join(test_folder, "confusion_matrix.txt"), "w")
            print("Confusion Matrix:\n", confusion_matrix(Y_test, predicted))
            sys.stdout.close()

            sys.stdout = open(
                os.path.join(test_folder, "classification_report.txt"), "w")
            print("Classification Report:\n",
                  classification_report(Y_test, predicted))
            sys.stdout.close()

            sys.stdout = open(os.path.join(test_folder, "precision.txt"), "w")
            print("Precision:\n", accuracy_score(Y_test, predicted))
            sys.stdout.close()

            # Save Dataset
            file_name = os.path.join(test_folder, "outbound_test.csv")
            outbound_dataset.to_csv(file_name, index=False)
        else:
            if "search_keyword" not in args:
                print("A search keyword must be informed!")
                sys.exit()
            else:
                search_keyword = args.search_keyword
                if search_keyword.strip() == "":
                    print("A search keyword must be informed!")
                    sys.exit()
            if "fetch_size" not in args:
                print(
                    "The size of fetch was not informed! Default (100) will be used"
                )
                fetch_size = 100
            else:
                fetch_size = args.fetch_size

            inbound_dataset = get_twitter(search_keyword, fetch_size)
            outbound_dataset = classify_tweets(inbound_dataset,
                                               probability_threshold)
            load_into_database(outbound_dataset, search_keyword)

            output_folder = os.path.join(os.getcwd(), 'outbound')
            if not os.path.exists(output_folder):
                os.makedirs(output_folder)
            file_name = "outbound_" + time.strftime("%Y%m%d_%H%M%S") + ".csv"
            file_name = os.path.join(output_folder, file_name)
            outbound_dataset.to_csv(file_name, index=False)
Code example #22
File: mep.py Project: fredsobon/stuff
    print('WARNING: A LOCK FILE ALREADY EXISTS')
    do_purge()
elif not os.path.isdir(MEP_CONF):
    #print(xtermcolor.colorize('WARNING: THE MEP DIRECTORY DOES NOT EXIST', 0xaf0000))
    print('WARNING: THE MEP DIRECTORY DOES NOT EXIST')
    do_purge()
elif not os.path.isdir(MEP_REP):
    #print(xtermcolor.colorize('WARNING: THE MEP LOG DIRECTORY DOES NOT EXIST', 0xaf0000))
    print('WARNING: THE MEP LOG DIRECTORY DOES NOT EXIST')
    do_purge()

##### retrieve user + tag
parser = argparse.ArgumentParser(usage="mep.py -a application -t tag")
parser.add_argument('-a', '--application', action='store')
parser.add_argument('-t', '--tag', action='store')
args = parser.parse_args()

if (args.application):
    application = args.application
    do_lock()
else:
    application = input('Enter project: ')
    do_lock()

if not os.path.isdir(os.path.join(SVN_LOCAL, application)):
    #print(xtermcolor.colorize('WARNING: THE PROJECT REPOSITORY DOES NOT EXIST', 0xaf0000))
    print('WARNING: THE PROJECT REPOSITORY DOES NOT EXIST')
    do_purge()
Code example #23
		# cv2.namedWindow("frame", cv2.WND_PROP_FULLSCREEN)
		# cv2.setWindowProperty("frame", cv2.WND_PROP_FULLSCREEN, cv2.cv.CV_WINDOW_FULLSCREEN)
		cv2.imshow('frame', draw)
		cv2.imwrite(SAVE_PATH + utils.get_frame_fig_name(i), draw)
		k = cv2.waitKey(30) & 0xff
		i += 1
		if k == 27:
			break

	cap.release()
	cv2.destroyAllWindows()

if __name__ == "__main__":
	parser = argparse.ArgumentParser()
	parser.add_argument("video", help = "A: Suturing: B: Pizza making")
	parser.add_argument("type", help = "1: MOG 2: MOG2 3: Ken's algorithm")
	args = parser.parse_args()
	cap = None
	if args.video == 'A':
		cap = cv2.VideoCapture('/home/animesh/DeepMilestones/jigsaws/Suturing_video/frames/Suturing_E003_capture2/cropped_scaled.avi')
	elif args.video == 'B':
		cap = cv2.VideoCapture('/home/animesh/C3D/examples/c3d_feature_extraction/input/frm/pizza8/videos/cropped_scaled.avi')
	else:
		print "Invalid video type"
		sys.exit()

	if (int(args.type) == 1):
		params = (500, 10, 0.9, 1)
		run_video_with_bsub(cap, cv2.BackgroundSubtractorMOG, params = None)
	elif (int(args.type) == 2):
Code example #24
File: extract.py Project: xivk/crab-tools
try:
    import psyco
    psyco.full()
except ImportError:
    pass

from dbfpy.dbf import Dbf
from constants.extensions import CSV

import sys
import argparse
from elementtree.SimpleXMLWriter import XMLWriter
from lambert import Belgium1972LambertProjection

parser = argparse.ArgumentParser(description='Reads the AGIV CRAB database in DBF and converts this to .csv/.osm format.')
parser.add_argument('path', help='The path to the CRAB DBF files.')
parser.add_argument('--output-csv', default='crab.csv', help='The path to the output csv file.')
parser.add_argument('--filter-postcode', help='The postcode to filter on, will restrict data to this postcode only.', default='')
parser.add_argument('--write-postcodes', action='store_true', default=False)
parser.add_argument('--output-osm', default='crab.osm', help='The path to the output OSM XML file.')
args = parser.parse_args()

straatnm_dbf = args.path + 'straatnm.dbf'
huisnr_dbf = args.path + 'huisnr.dbf'
pkancode_dbf = args.path + 'pkancode.dbf'
gemnm_dbf = args.path + 'gemnm.dbf'
gem_dbf = args.path + 'gem.dbf'
tobjhnr_dbf = args.path + 'tobjhnr.dbf'
terrobj_dbf = args.path + 'terrobj.dbf'

do_terrobj = 1
Code example #25
    logger.info("ovs-ofctl add-flow br-int priority=66,dl_type=0x800,in_port="+lastport+",nw_src="+src+",nw_dst="+dst+",actions=output:"+brintport)
    print "ovs-ofctl add-flow br-int priority=66,dl_type=0x800,in_port="+lastport+",nw_src="+src+",nw_dst="+dst+",actions=output:"+brintport
    os.system("ovs-ofctl add-flow br-int priority=66,dl_type=0x800,in_port="+lastport+",nw_src="+src+",nw_dst="+dst+",actions=output:"+brintport)
    fo.write("ovs-ofctl --strict del-flows br-int priority=66,dl_type=0x800,in_port="+lastport+",nw_src="+src+",nw_dst="+dst+"\n")

    return (returnflag)

def get_ip():
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))
    return s.getsockname()[0]

###   Main Part ###

parser = argparse.ArgumentParser()   #handler for arguments passed 
parser.add_argument("-s", "--server",help="pass the local server ip. If not, it finds it automatically",type=str)  # option configurations, needs to be required
parser.add_argument("-i", "--brint",help="pass the connection of br-int to br-ex port, or use default '2' ",type=str)
parser.add_argument("-e", "--brex",help="pass the connection of br-ex to br-int port, or use default '2' ",type=str)
parser.add_argument("-t", "--breth",help="pass the connection of br-eth0 to br-ex port, or use default '3' ",type=str)
args = parser.parse_args()  # pass the arguments to the parser
# default values for ports 
brexport = "2"
brintport = "2"
breth0port = "3"

print ""
print ""
print "===SONATA PROJECT==="
print "SFC-AGENT Initializing..."
print ""
print ""
Code example #26
                    status = get_value(id, 'status', s.provider_failed)
                    message = get_value(id, 'message', s.provider_failed)
                    value = status if message is None else "%s // %s" % (status, message)
                    row.append(value)
                csvwriter.writerow(row)
            csvfile.close()
        return True
    except:
        traceback.print_exc()
        return None


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--inputFolder', default='none',
                        help='path to folder that contains all summaries [Required]')
    parser.add_argument('-o', '--outputFile', default='summary_all.csv', help='path to output file [Required]')
    args = parser.parse_args()
    input_folder = args.inputFolder
    output_path = args.outputFile
    summaries = []
    for file_name in os.listdir(input_folder):
        full_path = os.path.realpath(os.path.join(input_folder, file_name))
        summary = GlobalSummary.load_from_json_file(full_path)
        if summary:
            summaries.append(summary)
    statistic_all_rerun_to_csv(summaries, output_path)
    print "Final report is write to %s " % (output_path)



Code example #27
R = 0.12*W_ref

canvas = ROOT.TCanvas('canvas', 'canvas', 50, 50, W, H)
canvas.SetFillColor(0)
canvas.SetBorderMode(0)
canvas.SetFrameFillStyle(0)
canvas.SetFrameBorderMode(0)
canvas.SetLeftMargin( L/W )
canvas.SetRightMargin( R/W )
canvas.SetTopMargin( T/H )
canvas.SetBottomMargin( B/H )
canvas.SetTickx(0)
canvas.SetTicky(0)

parser = argparse.ArgumentParser()
parser.add_argument('file', help='JSON file you want to draw')
parser.add_argument('output', help='Output filename')

args = parser.parse_args()

data = {}
with open(args.file, 'r') as f:
    data = json.load(f)

if data['dimension'] != 2:
    raise Exception("Only 2-d scale-factors / efficiencies are supported")

if 'formula' in data and data['formula']:
    raise Exception("Formula are not supported")

x_min = data['binning']['x'][0]
Code example #28
## Main part 
logger.info("")
logger.info("===SONATA PROJECT===")    
logger.info("Starting SFC Node Agent")

server = get_ip()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (server, 55555)
logger.info('starting up on %s port %s' % server_address)
sock.bind(server_address)
sock.listen(5)

#configurations
parser = argparse.ArgumentParser()   #handler for arguments passed
parser.add_argument("-m", "--compute",help="pass the name of the other node",type=str)  # option configurations, needs to be required
parser.add_argument("-i", "--brint",help="pass the connection of br-int to br-tun port, or use default '1' ",type=str)
parser.add_argument("-c", "--control",help="pass the connection of br-tun to contoller, or use default '2' ",type=str)
parser.add_argument("-n", "--node",help="pass the connection of br-int to the other node, or use default '3' ",type=str)   # TODO number of nodes. and port corresponding to nodes

args = parser.parse_args()  # pass the arguments to the parser
# default values for ports
server = get_ip()

if args.compute:  # parse the server adress passed
    compute = args.compute
if args.brint:
    brintport = args.brint
else:
    brintport = '1'
if args.control:
Code example #29
File: extract.py Project: gplv2/aptum.github.io
import os 
import io
import sys
import string
import argparse
import time
import json
import shapefile
from collections import namedtuple

from lambert import Belgium1972LambertProjection

parser = argparse.ArgumentParser(description='Reads the AGIV CRAB database in Shapefile format and converts this to a number of json files.')
parser.add_argument('path', help='Path to the CrabAdr.shp file.')
parser.add_argument('--output-dir', default='data/', help='The path to the output files.')
parser.add_argument('--osm-output', action="store_true", help='Also write one OSM file next to the tree of JSON files')
args = parser.parse_args()

# Check the output directory first, before all calculations
outputDir = args.output_dir

if (outputDir[-1] != '/'):
    outputDir += '/'

if os.path.exists(outputDir):
    sys.exit("The directory " + outputDir + " already exists. Please delete the directory before proceeding.")

# Statistical variables
stats = {
Code example #30
def argparser(dataset=None,
              metric=None,
              epsilon=0.3,
              k=50,
              q=0.8,
              clusters=10,
              iter=30,
              repeat=10,
              alpha=0.01,
              seed=0,
              ratio=0.2,
              verbose=2000,
              delta=0.01):
    parser = argparse.ArgumentParser()

    # image dataaset
    parser.add_argument('--dataset',
                        type=str,
                        default=dataset,
                        help='dataset name')
    parser.add_argument('--seed',
                        type=int,
                        default=seed,
                        help='random seed number')
    parser.add_argument('--ratio',
                        type=float,
                        default=ratio,
                        help='ratio of validation dataset')

    # perturbation family
    parser.add_argument('--metric',
                        type=str,
                        default=metric,
                        help='type of perturbations')
    parser.add_argument('--epsilon',
                        type=float,
                        default=epsilon,
                        help='perturbation strength')

    # k nearest neighbour
    parser.add_argument('--k',
                        type=int,
                        default=k,
                        help='number of nearest neighbors for knn')
    parser.add_argument('--q',
                        type=float,
                        default=q,
                        help='initial covered density quantile')

    # kmeans clustering
    parser.add_argument('--clusters',
                        type=int,
                        default=clusters,
                        help='number of clusters for kmeans')
    parser.add_argument('--iter',
                        type=int,
                        default=iter,
                        help='number of iterations for kmeans')
    parser.add_argument('--repeat',
                        type=int,
                        default=repeat,
                        help='number of repeated experiments')

    # other arguments
    parser.add_argument('--alpha',
                        type=float,
                        default=alpha,
                        help='risk threshold')
    parser.add_argument('--delta', type=float, default=delta)
    parser.add_argument('--verbose', type=int, default=verbose)
    parser.add_argument('--proctitle', type=str, default="")

    args = parser.parse_args()

    if args.dataset is not None:
        args.proctitle += args.dataset + '/'

        if args.metric is not None:
            args.proctitle += args.metric + '/' + 'epsilon_' + str(
                args.epsilon)

            banned = [
                'proctitle', 'dataset', 'metric', 'epsilon', 'alpha', 'seed',
                'ratio', 'k', 'iter', 'repeat', 'verbose', 'delta'
            ]
            if metric == 'euclidean':
                banned += ['q']

            for arg in sorted(vars(args)):
                if arg not in banned and getattr(args, arg) is not None:
                    args.proctitle += '_' + arg + '_' + str(getattr(args, arg))
        else:
            raise ValueError('Need to specify the family of perturbations.')

    else:
        raise ValueError('Need to specify the image dataset.')

    return args
Code example #31
    previous_container_name = "paf-diskdb"
    current_container_name  = "paf-dbdisk"
    kfname_b2f              = "baseband2baseband.beam{:02d}part{:02d}.key".format(beam, part)

    ddir                    = ConfigSectionMap(pipeline_conf, "BASEBAND2BASEBAND")['dir']
    key_b2f                 = ConfigSectionMap(pipeline_conf, "BASEBAND2BASEBAND")['key']
    dvolume                 = '{:s}:{:s}'.format(ddir, ddir)
    
    com_line = "docker run --rm -it --ipc=container:{:s} -v {:s} -u {:d}:{:d} --cap-add=IPC_LOCK --ulimit memlock=-1:-1 --name {:s} xinpingdeng/paf-base taskset -c {:d} dada_dbdisk -k {:s} -D {:s} -s".format(previous_container_name, dvolume, uid, gid, current_container_name, cpu, key_b2f, ddir)
    print(com_line)
    os.system(com_line)
    
# ./baseband2baseband_dbdisk.py -a ../config/pipeline.conf -b 0 -c 0 -d 8 9 -e J1939+2134.par
if __name__ == "__main__":    
    parser = argparse.ArgumentParser(description='To transfer data from shared memory to disk with a docker container')
    parser.add_argument('-a', '--pipeline_conf', type=str, nargs='+',
                        help='The configuration of pipeline')    
    parser.add_argument('-b', '--beam', type=int, nargs='+',
                        help='The beam id from 0')
    parser.add_argument('-c', '--part', type=int, nargs='+',
                        help='The part id from 0')
    parser.add_argument('-d', '--cpu', type=int, nargs='+',
                        help='Bind threads to cpu')
    parser.add_argument('-e', '--par_fname', type=str, nargs='+',
                        help='The name of pulsar par file')

    args          = parser.parse_args()
    
    t_dbdisk            = threading.Thread(target = dbdisk, args = (args,))
    t_baseband2baseband = threading.Thread(target = baseband2baseband, args = (args,))

    t_dbdisk.start()
Code example #33
def parse_args(availableCommands):
    parser = argparse.ArgumentParser(description=__doc__)
    commandsHelp = "Available commands: %s" % (", ".join(availableCommands))
    parser.add_argument(metavar="CMD", dest="command", help=commandsHelp)
    parser.add_argument(metavar="ZIPFILE",
                        dest="files",
                        nargs="+",
                        help="Space separated list of zip files to handle")
    parser.add_argument(
        "-r",
        "--randomize",
        dest="randomize",
        action="store_true",
        help="Randomize the processing order of zip files and responses")
    parser.add_argument(
        "-c",
        "--convert2odt",
        dest="convert2odt",
        action="store_true",
        help=
        "Converts almost all non-odt:s to the .odt format for further processing. This takes time."
    )
    parser.add_argument("--progress",
                        metavar="N",
                        dest="progress",
                        type=int,
                        default=0,
                        help="Show number of files processed (every N file)")
    parser.add_argument("--names",
                        dest="printNames",
                        action="store_true",
                        help="Print filenames of all processed files")
    parser.add_argument(
        "-j",
        dest="processes",
        default=max(multiprocessing.cpu_count() - 1, 1),
        type=int,
        help="Number of processes to use. Defaults to number of CPUs - 1.")
    parser.add_argument("-n",
                        "--num",
                        dest="numberOfFiles",
                        type=int,
                        metavar="NUM",
                        default=0,
                        help="Abort after NUM files analyzed")
    parser.add_argument("-q",
                        "--queue-size",
                        dest="queueSize",
                        type=int,
                        default=10,
                        metavar="SIZE",
                        help="Size of the queue of files to analyze")
    parser.add_argument("--file-pattern",
                        dest="filePattern",
                        metavar="PATTERN",
                        default="*",
                        help="File pattern for files to analyze")
    parser.add_argument("--offset",
                        dest="offset",
                        metavar="NUM",
                        type=int,
                        help="Skip the first NUM files")
    parser.add_argument(
        "--wipe-db",
        dest="wipeDatabase",
        action="store_true",
        help="Wipe the SQLite file before saving analysis results")

    return parser.parse_args()
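The -j default above is computed from the machine at parser-construction time rather than at parse time. The same pattern in isolation:

import argparse
import multiprocessing

p = argparse.ArgumentParser()
p.add_argument('-j', dest='processes', type=int,
               default=max(multiprocessing.cpu_count() - 1, 1))
print(p.parse_args([]).processes)  # CPU count - 1 on this machine (at least 1)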
Code example #34
def argparser():
    parser = argparse.ArgumentParser()
    parser.add_argument('-F', type=str, required=True,
                        help='map path')  #File path for our MAINMAST code
    parser.add_argument(
        '--mode',
        type=int,
        required=True,
        help='0: Predict structures for EM MAP\n'
        '1: Predict structures for EM maps with pdb structure\n'
        '2: Predict structure for experimental maps with 4 models\n'
        '3: Predict and evaluate structure for experimental maps with 4 models\n'
    )
    parser.add_argument(
        '--resize',
        type=int,
        default=0,
        help=
        "0: resizing maps with numba optimized (some maps size are not supported);\n"
        "1: resizing maps with scipy (relatively slow but support almost all maps)."
    )
    parser.add_argument('-P',
                        type=str,
                        default="",
                        help="PDB path for evaluating Model's performance")
    parser.add_argument(
        '-M',
        type=str,
        default="best_model",
        help="Trained model path which saved all the trained models")
    parser.add_argument(
        '--type',
        type=int,
        help=
        '0:simulated map at 6 A 1: simulated map at 10 A 2: simulated map at 6-10 A 3:experimental map'
    )
    parser.add_argument('--gpu',
                        type=str,
                        default='0',
                        help='gpu id choose for training')
    parser.add_argument('--class',
                        type=int,
                        default='4',
                        help='number of classes')
    parser.add_argument('--batch_size',
                        type=int,
                        default=256,
                        help='batch size for training')
    parser.add_argument('--contour',
                        type=float,
                        default=0.0,
                        help='Contour level for real map')
    parser.add_argument('--cardinality',
                        default=32,
                        type=int,
                        help='ResNeXt cardinality')
    parser.add_argument('--drop_rate',
                        type=float,
                        default=0.3,
                        help="Drop out rate for the phase2 Model")
    parser.add_argument(
        '--fold',
        type=int,
        default=1,
        help='specify the fold Model used for predicting the real map')
    args = parser.parse_args()
    # try:
    #     import ray,socket
    #     rayinit()
    # except:
    #     print('ray needs to be installed')  # We do not need this since the GAN cannot be parallelized.
    params = vars(args)
    return params
Code example #35
import numpy as np
import random
from heap import heap_struct
import pdb
import matplotlib.pyplot as plt
import matplotlib.animation as animation

import argparse
import os
parser = argparse.ArgumentParser(description='node number')
parser.add_argument('--node_num',
                    '-n',
                    type=int,
                    default=10,
                    help='filename in dataset folder')
args = parser.parse_args()
node_num = args.node_num

print(node_num)


def dijkstra(network, weght, start, goal):

    fig = plt.figure()
    ims = []

    flag = []
    d_goal = [10000 for i in range(len(network))]

    im = plt.bar(range(len(d_goal)), d_goal, width=0.5, color='black')
Code example #36
            sec_prd, nchunk, ndf_chk_rbuf, ndf_chk_tbuf, ndf_chk_prd,
            ctrl_socket, hdr_fname, instrument)

    print(capture_command)
    os.system(capture_command)

    # Delete PSRDADA buffer
    os.system("dada_db -d -k {:s}".format(key))


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='To capture data from given beam and given part')
    parser.add_argument('-a',
                        '--system_conf',
                        type=str,
                        nargs='+',
                        help='The configuration of PAF system')
    parser.add_argument('-b',
                        '--pipeline_conf',
                        type=str,
                        nargs='+',
                        help='The configuration of pipeline')
    parser.add_argument('-c',
                        '--hdr',
                        type=int,
                        nargs='+',
                        help='Record packet header or not')
    parser.add_argument('-d',
                        '--bind',
                        type=int,
Code example #37
def linkedinsearch():
    if request.method == 'POST':
        # check if the post request has the file part
        keyword = request.form['searchkey']
        options = Options()
        options.headless = True
        parser = argparse.ArgumentParser()
        parser.add_argument('-url',
                            '--url',
                            help='URL to the online repository of images')
        args = vars(parser.parse_args())
        url = args['url']
        url = "https://www.linkedin.com/login"

        #userid = str(input("Enter email address or number with country code: "))
        #password = getpass.getpass('Enter your password:')
        firefoxProfile = webdriver.FirefoxProfile()
        firefoxProfile.set_preference('permissions.default.image', 2)

        driver = webdriver.Firefox(firefoxProfile)
        driver.get(url)
        driver.implicitly_wait(3)
        driver.find_element_by_id("username").send_keys(
            '*****@*****.**')
        driver.find_element_by_id("password").send_keys('amway2775035')
        driver.find_element_by_xpath(
            "/html/body/div/main/div/form/div[3]/button").click()
        #, 'Manager', 'Web Developer', 'React Developer','Java Developer', 'IOS Developer'

        tmpd = {}
        d = pd.DataFrame(columns=['name', 'linkedin', 'skills'])

        driver.get(
            'https://www.linkedin.com/search/results/people/?keywords=' +
            keyword + '&origin=SUGGESTION')
        #driver.find_element_by_xpath("/html/body/header/div/form/div/div/div/div/div[1]/div/input").send_keys(i+Keys.ENTER)
        content = driver.find_element_by_class_name('blended-srp-results-js')
        source_code = content.get_attribute("innerHTML")
        soup = BeautifulSoup(source_code, "html.parser")

        data = soup.findAll('a', {'class': 'search-result__result-link'})
        count = 0
        for j in range(len(data)):
            if j % 2 == 0:
                pass
            driver.get("https://www.linkedin.com" + str(data[j]['href']))
            time.sleep(2)
            driver.execute_script(
                "window.scrollTo(0, document.body.scrollHeight);")
            time.sleep(2)
            try:
                element = driver.find_element_by_class_name(
                    'pv-skill-categories-section')
                actions = ActionChains(driver)
                actions.move_to_element(element).click()
            except:
                element = driver.find_element_by_class_name(
                    'pv-profile-section__card-heading')
                actions = ActionChains(driver)
                actions.move_to_element(element).click()
            finally:
                driver.execute_script("arguments[0].scrollIntoView();",
                                      element)
                myElem = WebDriverWait(driver, 20).until(
                    EC.presence_of_element_located(
                        (By.CLASS_NAME,
                         'pv-profile-section__card-action-bar')))
            #driver.find_element_by_class_name('pv-skill-categories-section').click()
            content = driver.find_element_by_class_name('core-rail')
            print(str(content))
            tmp = scrap(content.get_attribute("innerHTML"), data[j]['href'])
            d.loc[len(d)] = tmp

            main = content.get_attribute("innerHTML")

        a = pd.DataFrame((d['skills'].fillna("Python DataAnalysis"), d['name'],
                          d['linkedin']))
        d = d.reset_index(drop=True)
        a = a.T

        skills = pd.DataFrame(a['skills'].str.split(" ", n=-1, expand=True))

        skills.insert(0, 'Name', a['name'])
        skills.insert(1, 'Linkedin', a['linkedin'])
        linkdata = d.values.tolist()
        return render_template("l_search.html", l=linkdata)
Code example #38
def create_parser():
    parser = argparse.ArgumentParser(description='Run the prediction using NRC and features')
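    # NOTE: every flag below defaults to the boolean True, but no type= is
    # given, so values passed on the command line arrive as strings.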
    parser.add_argument('-ng','--ngrams', 
                       help='Should generate ngrams', default=True)
    parser.add_argument('-cg','--chargrams',
                       help='Should generate chargrams', default=True)
    parser.add_argument('-cc','--count_caps', 
                       help='Count number of all cap letters words', default=True)
    parser.add_argument('-cm','--count_mentions', default=True,
                       help='Count number of mentions')
    parser.add_argument('-ch','--count_hash', default=True,
                       help='Count number of hash words')
    parser.add_argument('-cp','--count_pos', default=True,
                       help='Count number of words per pos')
    parser.add_argument('-el','--elongated', default=True,
                       help='Count elongated words')    

    parser.add_argument('-le','--lexicon', 
                       help='Use lexicons BING, MPQA, NRC', default=True)
    parser.add_argument('-cl','--cluster', default=True,
                       help='Use word cluster')
    parser.add_argument('-pu','--punctuation', default=True,
                       help='Create punctuation features')
    parser.add_argument('-em','--emoticon', default=True,
                       help='Count number of emoticons')

    parser.add_argument('-so','--so_cal', default=True,
                       help='Create SO_CAL Features') 
    parser.add_argument('-ss','--syn_strength', default=True,
                       help='Use syn strength features') 
    parser.add_argument('-lw','--liwc', default=False,
                       help='Use LIWC features') 
                     
    return parser
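
A minimal usage sketch for this parser factory (the flag values are invented; command-line values arrive as strings since no type= is given):

# Hypothetical usage of create_parser():
parser = create_parser()
args = parser.parse_args(['--liwc', 'True'])
print(args.ngrams)  # True  -- untouched default, a real bool
print(args.liwc)    # 'True' -- passed on the command line, so a string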
Code example #39
import numpy as np
from Kmeans_def import k_means
from Kmeans_def import plot_classtering
import argparse
import os
parser = argparse.ArgumentParser(description='data loading trial')
parser.add_argument('--filename',
                    '-f',
                    type=str,
                    default='piyo',
                    help='filename in dataset folder')
args = parser.parse_args()
filename = args.filename
filepath = os.path.join('../../dataset/implementation/k-means/data',
                        '%s.npy' % filename)
dataset = np.load(filepath)


def make_kernel(name):
    if name == 'linear':

        def kernel(x, y):
            K = np.dot(x, y)
            return K
    elif name == 'RBF':
        gamma = float(input('gamma (positive real number)  :'))

        def kernel(x, y):
            K = np.exp(-gamma * (sum((x - y)**2)))
            return K
    return kernel
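
A short usage sketch for make_kernel (hypothetical values):

# Build a kernel function and evaluate it on two points.
kernel = make_kernel('linear')  # 'RBF' would additionally prompt for gamma
x, y = np.array([1.0, 2.0]), np.array([3.0, 4.0])
print(kernel(x, y))  # 1*3 + 2*4 = 11.0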
Code example #40
File: analyze.py Project: tfla/eucrcon
def parse_args(availableCommands):
    parser = argparse.ArgumentParser(description=__doc__)
    commandsHelp = "Available commands: %s" % (", ".join(availableCommands))
    parser.add_argument(metavar="CMD",
                        dest="command",
                        help=commandsHelp)
    parser.add_argument(metavar="ZIPFILE",
                        dest="files",
                        nargs="+",
                        help="Space separated list of zip files to handle")
    parser.add_argument("-r",
                        "--randomize",
                        dest="randomize",
                        action="store_true",
                        help="Randomize the processing order of zip files and responses")
    parser.add_argument("-c",
                        "--convert2odt",
                        dest="convert2odt",
                        action="store_true",
                        help="Converts almost all non-odt:s to the .odt format for further processing. This takes time.")
    parser.add_argument("--progress",
                        metavar="N",
                        dest="progress",
                        type=int,
                        default=0,
                        help="Show number of files processed (every N file)")
    parser.add_argument("--names",
                        dest="printNames",
                        action="store_true",
                        help="Print filenames of all processed files")
    parser.add_argument("-j",
                        dest="processes",
                        default=max(multiprocessing.cpu_count() - 1, 1),
                        type=int,
                        help="Number of processes to use. Defaults to number of CPUs - 1.")
    parser.add_argument("-n",
                        "--num",
                        dest="numberOfFiles",
                        type=int,
                        metavar="NUM",
                        default=0,
                        help="Abort after NUM files analyzed")
    parser.add_argument("-q",
                        "--queue-size",
                        dest="queueSize",
                        type=int,
                        default=10,
                        metavar="SIZE",
                        help="Size of the queue of files to analyze")
    parser.add_argument("--file-pattern",
                        dest="filePattern",
                        metavar="PATTERN",
                        default="*",
                        help="File pattern for files to analyze")
    parser.add_argument("--offset",
                        dest="offset",
                        metavar="NUM",
                        type=int,
                        help="Skip the first NUM files")
    parser.add_argument("--wipe-db",
                        dest="wipeDatabase",
                        action="store_true",
                        help="Wipe the SQLite file before saving analysis results")

    return parser.parse_args()
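
For reference, a hypothetical invocation of this script (the command name and zip file names are invented):

# python analyze.py analyze responses1.zip responses2.zip -r -j 4 --progress 100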
Code example #41
File: image_classifier.py Project: wpfhtl/distiller
def init_classifier_compression_arg_parser():
    '''Common classifier-compression application command-line arguments.
    '''
    SUMMARY_CHOICES = ['sparsity', 'compute', 'model', 'modules', 'png', 'png_w_params']

    parser = argparse.ArgumentParser(description='Distiller image classification model compression')
    parser.add_argument('data', metavar='DIR', help='path to dataset')
    parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18', type=lambda s: s.lower(),
                        choices=models.ALL_MODEL_NAMES,
                        help='model architecture: ' +
                        ' | '.join(models.ALL_MODEL_NAMES) +
                        ' (default: resnet18)')
    parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                        help='number of data loading workers (default: 4)')
    parser.add_argument('--epochs', type=int, metavar='N', default=90,
                        help='number of total epochs to run (default: 90)')
    parser.add_argument('-b', '--batch-size', default=256, type=int,
                        metavar='N', help='mini-batch size (default: 256)')

    optimizer_args = parser.add_argument_group('Optimizer arguments')
    optimizer_args.add_argument('--lr', '--learning-rate', default=0.1,
                    type=float, metavar='LR', help='initial learning rate')
    optimizer_args.add_argument('--momentum', default=0.9, type=float,
                    metavar='M', help='momentum')
    optimizer_args.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')

    parser.add_argument('--print-freq', '-p', default=10, type=int,
                        metavar='N', help='print frequency (default: 10)')
    parser.add_argument('--verbose', '-v', action='store_true', help='Emit debug log messages')

    load_checkpoint_group = parser.add_argument_group('Resuming arguments')
    load_checkpoint_group_exc = load_checkpoint_group.add_mutually_exclusive_group()
    # TODO(barrh): args.deprecated_resume is deprecated since v0.3.1
    load_checkpoint_group_exc.add_argument('--resume', dest='deprecated_resume', default='', type=str,
                        metavar='PATH', help=argparse.SUPPRESS)
    load_checkpoint_group_exc.add_argument('--resume-from', dest='resumed_checkpoint_path', default='',
                        type=str, metavar='PATH',
                        help='path to latest checkpoint. Use to resume paused training session.')
    load_checkpoint_group_exc.add_argument('--exp-load-weights-from', dest='load_model_path',
                        default='', type=str, metavar='PATH',
                        help='path to checkpoint to load weights from (excluding other fields) (experimental)')
    load_checkpoint_group.add_argument('--pretrained', dest='pretrained', action='store_true',
                        help='use pre-trained model')
    load_checkpoint_group.add_argument('--reset-optimizer', action='store_true',
                        help='Flag to override optimizer if resumed from checkpoint. This will reset epochs count.')

    parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                        help='evaluate model on test set')
    parser.add_argument('--activation-stats', '--act-stats', nargs='+', metavar='PHASE', default=list(),
                        help='collect activation statistics on phases: train, valid, and/or test'
                        ' (WARNING: this slows down training)')
    parser.add_argument('--activation-histograms', '--act-hist',
                        type=distiller.utils.float_range_argparse_checker(exc_min=True),
                        metavar='PORTION_OF_TEST_SET',
                        help='Run the model in evaluation mode on the specified portion of the test dataset and '
                             'generate activation histograms. NOTE: This slows down evaluation significantly')
    parser.add_argument('--masks-sparsity', dest='masks_sparsity', action='store_true', default=False,
                        help='print masks sparsity table at end of each epoch')
    parser.add_argument('--param-hist', dest='log_params_histograms', action='store_true', default=False,
                        help='log the parameter tensors histograms to file '
                             '(WARNING: this can use significant disk space)')
    parser.add_argument('--summary', type=lambda s: s.lower(), choices=SUMMARY_CHOICES, action='append',
                        help='print a summary of the model, and exit - options: ' + ' | '.join(SUMMARY_CHOICES))
    parser.add_argument('--export-onnx', action='store', nargs='?', type=str, const='model.onnx', default=None,
                        help='export model to ONNX format')
    parser.add_argument('--compress', dest='compress', type=str, nargs='?', action='store',
                        help='configuration file for pruning the model (default is to use hard-coded schedule)')
    parser.add_argument('--sense', dest='sensitivity', choices=['element', 'filter', 'channel'],
                        type=lambda s: s.lower(), help='test the sensitivity of layers to pruning')
    parser.add_argument('--sense-range', dest='sensitivity_range', type=float, nargs=3, default=[0.0, 0.95, 0.05],
                        help='an optional parameter for sensitivity testing '
                             'providing the range of sparsities to test.\n'
                             'This is equivalent to creating sensitivities = np.arange(start, stop, step)')
    parser.add_argument('--extras', default=None, type=str,
                        help='file with extra configuration information')
    parser.add_argument('--deterministic', '--det', action='store_true',
                        help='Ensure deterministic execution for re-producible results.')
    parser.add_argument('--seed', type=int, default=None,
                        help='seed the PRNG for CPU, CUDA, numpy, and Python')
    parser.add_argument('--gpus', metavar='DEV_ID', default=None,
                        help='Comma-separated list of GPU device IDs to be used '
                             '(default is to use all available devices)')
    parser.add_argument('--cpu', action='store_true', default=False,
                        help='Use CPU only. \n'
                        'Flag not set => uses GPUs according to the --gpus flag value. '
                        'Flag set => overrides the --gpus flag')
    parser.add_argument('--name', '-n', metavar='NAME', default=None, help='Experiment name')
    parser.add_argument('--out-dir', '-o', dest='output_dir', default='logs', help='Path to dump logs and checkpoints')
    parser.add_argument('--validation-split', '--valid-size', '--vs', dest='validation_split',
                        type=float_range(exc_max=True), default=0.1,
                        help='Portion of training dataset to set aside for validation')
    parser.add_argument('--effective-train-size', '--etrs', type=float_range(exc_min=True), default=1.,
                        help='Portion of training dataset to be used in each epoch. '
                             'NOTE: If --validation-split is set, then the value of this argument is applied '
                             'AFTER the train-validation split according to that argument')
    parser.add_argument('--effective-valid-size', '--evs', type=float_range(exc_min=True), default=1.,
                        help='Portion of validation dataset to be used in each epoch. '
                             'NOTE: If --validation-split is set, then the value of this argument is applied '
                             'AFTER the train-validation split according to that argument')
    parser.add_argument('--effective-test-size', '--etes', type=float_range(exc_min=True), default=1.,
                        help='Portion of test dataset to be used in each epoch')
    parser.add_argument('--confusion', dest='display_confusion', default=False, action='store_true',
                        help='Display the confusion matrix')
    parser.add_argument('--num-best-scores', dest='num_best_scores', default=1, type=int,
                        help='number of best scores to track and report (default: 1)')
    parser.add_argument('--load-serialized', dest='load_serialized', action='store_true', default=False,
                        help='Load a model without DataParallel wrapping it')
    parser.add_argument('--thinnify', dest='thinnify', action='store_true', default=False,
                        help='physically remove zero-filters and create a smaller model')

    distiller.quantization.add_post_train_quant_args(parser)
    return parser
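
A hedged usage sketch (the dataset path and flag values are invented; '--arch' must name an entry in models.ALL_MODEL_NAMES):

# Hypothetical usage of the compression argument parser:
parser = init_classifier_compression_arg_parser()
args = parser.parse_args(['/data/imagenet', '--arch', 'resnet18',
                          '--epochs', '90', '-b', '256', '--gpus', '0'])
print(args.arch, args.epochs, args.batch_size)  # resnet18 90 256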
Code example #42
File: test.py Project: ki-lm/blueoil
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================

import os
import re
import glob
import argparse
import subprocess

parser = argparse.ArgumentParser()
parser.add_argument('restore_path', type=str)
parser.add_argument('--host', type=str, default="lmdgx01")
parser.add_argument('--target', type=str, default="lm_x86_avx")
parser.add_argument('--home_dir', type=str, default="/home/ki42/works/blueoil")
parser.add_argument('-n', '--dry_run', action="store_true")
args = parser.parse_args()


def run():
    cmd_download_prj = "rsync -ravz -e ssh "
    # cmd_download_prj += "--update --exclude='*.o' --exclude='*.cpp' "
    cmd_download_prj += "--update --exclude='*.o' "
    cmd_download_prj += "{}:{}/{}.prj tmp/".format(args.host, args.home_dir,
                                                   args.restore_path)
    print(cmd_download_prj)
Code example #43
File: xgrep.py Project: rrricharrrd/xwords
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--answer", action='store_true', help="answer")
    parser.add_argument("word")
    return parser.parse_args()
Code example #44
from deep_models import models
import numpy as np
import tensorflow as tf
import os
import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Input Arguments

    # Model Specs
    parser.add_argument('--block',
                        help='Block to use in deep layer',
                        type=str,
                        required=True,
                        choices=['van', 'f_E', 'Sf_EM', 'Wf_EM'])

    parser.add_argument('--depth',
                        help='Depth of model',
                        type=int,
                        required=True)

    parser.add_argument('--resolution',
                        help='resolution of images in tfrecords files',
                        type=int,
                        required=True,
                        nargs=2)

    parser.add_argument('--dt',
Code example #45
import argparse
import os
import sys

this_gate = ''
line_input = ''  # input name in line
line_output = ''  # output name in line
connected_gate = [] # used to store the pre_gate and the next_gate
reg_gateName = r'(gate[0-9]+)'
reg_gateType = r'([a-z]+[0-9]+)'
pair_list = []
check = -1  #  used to check whether it is a legal gate
has_pre_gate = False
has_next_gate = False
random_sequence = []
random_counter = 0
i = 0
counter = 0
parser = argparse.ArgumentParser(usage='python wire_CircuitScanner [-h] <circuit.v> [number]', description='This program will camouflage <circuit.v> with dummy wires')
parser.add_argument('<circuit.v>', help='input circuit to be camouflaged')
parser.add_argument('number', action='store', type=int, help='define the maximum number of gates to be selected, trade off between time and difficulty')
args = parser.parse_args()
Num_pair = args.number
circuitIn = sys.argv[1]
CircuitPath = os.path.abspath(circuitIn)

if not os.path.isfile(CircuitPath):
    print('Invalid input circuit file!!!\n')


# open circuit and scan line by line
with open(circuitIn, 'r') as infile:
    inV = infile.read()
    Vlines = inV.replace('\r','').split(';\n')
Code example #46
File: ars.py Project: wyz2368/open_spiel_egta
                     delta_std=params['delta_std'], 
                     logdir=logdir,
                     rollout_length=params['rollout_length'],
                     shift=params['shift'],
                     params=params,
                     seed = params['seed'])
        
    ARS.train(params['n_iter'])
       
    return 


if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--env_name', type=str, default='HalfCheetah-v1')
    parser.add_argument('--n_iter', '-n', type=int, default=1000)
    parser.add_argument('--n_directions', '-nd', type=int, default=8)
    parser.add_argument('--deltas_used', '-du', type=int, default=8)
    parser.add_argument('--step_size', '-s', type=float, default=0.02)
    parser.add_argument('--delta_std', '-std', type=float, default=.03)
    parser.add_argument('--n_workers', '-e', type=int, default=18)
    parser.add_argument('--rollout_length', '-r', type=int, default=1000)

    # for Swimmer-v1 and HalfCheetah-v1 use shift = 0
    # for Hopper-v1, Walker2d-v1, and Ant-v1 use shift = 1
    # for Humanoid-v1 used shift = 5
    parser.add_argument('--shift', type=float, default=0)
    parser.add_argument('--seed', type=int, default=237)
    parser.add_argument('--policy_type', type=str, default='linear')
    parser.add_argument('--dir_path', type=str, default='data')
Code example #47
File: scraper.py Project: rrricharrrd/xwords
def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('number', type=int, nargs='*')
    return parser.parse_args()
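
As a quick, standalone illustration of the nargs='*' behaviour used here:

import argparse

p = argparse.ArgumentParser()
p.add_argument('number', type=int, nargs='*')
print(p.parse_args([]))            # Namespace(number=[])
print(p.parse_args(['12', '34']))  # Namespace(number=[12, 34])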
Code example #48
def main():
    
    parser = argparse.ArgumentParser()

    ## Env setup
    parser.add_argument('--env_name', type=str, default='point-v0')
    parser.add_argument('--num_agents', '-na', type=int, default=5)
    parser.add_argument('--seed', '-sd', type=int, default=0)
    parser.add_argument('--max_iter', '-it', type=int, default=2000)
    parser.add_argument('--policy', '-po', type=str, default='FC')
    parser.add_argument('--embedding', '-em', type=str, default='a_s')
    parser.add_argument('--num_workers', '-nw', type=int, default=4)
    parser.add_argument('--filename', '-f', type=str, default='')
    parser.add_argument('--num_evals', '-ne', type=int, default=0)
    parser.add_argument('--flush', '-fl', type=int, default=1000) # resets ray periodically, since it can fill up memory
    parser.add_argument('--ob_filter', '-ob', type=str, default='MeanStdFilter') # or 'NoFilter'
    parser.add_argument('--w_nov', '-wn', type=float, default=-1) # negative => adaptive method; else fixed at the value you pick in (0,1); 1 is fully unsupervised (i.e. no reward)
    parser.add_argument('--dpp_kernel', '-ke', type=str, default='rbf')
    parser.add_argument('--states', '-ss', type=str, default='random-20') # 'random-X' where X is how many states
    parser.add_argument('--update_states', '-us', type=int, default=20) # how often to update; we only used 20

    args = parser.parse_args()
    params = vars(args)

    params = get_experiment(params)

    ray.init()
    os.environ['RAY_USE_NEW_GCS'] = 'True'

    state_word = str(params['states'].split('-')[0]) if params['w_nov'] > 0 else ''
    params['dir'] = params['env_name'] + '_Net' + str(params['layers']) + 'x' + str(params['h_dim']) + '_Agents' + str(params['num_agents']) + '_Novelty' + str(params['w_nov']) + state_word + 'kernel_' + params['dpp_kernel'] + '_lr' + str(params['learning_rate']) + '_' + params['filename'] + params['ob_filter']
    
    if not(os.path.exists('data/'+params['dir'])):
        os.makedirs('data/'+params['dir'])
        os.makedirs('data/'+params['dir']+'/weights')
        os.makedirs('data/'+params['dir']+'/results')
    
    train(params)
Code example #49
R = 0.12*W_ref

canvas = ROOT.TCanvas('canvas', 'canvas', 50, 50, W, H)
canvas.SetFillColor(0)
canvas.SetBorderMode(0)
canvas.SetFrameFillStyle(0)
canvas.SetFrameBorderMode(0)
canvas.SetLeftMargin( L/W )
canvas.SetRightMargin( R/W )
canvas.SetTopMargin( T/H )
canvas.SetBottomMargin( B/H )
canvas.SetTickx(0)
canvas.SetTicky(0)

parser = argparse.ArgumentParser()
parser.add_argument('file', help='JSON file you want to draw')
parser.add_argument('output', help='Output filename (pdf extension will be added)')

args = parser.parse_args()

data = {}
with open(args.file, 'r') as f:
    data = json.load(f)

if data['dimension'] != 2:
    raise Exception("Only 2-d scale-factors / efficiencies are supported")

if 'formula' in data and data['formula']:
    raise Exception("Formulas are not supported")

x_min = data['binning']['x'][0]
Code example #50
    return received



### Logging Config ###
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('sfc_controller.log')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
logger.addHandler(fh)


# configuration options
parser = argparse.ArgumentParser()  # handler for the arguments passed
parser.add_argument("-s", "--server", type=str,
                    help="local server ip; if not given, it is found automatically")  # TODO: should probably be required
parser.add_argument("-i", "--brint", type=str,
                    help="connection of the br-int to br-tun port, or use default '2'")
parser.add_argument("-p", "--provider", type=str,
                    help="connection of the br-provider to br-int port, or use default '2'")

parser.add_argument("-t", "--tun", type=str,
                    help="connection of the br-tun to br-int port, or use default '2'")  # TODO: number of nodes, and port corresponding to nodes

args = parser.parse_args()  # parse the command-line arguments
# default values for ports
brintTun = "2"
brintport = "2"
brprovider = "2"

logger.info("")
logger.info("===SONATA PROJECT===")    
logger.info("Starting SFC Agent")
Code example #51
    options = Config.options(section)
    for option in options:
        try:
            dict_conf[option] = Config.get(section, option)
            if dict_conf[option] == -1:
                DebugPrint("skip: %s" % option)
        except Exception:
            print("exception on %s!" % option)
            dict_conf[option] = None
    return dict_conf

# ./capture.py -a ../config/system.conf -b ../config/pipeline.conf -c 0 -d 0 -e 0 -f 1
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='To capture data from given beam (with given part if the data arrives with multiple parts) with a docker container')
    
    parser.add_argument('-a', '--system_conf', type=str, nargs='+',
                        help='The configuration of PAF system')
    parser.add_argument('-b', '--pipeline_conf', type=str, nargs='+',
                        help='The configuration of pipeline')    
    parser.add_argument('-c', '--beam', type=int, nargs='+',
                        help='The beam id from 0')
    parser.add_argument('-d', '--part', type=int, nargs='+',
                        help='The part id from 0')
    parser.add_argument('-e', '--hdr', type=int, nargs='+',
                        help='Record packet header or not')
    parser.add_argument('-f', '--bind', type=int, nargs='+',
                        help='Bind threads to cpu or not')
    
    args          = parser.parse_args()
    system_conf   = args.system_conf[0]
    pipeline_conf = args.pipeline_conf[0]
    beam          = args.beam[0]
Code example #52
# Parsing command line argument in normal file.

import json
import argparse
parser = argparse.ArgumentParser()
#parser.add_argument("name") #If you are using this option then no need to put '--name' option while giving
#commandline arguments.

# while executing the program, you need to specify '--name anyname'(space)'--age integer'(space)'--id integer'
parser.add_argument("--name")
parser.add_argument("--age")
parser.add_argument("--id")

args = parser.parse_args()

print "Hello " + args.name
print "your age is :" + args.age
print "and your ID is :" + args.id
Code example #53
from mnist_m import MNISTM
from torchvision import transforms

from syn_digits import Loadsyn
from torchvision.utils import save_image
from model.InterpolationGAN_v3 import InterpolationGAN

from domain_style_transfer import minibatch_transfer
import numpy as np
import torchvision
import torch
import argparse
parser = argparse.ArgumentParser(description='Domain generalization')

parser.add_argument('--image_size', type=int, default=32)
parser.add_argument('--g_conv_dim', type=int, default=64)
parser.add_argument('--d_conv_dim', type=int, default=64)

parser.add_argument('--num_classes', type=int, default=10)
parser.add_argument('--lambda_cycle', type=float, default=10.0)
parser.add_argument('--n_res_blocks', type=int, default=3)
args = parser.parse_args()

device = torch.device('cuda')
trans = transforms.Compose([
    transforms.Resize(size=[32, 32]),
    transforms.ToTensor(),
    transforms.Lambda(lambda x: x.repeat(3, 1, 1)),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])
Code example #54
File: server.py Project: rajendraas/cceproject
    def post(self):
        try:
            parser = reqparse.RequestParser()
            parser.add_argument('analytic_name',
                                type=str,
                                location="json",
                                required=True)
            parser.add_argument('method',
                                type=str,
                                location="json",
                                required=True)
            parser.add_argument('request_id',
                                type=str,
                                location="json",
                                required=True)
            args = parser.parse_args()
            path = server_path + "/" + args.get(
                "request_id") + "/" + "preprocess"
            file = os.listdir(path)
            df = pandas.read_csv(path + "/" + file[0])
            module_name = "analytics." + args.get('analytic_name')
            module = importlib.import_module(module_name)
            analytic_class = getattr(module, args.get("analytic_name"))
            if args.get("method") == "train":
                result = analytic_class.train(df)
                if result["status"] == "success":

                    path = server_path + "/" + args.get(
                        "request_id") + "/" + args.get("analytic_name")
                    if not os.path.exists(path):
                        os.mkdir(path)
                    file_name = os.path.join(path, "model.json")
                    fp = open(file_name, "w")
                    json.dump(result, fp)
                    fp.close()
                return result

            elif args.get("method") == "score":
                path = server_path + "/" + args.get(
                    "request_id") + "/" + args.get("analytic_name")
                model_file = os.path.join(path, "model.json")
                fp = open(model_file, "r")
                dct_model = json.load(fp)
                fp.close()
                result, df_out, error = analytic_class.score(
                    df, dct_model["coeff"])
                if result == "success":

                    if not os.path.exists(path):
                        os.mkdir(path)
                    file_name = os.path.join(path, "output.csv")
                    df_out.to_csv(file_name, index=False)
                    return {"status": "success"}
                else:
                    return {"status": "failed", "error": error}
        except Exception as e:
            return {"status": "failed", "error": str(e)}
Code example #55
                        profile, LoadBalancerName, DNSName, OtherPolicies_name,
                        ConnectionDrainingStatus, CrossZoneLoadBalancingStatus,
                        AccessLogStatus, region, elb_owner, elb_env, elb_asv,
                        tag_count
                    ])
        f.close()


if __name__ == '__main__':
    # Define command line argument parser
    parser = argparse.ArgumentParser(
        description=
        'Creates a CSV report about EBS volumes and tracks snapshots on them.')
    parser.add_argument(
        '--regions',
        default=AWS_REGIONS,
        help=
        'AWS regions to create the report on, can add multiple with | as separator. Default will assume all regions'
    )
    parser.add_argument(
        '--profile',
        default=AWS_PROFILE,
        help=
        'AWS profile to create the report on, can add multiple with | as separator. Default will assume all profiles'
    )
    parser.add_argument('--file',
                        required=True,
                        help='Path for output CSV file')
    args = parser.parse_args()
    # creates the report
    retval = create_rds_report(args.regions, args.profile, args.file)
    if retval:
Code example #56
            plt.colorbar()         
            plt.axis('off')

            name = int(np.random.uniform()*1e10)
            plt.savefig(
                output_dir + str(name) + '.png',
                bbox_inches='tight',
                dpi=2 * 512,
            )

    return

if __name__ == '__main__':

    parser = argparse.ArgumentParser("testing MOM MRM LF2")
    parser.add_argument('-n', '--n-images', type=int, default = -1, help='number of images to process; -1 to process all the images')
    args = parser.parse_args()

    array_dir = '../FLAT/trans_render/static/'
    data_dir = '../FLAT/kinect/'

    # initialize the camera model
    tof_cam = kinect_real_tf()

    # input the folder that trains the data
    # only use the files listed
    f = open('../FLAT/kinect/list/test_dyn.txt','r')
    message = f.read()
    files = message.split('\n')
    tests = files[0:-1]
    if args.n_images!=-1:
Code example #57
#                                       np.where(data["mutation_type"].str.contains("-"), "deletion",
#                                                np.where(data['mutation_type'].isin(['GA', 'AG', 'CT', 'TC']),
#                                                         'transition', 'transversion')))
#     data = data[data["mutation_class"] != "deletion"]
#     grouped = data.groupby(["source", "Pos"], as_index=False)["counts_for_position"].agg("sum")
#     data = pd.merge(data, grouped, on=["source", "Pos"])
#     data["adjusted_freq"] = (np.where(1 > data["counts_for_position_x"], 1, data["counts_for_position_x"]) / data[
#         "counts_for_position_y"])
#
#     gamma_distributions = []
#     gamma_distributions.extend(learn_gammas(data[data["source"] == label_control]))
#
#     to_call = data[data["source"] == label_sample].copy()
#     to_call.loc[:, "to_call"] = to_call.apply(
#         lambda row: call_by_gamma(row["adjusted_freq"], row["mutation_type"], row["prev"], row["next"],
#                                   gamma_distributions), axis=1)
#
#     output_to_file(to_call[(to_call["source"] == label_sample)], args.output)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("sample", type=str, help="input sample .freqs file")
    parser.add_argument("control", type=str, help="input control .freqs file")
    parser.add_argument("-c", "--coverage", type=int, help="minimum position coverage to fit and call variants",
                        required=False, default=100000)
    parser.add_argument("-o", "--output", type=str, help="output variant file", required=False,
                        default="output.var.csv")

    args = parser.parse_args(sys.argv[1:])
    main(args)
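
A hypothetical command line for the parser above (the script name is invented):

# python call_variants.py sample.freqs control.freqs -c 50000 -o calls.var.csv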
Code example #59
import torchvision
import torchvision.transforms as transforms
import numpy as np
import os, sys
import copy as cp
import argparse
import importlib
from utils import progress_bar

import torch
import config  # assumed: project-local module providing the Config class

#===============================================================================
parser = argparse.ArgumentParser(description='semi-supervised Training')
parser.add_argument('--resume',
                    '-r',
                    action='store_true',
                    help='resume from checkpoint')
parser.add_argument('--path', '-p', help='path to model where to resume')
args = parser.parse_args()

device = 'cuda' if torch.cuda.is_available() else 'cpu'
best_acc = 0  # best test accuracy
start_epoch = 0  # start from epoch 0 or last checkpoint epoch

cfg = config.Config
architecture, net_type = cfg.net_arch.rsplit(".", 1)
arch = importlib.import_module(architecture)
network = getattr(arch, net_type)

path_to_dataset = os.path.join(cfg.dataset_dir, cfg.dataset)
dataset = importlib.import_module(cfg.dataset)