Example #1
def main(display, previous_objects):
	from pygui.facilities.optionparser import parser
	options, args = parser.parse_args()
	try:
		from peewee.dateutils import TimeManager, WYCLOCK_AUTO_SETTER
		if config.user_config['base']['firstboot']:
			config.user_config['base']['timeupdatemode'] = WYCLOCK_AUTO_SETTER
Example #2
def main():
    parser = argparse.ArgumentParser(description = 'Fetch soccer league, team and player data from Wikipedia.')
    parser.add_argument('-L', dest = 'specific_league', type = str,
            default = '', help = 'fetch only one league')
    parser.add_argument('-l', dest = 'fetch_only_leagues', action = 'store_true', help = 'fetch only leagues')
    parser.add_argument('-o', dest = 'output_dir', action = 'store', type = str, default = '', help = 'output directory')

    args = parser.parse_args()
    Globals.setDataDir(args.output_dir)
    if args.fetch_only_leagues:
        Globals.fetchTeams = False
        Globals.dumpTextFiles = True
    try:
        fetchLeagueData(args.specific_league.decode('utf-8'))
    except:
        # http://www.doughellmann.com/articles/how-tos/python-exception-handling/index.html
        try:
            raise
        finally:
            try:
                cleanup()
                print Globals.progress
            except Exception, e:
                print >> sys.stderr, "Error: couldn't save progress:", str(e)
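
Note: the nested try/raise/finally above is the Python 2 idiom from the linked article for re-raising the original exception while still attempting cleanup. A minimal sketch of the same guarantee in modern Python 3 (fetch() and cleanup() are placeholders):

import sys

def fetch():
    raise RuntimeError("network error")     # stand-in for fetchLeagueData

def cleanup():
    pass                                    # stand-in for the real cleanup

try:
    fetch()
finally:
    # runs whether or not fetch() raised; if it did, the original
    # exception propagates after the cleanup attempt
    try:
        cleanup()
    except Exception as e:
        print("Error: couldn't save progress:", e, file=sys.stderr)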
Example #3
def parse_arguments(self):
    parser = self.build_option_parser()
    (self.flags, self.args) = parser.parse_args()
    if self.flags.compile:
        self.compile_flags = self.flags.compile.get_flags()
    else:
        self.compile_flags = None
Example #4
def parse_options(args=None):
    parser = optparse.OptionParser()

    #
    # Global options.
    #
    parser.add_option('-l', '--loglevel', action='store', default='WARNING',
        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
        help="""Logging level. Choices: DEBUG, INFO, WARNING, ERROR.
            [default: WARNING]""")

    #
    # Options for compilation (joosc).
    #
    stage_opts = [
        'scanner',
        'parser',
        'weeder',
        'ast',
        'hierarchy',
        'name',
        'typecheck',
        'reachability',
        'codegen',
        'asm',
        'end'
    ]
    parser.add_option('-s', '--stage', action='store', dest='stage',
        default='end', choices=stage_opts,
        help="Stage of compiler to run \"up to\" before terminating.")

    parser.add_option('-i', '--include_stdlib', action='store_true',
        default=False,
        dest='include_stdlib', help="Include all stdlib files in compilation")

    parser.add_option('--print_stdlib', action='store_true',
        dest='print_stdlib',
        help="""Override default hiding of output printout for stdlib files.
            Only has any effect if -s and -i are specified.""")

    parser.add_option('-d', '--directory_crawl', action='store_true',
        dest='directory_crawl', default=True,
        help="Recursively crawl directories for compilation units")

    parser.add_option('-c', '--clean_output', action='store_true',
        dest='clean_output', default=False,
        help="Clean the output directory before codegen.")

    #
    # Options for testing.
    #

    parser.add_option('-t', '--test', action='store', dest='test',
        help="Run tests under the specified directory under assignment_testcases.")

    parser.add_option('--show_errors', action='store_true',
        dest='show_errors',
        help="Disable stderr printout suppression. Only usable during tests.")

    return parser.parse_args(args)
Example #5
def main():
    import sys
    import os
    h = 6.6260689633e-34
    c = 299792458
    electron  = 1.60217653141e-19

    def toenergy(wave):
        energy = (h*c)/(float(wave)*1e-10*electron)
        return energy

    def towave(energy):
        wave = (h*c)/(float(energy)*electron)
        return wave

    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("--energy","-e",dest="energy",metavar="energy ev",help="Energy in ev")
    parser.add_option("--wave","-w",dest="wave",metavar="wave A",help="wavelength in Angstrom")
    (options,spillover) = parser.parse_args()


    if options.energy is not None:
        energy = options.energy
        wave = towave(energy)
        print "The input %s energy in ev corresponds to %s wavelength" % ( energy , wave)


    elif options.wave is not None:
        wave = options.wave
        energy = toenergy(wave)
        print "The input %s wavelength corresponds to %s ev " % (wave,energy)
    else:
        parser.print_help()
        exit()
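
Note: both helpers are based on E = hc/λ (converted to eV), so a 1 Å wavelength comes out to roughly 12398.4 eV. A quick self-contained check of the constants:

h, c, electron = 6.6260689633e-34, 299792458, 1.60217653141e-19
energy_ev = (h * c) / (1.0 * 1e-10 * electron)   # 1 Angstrom -> ~12398.4 eV
assert abs(energy_ev - 12398.4) < 0.1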
Example #6
def main():
	parser = argparse.ArgumentParser(
		description='argdoc turns all argparse arguments into beautiful, end-user friendly documentation',
		formatter_class=argparse.RawTextHelpFormatter)
	
	parser.add_argument(
		'filename', 
		help="Name of the file for which you want to create documentation")
	
	parser.add_argument(
		'-f', '--format', 
		help="Format of the outputted documentation.\nOptions: ['md', 'html']" + 
			"\nDefault: 'html'",
		choices=['md', 'html'],
		default="html",
		metavar="")

	parser.add_argument(
		'-n', '--noob', 
		help=("Set whether or not to include the beginner instructions in the Docs"
			'\n(See templates for example of beginner info.)'),
		action="store_false", 
		default=True)

	parser.add_argument(
		'-q', '--quiet',
		help="Suppresses success message",
		action='store_false')

	args = parser.parse_args()
	parse_pyfile(args.filename, format=args.format, noob=args.noob)
Example #7
def parse_args():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("--name", help="attribute name")
    parser.add_argument("--formula", help="formula to compute new attribute")
    parser.add_argument("input_mesh", help="input mesh")
    parser.add_argument("output_mesh", help="output mesh")
    return parser.parse_args()
Example #8
def main():
    """ Main function """ 
    logging.info("Starting snippets")
    parser = make_parser()
    arguments = parser.parse_args(sys.argv[1:])
    
    # Convert parsed arguments from Namespace to dictionary
    arguments = vars(arguments)
    command = arguments.pop("command")
   
    if command == "put":
        name, snippet = put(**arguments)
        print "Stored {!r} as {!r}".format(snippet, name)
    elif command == "get":
        name = read(command, **arguments)
        if name == "Snippet not found":
            print "Snippet not found"
        else:
            print "Retrieved Snippet {!r}".format(name)
    elif command == "search":
        snippet = read(command, **arguments)
        if snippet == "Snippet not found":
            print "Snippet not found"
        else:
            print "Snippet Found!\n==============\n\n{}".format(snippet)
    elif command == "modify":
        name = read(command, **arguments)
        if name == "Snippet not found":
            print "Snippet not found"
        else:
            print "Modified Snippet {!r}".format(name)
Example #9
def start_server():
   parser = OptionParser(usage="usage: %prog [options]", version="%prog 1.0")
   parser.add_option("--host", default='', type='string', action="store", dest="host", help="hostname (localhost)")
   parser.add_option("--port", default=9001, type='int', action="store", dest="port", help="port (9001)")
   parser.add_option("--example", default='echo', type='string', action="store", dest="example", help="echo, chat")
   parser.add_option("--ssl", default=0, type='int', action="store", dest="ssl", help="ssl (1: on, 0: off (default))")
   parser.add_option("--cert", default='./cert.pem', type='string', action="store", dest="cert", help="cert (./cert.pem)")
   parser.add_option("--ver", default=ssl.PROTOCOL_TLSv1, type=int, action="store", dest="ver", help="ssl version")

   (options, args) = parser.parse_args()

   os.system("rm ./unix_socket")
   cls = SimpleEcho
   if options.example == 'chat':
      cls = SimpleChat

   if options.ssl == 1:
      server = SimpleSSLWebSocketServer(options.host, options.port, cls, options.cert, options.cert, version=options.ver)
   else:
      server = SimpleWebSocketServer(options.host, options.port, cls)

   # def close_sig_handler(signal, frame):
   #    server.close()
   #    sys.exit()

   # serverThread = threading.Thread(target=other_thread)
   # serverThread.daemon = True
   # serverThread.start()

   #signal.signal(signal.SIGINT, close_sig_handler)

   server.serveforever()
Example #10
def args():
    parser = argparse.ArgumentParser(description="Get information needed to \
            parse the log")
    parser.add_argument('script', nargs='?') # the full sys.argv is passed in
    # below, so the script name arrives as the first positional; hence this dummy
    parser.add_argument('infile', nargs='?', default=sys.stdin)
    parser.add_argument('parser', nargs='?', default='IrssiBitlbee')
    parser.add_argument('viewer', nargs='?', default='Null')
    return parser.parse_args(sys.argv)
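
Note: the dummy 'script' positional is only needed because the full sys.argv is handed to parse_args; called with no arguments, argparse already defaults to sys.argv[1:]. A minimal sketch of the usual form:

import argparse, sys

parser = argparse.ArgumentParser()
parser.add_argument('infile', nargs='?', default=sys.stdin)
args = parser.parse_args()   # argparse skips the script name by itself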
Example #11
def main():
    parser = optparse.OptionParser(usage='Usage: %prog [options] filename')
    parser.add_option('-s', '--source', dest='source', default=DEFAULT_SOURCE,
                      help='Source path. Default: ' + DEFAULT_SOURCE)
    parser.add_option('-d', '--destination', dest='destination',
                      default=DEFAULT_DESTINATION,
                      help='Destination path. Default: ' + DEFAULT_DESTINATION)
    (options, args) = parser.parse_args()
    process(options.source, options.destination)
Example #12
def parse_args():
    """
    Parses arguments
    """
    import argparse
    #argparse stuff
    parser = argparse.ArgumentParser(description="Scan files and store results in elastic search")
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.add_argument('Report', help="Report file with one json report per line")
    return parser.parse_args()
Example #13
def _parse_args():
	'''Parse the command-line arguments.'''
	parser = OptionParser(usage='%prog [Options] <device>', version='%prog 0.5')
	parser.add_option('-p', '--purge',
		dest='purge',
		help='purge the tracklog memory on the device',
		action='store_true',
		default=False)
	parser.add_option('-d', '--download',
		dest='download',
		help='download tracklogs from device',
		action='store_true',
		default=False)
	parser.add_option('-t', '--target-dir',
		dest='target_dir',
		help='target directory for downloaded tracklogs [default: %default]',
		default='%s/mainnav-tracklogs/' % os.environ.get('HOME', tempfile.gettempdir()))
	parser.add_option('-u', '--utc',
		dest='utc_offset',
		help='generate GPX time entry in UTC by declaring the offset (your timezone, e.g. -5 or +9.5)',
		type='float',
		default=False)
	parser.add_option('-r', '--raw',
		dest='raw',
		help='store the raw binary data in the target directory (must be combined with the download option)',
		action='store_true',
		default=False)
	parser.add_option('-m', '--memory',
		dest='memory',
		help='show the amount of memory in use',
		action='store_true',
		default=False)
	parser.add_option('-v', '--verbose',
		dest='verbose',
		help='be verbose',
		action='store_true',
		default=False)

	(options, args) = parser.parse_args()

	try:
		options.device = args[0]
	except IndexError:
		options.device = ''
	if not options.device:
		parser.error('please specify device path, for example \'/dev/ttyUSB0\'')
	if options.download and options.purge:
		parser.error('options -d and -p are mutually exclusive')
	if options.download and options.memory:
		parser.error('options -d and -m are mutually exclusive')
	if options.memory and options.purge:
		parser.error('options -m and -p are mutually exclusive')

	return options
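
Note: optparse has no built-in mutual-exclusion support, hence the three pairwise parser.error() checks above. With argparse the same constraint is declared once; a minimal sketch reusing this example's flags:

import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('-p', '--purge', action='store_true')
group.add_argument('-d', '--download', action='store_true')
group.add_argument('-m', '--memory', action='store_true')
args = parser.parse_args(['-d'])   # giving any two of these exits with an error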
Example #14
def parse_args():
    """
    Parses arguments
    """
    import argparse
    #argparse stuff
    parser = argparse.ArgumentParser(description="Scan files and store results in elastic search")
    parser.add_argument("-r", "--recursive", action="store_true")
    parser.add_argument("-v", "--verbose", action="store_true")
    parser.add_argument('Files', help="Files and Directories to attach", nargs='+')
    return parser.parse_args()
Example #15
def runBatch():
    parser = OptionParser()
    parser.set_usage(parser.get_usage().rstrip() + " {create | start | stop | clean | drop}")
    parser.add_option("-t", "--tables", dest="tables", type="string", default="test: Blog"
        , help = "format: \"dbname1: table1 table2;dbname2: table3;\"")
    parser.add_option("-m", "--home", dest="mysqlhome", type="string", default="/usr"
        , help = "mysql home directory")
    parser.add_option("-H", "--host", dest="mysqlhost", type="string", default="127.0.0.1"
        , help = "mysql server address")
    parser.add_option("-P", "--port", dest="mysqlport", type="int", default = 3306
        , help = "mysql server port")
    parser.add_option("-u", "--user", dest="mysqluser", type="string", default = "root"
        , help = "mysql server user name")
    parser.add_option("-p", "--password", dest="mysqlpassword", type="string", default=""
        , help = "mysql server password")
    parser.add_option("-e", "--engine", dest="engine", type="string", default="innodb"
        , help = "mysql storage engine")
    parser.add_option("-i", "--iteration", dest="iteration", type="int"
        , help = "the iteration times of query information_schema database")
    parser.add_option("-o", "--output", dest="output", type="string", default="./"
        , help = "output directory for result")
    
    (options, args) = parser.parse_args()
    
    tables = []
    if options.tables != None:
        # drop a trailing ';' before splitting into "dbname: tables" groups
        for dbtabs in options.tables.strip(";").split(";"):
            parts = dbtabs.split(":")
            print parts
            tables.append((parts[0], parts[1].split()))
    if len(args) <= 0:
        parser.print_usage()
        return

    if args[0] == "create":
        StatisticianRunner.createTest(options.output, MysqlConfig(options.mysqlhome, options.mysqlhost
            , options.mysqlport, options.mysqluser, options.mysqlpassword), options.engine, tables)
    elif args[0] == "start":
        if options.iteration == None:
            parser.print_usage()
            return
        StatisticianRunner.start(options.output, options.iteration)
    elif args[0] == "stop":
        StatisticianRunner.stop(options.output)
    elif args[0] == "clean":
        StatisticianRunner.clean(options.output)
    elif args[0] == "drop":
        StatisticianRunner.drop(options.output)
    else:
        parser.print_usage()
        return
Example #16
def _parse_args():
    """
    Parses the command line arguments.

    @return: Tuple with options and (positional) arguments.
    @rtype: tuple
    """
    parser = optparse.OptionParser(usage="", description="")
    parser.add_option("-o", dest="outfile", default=None, help="File to write to")
    parser.add_option("-w", dest="write_format", default="pidgin", help="Write format. [default: %default]")
    parser.add_option("-r", dest="read_format", default="adium", help="Read format. [default: %default]")

    return parser.parse_args()
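
Note: the %default marker in the help strings is interpolated by optparse itself when the help text is rendered, as in:

import optparse

parser = optparse.OptionParser()
parser.add_option("-w", dest="write_format", default="pidgin",
                  help="Write format. [default: %default]")
parser.print_help()   # prints "Write format. [default: pidgin]"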
Example #17
def main():
	parser = argparse.ArgumentParser()
	parser.add_argument('-v', '--verbose', action="store_true", default=False)
	parser.add_argument('source', action="store")
	args = parser.parse_args(sys.argv[1:])

	global verbose
	verbose = args.verbose

	source = args.source

	show_tokens(source)
	ast, symtab = show_ast(source)
	show_code(ast, symtab)
Example #18
def main():
    parser = argparse.ArgumentParser(description='Music Scanner')
    parser.add_argument('-d','--directory', help='Directory', required=True)
    args = parser.parse_args()
    x = 0
    L = []
    badalbums = []

    for dirName,subdirList,fileList in os.walk(args.directory):
        #Prune bad directories from list
        if re.search(r'([Ss]ingles|lost\+found|System\ Volume\ Information|.*RECYCLE?)',dirName):
            pass
        else:
            try:
                # Separate out Artist Name from Album Title
                em = re.search(r'^.*/(newmusic|music|MUSIC|Music)/(.*)_-_(.*)/?',dirName)
                # Prune off extra "/". if group 2 contains a "/" character, don't print
                if re.search(r'/',em.group(2)):
                    pass
                else:
                    #print em.group(1) ,"~~~", em.group(2)
                    for fname in fileList:
                        # Get actual music files, not other files
                        if re.search(r'\.(flac$|wav$|mp3$|m4a$|mp4$|wma$)',fname):
                            L.append(fname)
                            x = x+1
                    if x == 0:
                        pass
                    else:
                        # Print out total files contained in Album
                        #print x , "songs in", em.group(1) , em.group(2)
                        # Do you want to print this data to stdout or write it to a file? 
                        group2 = re.sub(r"_",' ', em.group(2))
                        group3 = re.sub(r"_",' ', em.group(3))
                        if re.search(r'/',group3):
                            group3 = group3.rstrip('/')
                        #print group2,group3
                        # Function that compares my albums to musicBrainz goes here!
                        foundtracks = querymusicbrainz.gather(group2,group3)
                        if int(x) != int(foundtracks):
                          print bcolors.WARNING + "You're missing some tracks bro!" + bcolors.ENDC
                        print x , "songs in", dirName, foundtracks, "in MusicBrainz"
                    L = []
                    x = 0
            except AttributeError:
                print "Cannot parse ", dirName
                badalbums.append(dirName)
Example #19
def get_args():
    parser = argparse.ArgumentParser(
        description='A Maze interpreter (http://esolangs.org/wiki/Maze)')
    parser.add_argument('file', type=open,
        help='the program to run')
    parser.add_argument('-d', '--debug', action='store_true',
        help='display the maze during interpretation.')
    parser.add_argument('-l', '--log-length', default=10, type=int,
        help='Max length of debug log.')
    parser.add_argument('-c', '--no-colors', action='store_false',
        help='shows the maze without color when in debug mode.')
    parser.add_argument('-f', '--fps', default=10, type=int,
        help='the fps of the maze when in debug mode.')

    args = parser.parse_args()
    return args
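
Note: type=open works for the positional file, but argparse also provides argparse.FileType, which handles modes/encodings and reports a missing file as a clean argument error instead of an uncaught IOError; a small sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('file', type=argparse.FileType('r'),
                    help='the program to run')
# a nonexistent path now produces "argument file: can't open ..." plus usage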
Example #20
def parse_options():
    parser = argparse.ArgumentParser(
        description=("Download the hourly weather data for all the"
                     " stations available in aemet.es.")
    )
    parser.add_argument('-d', '--debug', action='store_true',
                        help="Enable debug mode.")
    parser.add_argument('-v', '--verbose', default="2",
                        help="Verbosity level. Options: 0=ERROR, 1=WARNING,"
                             " 2=INFO or 3=DEBUG. Default: 2.")
    parser.add_argument('-o', '--output', default=settings.DEFAULT_OUTPUT_DIR,
                        help="Output directory path where files will be"
                             " downloaded. Default: aemet_data.")
    parser.add_argument('-f', '--format', default='txt',
                        help="Store file in the specified format. "
                             "Options: csv or txt. Default: txt.")
    return parser.parse_args()
Example #21
File: mess.py Project: nekobon/mess
def handle_user_params():
	"""
	Handles user parameters using python's built-in optparse module.

	@rtype: dict
	@return: The dictionary of the user parameters with the below elements,
				- 'errorFile', default value is 'sample_errors.json'
				- 'inputFile', default value is 'sample_input.txt'
	"""
	parser = OptionParser('Usage: mcchp.py [options]')
	parser.add_option("-e", "--error", dest="errorFile", default='sample_errors.json',
			help="Specify a json file with error rates (default = sample_errors.json)", metavar="FILE")
	parser.add_option("-i", "--input", dest="inputFile", default='sample_input.txt',
			help="Specify a QUIPSIM output file for the input circuit (default = sample_input.txt)", 
			metavar="FILE")
	(options, args) = parser.parse_args()
	return options
Example #22
def parse_args():
    global args

    parser = argparse.ArgumentParser(description = __doc__,
                                     formatter_class = argparse.RawDescriptionHelpFormatter)

    parser.add_argument("-e", "--big-endian", action="store_true",
                        help="Target encodes data in big-endian format "
                        "(little endian is the default)")

    parser.add_argument("-i", "--input",
                        help="Input file from which MMU regions are read.")
    parser.add_argument("-o", "--output",
                        help="Output file into which the page tables are written.")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Lists all the relevant data generated.")
    args = parser.parse_args()
Example #23
def main(argv):
    global options
    from optparse import OptionParser,make_option
    parser = OptionParser(
        usage = """usage: %prog [Options] PYTHONFILE
Creates a reference manual in sphinx format for the functions and classes
defined in PYTHONFILE.""",
        version = "%s (C) 2009 Benedict Verhegghe" % __file__,
        option_list=[
        make_option('-d',"--debug", help="print debug info",
                    action="store_true", dest="debug", default=False),
        make_option('-c',"--continue", help="continue on errors",
                    action="store_false", dest="error", default=True),
        ])
    options, args = parser.parse_args(argv)
    
    for a in args:
        info = get_docs(a)
        do_module(info)
Example #24
def parse_args():
    """parse_args parses sys.argv for wiki2centrality."""
    # Help Menu
    parser = optparse.OptionParser(usage='%prog [options] title')
    parser.add_option('-r', '--remove',
                      action='store_false', dest='remove', default=True,
                      help='remove mass deletions')
    parser.add_option('-c', '--centrality',
                      type='str', dest='ctype', default='closeness',
                      help='type of centrality: closeness, out_degree, betweenness',
                      metavar='CTYPE')

    (opts, args) = parser.parse_args()

    # Parser Errors
    if len(args) != 1:
        parser.error('incorrect number of arguments')

    wiki2centrality(args[0], remove=opts.remove, ctype=opts.ctype)
Example #25
def parse_args():
    parser = argparse.ArgumentParser(
        description='DNS for libvirt',
        )
    parser.add_argument(
        '--config',
        metavar='CONFIGFILE',
        help='path to YAML config file',
        )
    parser.add_argument(
        '--server',
        action='store_true', default=False,
        help='Run as the server (http/sql access).',
        )
    parser.add_argument(
        'remainder',
        nargs=argparse.REMAINDER,
        help='Remainder arguments for webpy, ip:port for listen address'
        )
    args = parser.parse_args()
    return args
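
Note: nargs=argparse.REMAINDER collects everything from the first unrecognized token onward, which is how the webpy ip:port arguments pass through untouched. For example (hypothetical values):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--server', action='store_true', default=False)
parser.add_argument('remainder', nargs=argparse.REMAINDER)
args = parser.parse_args(['--server', '0.0.0.0:8080', '--debug'])
print(args.remainder)   # ['0.0.0.0:8080', '--debug']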
Example #26
def main():
    global verbose
    global ssl_location
    global revocation

    if int(os.getuid()) > 0:
        print "[-] You need to be root for this."
        sys.exit(1)

    # command line options
    parser = OptionParser()
    parser.add_option("-a", help="Adapter to sniff on", action="store", default="eth0", dest="adapter")
    parser.add_option("-v", help="Dump certificate information", action="store_true", default=False, dest="verbose")
    parser.add_option(
        "-s", help="Specify a different SSL cert location", action="store", default="/etc/ssl/certs", dest="certFile"
    )
    parser.add_option("-p", help="Specify a port (default: 443)", action="store", default=443, dest="port")
    parser.add_option(
        "-r", help="Specify a CRL file for lookup; give full path", action="store", default=None, dest="revocation"
    )

    (options, args) = parser.parse_args()
    adapter = options.adapter
    verbose = options.verbose
    port = options.port
    revocation = options.revocation

    # validate the ssl cert folder
    if os.path.isdir(options.certFile):
        ssl_location = options.certFile
    else:
        ssl_location = "/etc/ssl/certs/"

    try:
        print "[+] Beginning sniffer on adapter '{0}' port {1}".format(adapter, port)
        sniff(filter="tcp and port %s" % port, iface=adapter, prn=validate)
    except KeyboardInterrupt, k:
        print "\n[!] Closing down..."
Example #27
def main():
    import argparse
    parser = argparse.ArgumentParser(description = 'Process Celltone code')
    parser.add_argument('--update', '-u', action = 'store_true',
                        help = 'Allow for dynamic updating of source file during runtime')
    parser.add_argument('--file', '-f', help = 'Output to MIDI file instead of the MIDI device')
    parser.add_argument('--length', '-l', help = 'Stop after <LENGTH> seconds')
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-v', action = 'store_true', help = 'verbose')
    group.add_argument('-vv', action = 'store_true', help = 'more verbose')
    group.add_argument('-vvv', action = 'store_true', help = 'even more verbose')
    parser.add_argument('filename', nargs = '?', help = 'if omitted reads from stdin')
    args = parser.parse_args()

    verbosity = 0
    if args.v:
        verbosity = 1
    if args.vv:
        verbosity = 2
    if args.vvv:
        verbosity = 3

    try:
        if not args.filename or args.filename == '-':
            if args.update:
                parser.error('Cannot dynamically update from stdin')
            code = ''.join(sys.stdin.readlines())
        else:
            if not os.path.exists(args.filename):
                parser.error('Error: No such file \'%s\'' % args.filename)
            with open(args.filename) as f:
                code = ''.join(f.readlines())
    except KeyboardInterrupt:
        sys.exit(0)

    ct = Celltone(code, verbosity, args.filename, args.update, args.file, args.length)
    ct.start()
    sys.exit(0)
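
Note: the -v/-vv/-vvv group plus the if-chain can also be collapsed with argparse's count action (spelled -v, -vv, or -v -v); a minimal sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='count', default=0,
                    help='repeat for more verbosity')
verbosity = parser.parse_args(['-vv']).verbose   # 2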
Example #28
    def post(self):
        try:
            parser = reqparse.RequestParser()
            parser.add_argument('analytic_name',
                                type=str,
                                location="json",
                                required=True)
            parser.add_argument('method',
                                type=str,
                                location="json",
                                required=True)
            parser.add_argument('request_id',
                                type=str,
                                location="json",
                                required=True)
            args = parser.parse_args()
            path = server_path + "/" + args.get(
                "request_id") + "/" + "preprocess"
            files = os.listdir(path)
            df = pandas.read_csv(path + "/" + files[0])
            module_name = "analytics." + args.get('analytic_name')
            module = importlib.import_module(module_name)
            analytic_class = getattr(module, args.get("analytic_name"))
            if args.get("method") == "train":
                result = analytic_class.train(df)
                if result["status"] == "success":

                    path = server_path + "/" + args.get(
                        "request_id") + "/" + args.get("analytic_name")
                    if os.path.exists(path):
                        pass
                    else:
                        os.mkdir(path)
                    file_name = os.path.join(path, "model.json")
                    fp = open(file_name, "w")
                    json.dump(result, fp)
                    fp.close()
                return result

            elif args.get("method") == "score":
                path = server_path + "/" + args.get(
                    "request_id") + "/" + args.get("analytic_name")
                model_file = os.path.join(path, "model.json")
                fp = open(model_file, "r")
                dct_model = json.load(fp)
                fp.close()
                result, df_out, error = analytic_class.score(
                    df, dct_model["coeff"])
                if result == "success":

                    if os.path.exists(path):
                        pass
                    else:
                        os.mkdir(path)
                    file_name = os.path.join(path, "output.csv")
                    df_out.to_csv(file_name, index=False)
                    return {"status": "success"}
                else:
                    return {"status": "failed", "error": error}
        except Exception as e:
            return {"status": "failed", "error": str(e)}
Example #29
def parse_args():
    parser = parse_classifier_args(ret_parser=True)
    args = parser.parse_args()
    return args
Example #30
def main():
    global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog, no_journal, no_preallocj, auth, keyFile, smoke_db_prefix, test_path
    parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
    parser.add_option(
        "--mode",
        dest="mode",
        default="suite",
        help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)',
    )
    # Some of our tests hard-code pathnames e.g., to execute, so until
    # that changes we don't have the freedom to run from anyplace.
    # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
    parser.add_option(
        "--test-path",
        dest="test_path",
        default=None,
        help="Path to the test executables to run, " "currently only used for 'client' (%default)",
    )
    parser.add_option(
        "--mongod",
        dest="mongod_executable",
        default=os.path.join(mongo_repo, "mongod"),
        help="Path to mongod to run (%default)",
    )
    parser.add_option("--port", dest="mongod_port", default="27999", help="Port the mongod will bind to (%default)")
    parser.add_option(
        "--mongo",
        dest="shell_executable",
        default=os.path.join(mongo_repo, "mongo"),
        help="Path to mongo, for .js test files (%default)",
    )
    parser.add_option(
        "--continue-on-failure",
        dest="continue_on_failure",
        action="store_true",
        default=False,
        help="If supplied, continue testing even after a test fails",
    )
    parser.add_option(
        "--from-file", dest="File", help="Run tests/suites named in FILE, one test per line, '-' means stdin"
    )
    parser.add_option(
        "--smoke-db-prefix",
        dest="smoke_db_prefix",
        default=smoke_db_prefix,
        help="Prefix to use for the mongods' dbpaths ('%default')",
    )
    parser.add_option(
        "--small-oplog",
        dest="small_oplog",
        default=False,
        action="store_true",
        help="Run tests with master/slave replication & use a small oplog",
    )
    parser.add_option(
        "--small-oplog-rs",
        dest="small_oplog_rs",
        default=False,
        action="store_true",
        help="Run tests with replica set replication & use a small oplog",
    )
    parser.add_option(
        "--nojournal", dest="no_journal", default=False, action="store_true", help="Do not turn on journaling in tests"
    )
    parser.add_option(
        "--nopreallocj",
        dest="no_preallocj",
        default=False,
        action="store_true",
        help="Do not preallocate journal files in tests",
    )
    parser.add_option(
        "--auth",
        dest="auth",
        default=False,
        action="store_true",
        help="Run standalone mongods in tests with authentication enabled",
    )
    parser.add_option(
        "--authMechanism",
        dest="authMechanism",
        default="MONGO-CR",
        help="Use the given authentication mechanism, when --auth is used.",
    )
    parser.add_option(
        "--keyFile",
        dest="keyFile",
        default=None,
        help="Path to keyFile to use to run replSet and sharding tests with authentication enabled",
    )
    parser.add_option("--ignore", dest="ignore_files", default=None, help="Pattern of files to ignore in tests")
    parser.add_option(
        "--only-old-fails",
        dest="only_old_fails",
        default=False,
        action="store_true",
        help="Check the failfile and only run all tests that failed last time",
    )
    parser.add_option(
        "--reset-old-fails",
        dest="reset_old_fails",
        default=False,
        action="store_true",
        help="Clear the failfile. Do this if all tests pass",
    )
    parser.add_option(
        "--with-cleanbb",
        dest="with_cleanbb",
        default=False,
        action="store_true",
        help="Clear database files from previous smoke.py runs",
    )
    parser.add_option(
        "--dont-start-mongod",
        dest="start_mongod",
        default=True,
        action="store_false",
        help="Do not start mongod before commencing test running",
    )
    parser.add_option(
        "--use-ssl",
        dest="use_ssl",
        default=False,
        action="store_true",
        help="Run mongo shell and mongod instances with SSL encryption",
    )

    # Buildlogger invocation from command line
    parser.add_option(
        "--buildlogger-builder",
        dest="buildlogger_builder",
        default=None,
        action="store",
        help='Set the "builder name" for buildlogger',
    )
    parser.add_option(
        "--buildlogger-buildnum",
        dest="buildlogger_buildnum",
        default=None,
        action="store",
        help='Set the "build number" for buildlogger',
    )
    parser.add_option(
        "--buildlogger-credentials",
        dest="buildlogger_credentials",
        default=None,
        action="store",
        help="Path to Python file containing buildlogger credentials",
    )
    parser.add_option(
        "--buildlogger-phase",
        dest="buildlogger_phase",
        default=None,
        action="store",
        help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)',
    )

    global tests
    (options, tests) = parser.parse_args()

    set_globals(options, tests)

    buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials)
    if all(buildlogger_opts):
        os.environ["MONGO_USE_BUILDLOGGER"] = "true"
        os.environ["MONGO_BUILDER_NAME"] = options.buildlogger_builder
        os.environ["MONGO_BUILD_NUMBER"] = options.buildlogger_buildnum
        os.environ["BUILDLOGGER_CREDENTIALS"] = options.buildlogger_credentials
        if options.buildlogger_phase:
            os.environ["MONGO_PHASE"] = options.buildlogger_phase
    elif any(buildlogger_opts):
        # some but not all of the required options were set
        raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials")

    if options.File:
        if options.File == "-":
            tests = sys.stdin.readlines()
        else:
            f = open(options.File)
            tests = f.readlines()
    tests = [t.rstrip("\n") for t in tests]

    if options.only_old_fails:
        run_old_fails()
        return
    elif options.reset_old_fails:
        clear_failfile()
        return

    # If we're in suite mode, tests is a list of names of sets of tests.
    if options.mode == "suite":
        tests = expand_suites(tests)
    elif options.mode == "files":
        tests = [(os.path.abspath(test), start_mongod) for test in tests]

    if options.ignore_files != None:
        ignore_patt = re.compile(options.ignore_files)
        print "Ignoring files with pattern: ", ignore_patt

        def ignore_test(test):
            if ignore_patt.search(test[0]) != None:
                print "Ignoring test ", test[0]
                return False
            else:
                return True

        tests = filter(ignore_test, tests)

    if not tests:
        print "warning: no tests specified"
        return

    if options.with_cleanbb:
        dbroot = os.path.join(options.smoke_db_prefix, "data", "db")
        call([utils.find_python(), "buildscripts/cleanbb.py", "--nokill", dbroot])

    try:
        run_tests(tests)
    finally:
        add_to_failfile(fails, options)

        f = open("smoke-last.json", "wb")
        f.write(json.dumps({"results": all_test_results}))
        f.close()

        report()
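
Note: the buildlogger block above uses the all()/any() pair to enforce an all-or-nothing option group: all() enables the feature only when every credential is present, while any() catches a half-configured set. The same check in isolation (hypothetical values):

opts = ("builder-a", None, "creds.py")
if all(opts):
    print("buildlogger enabled")
elif any(opts):
    print("error: some but not all buildlogger options were set")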
Example #31
def main(cli_args):
    parser = argparse.ArgumentParser(description="CSCE 496 HW 3, SeaQuest RL Homework")
    parser.add_argument('--n_step', type=int, default=2, help='N-Step time differences for DQN Update Function')
    parser.add_argument('--lambda', type=float, default=0.5, help="Value for Temporal Difference Calculation")
    parser.add_argument('--batch_size', type=int, default=32, help="Batch Size")
    parser.add_argument('--model_dir',type=str,default='./homework_3/',help='directory where model graph and weights are saved')
    parser.add_argument('--epoch' , type=int, default=100, help = "Epoch : number of iterations for the model")
    parser.add_argument('--model', type=int, help=" '1' for basic model, '2' for best model")
    parser.add_argument('--stopCount', type=int, default = 100, help="Number of times for dropping accuracy before early stopping")
    args_input = parser.parse_args(cli_args)

    if args_input.model:
        model = args_input.model
    else:
        raise ValueError("Model selection must not be empty") 

    if args_input.batch_size:
        batch_size = args_input.batch_size

    if args_input.model_dir:
        model_dir = args_input.model_dir
    else:
        raise ValueError("Provide a valid model data path")

    if args_input.epoch:
        epochs = args_input.epoch
    else:
        raise ValueError("Epoch value cannot be null and has to be an integer")

    #Make output model dir
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)

    #Placeholder for Tensorflow Variables
    x = tf.placeholder(tf.float32, [None, 84, 84, 4], name='input_placeholder') #4 frames
    y = tf.placeholder(tf.float32, [None, 18], name='output') #18 possible outputs
    
    #Setup
    LEARNING_RATE = 0.0001
    TARGET_UPDATE_STEP_FREQ = 5
    number_of_episodes = epochs

    replay_memory = util.ReplayMemory(10000)
    #Optimizer
    optimizer = tf.train.RMSPropOptimizer(LEARNING_RATE)
    #Load "SeaQuest" from atari_wrapper.py
    seaquest_env = util.load_seaquest_env()
    NUM_ACTIONS = seaquest_env.action_space.n #18 Possible Actions
    OBS_SHAPE = seaquest_env.observation_space.shape # [height, width, channels] = [84, 84, 4]
    EPS_END = 0.1
    EPS_DECAY = 100000
    step = 0

    if(str(model) == '1'):
        policy_model, policy_output_layer = initiate_policy_model(x, NUM_ACTIONS)
        target_model, target_output_layer = initiate_target_model(x, NUM_ACTIONS)
        print("Basic Model Initialized")
    elif(str(model) == '2'):
        policy_model, policy_output_layer = initiate_better_policy_model(x, NUM_ACTIONS)
        target_model, target_output_layer = initiate_better_target_model(x, NUM_ACTIONS)
        print("Better Model Initialized")
    prev_episode_score = -1
    saver = tf.train.Saver()
    argmax_action = tf.argmax(policy_output_layer, axis = 1)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for episode in range(number_of_episodes):
            prev_observation = seaquest_env.reset()
            observation, reward, done, _ = seaquest_env.step(random.randrange(NUM_ACTIONS))
            done = False
            episode_score = 0.0
            while not done:
                prep_obs = np.expand_dims(np.array(observation, dtype=np.float32), axis=0)
                curr_action = util.epsilon_greedy_exploration(x, sess, argmax_action, prep_obs, step, NUM_ACTIONS, EPS_END, EPS_DECAY)
                observation, reward, done, _ = seaquest_env.step(curr_action)
                next_obs = np.expand_dims(np.array(observation, dtype=np.float32), axis=0)
                next_action = util.epsilon_greedy_exploration(x, sess, argmax_action, next_obs, step, NUM_ACTIONS, EPS_END, EPS_DECAY)
                following_observation, next_reward, next_done , _ = seaquest_env.step(next_action)
                replay_memory.push(prev_observation, curr_action, observation, reward, next_action, next_reward, following_observation) # s, a , r, s' a' r' s'' 
                prev_observation = observation

                #run the session with the placeholders
                loss, gradients= util.dqn_gradient_calculation(replay_memory, x, y, policy_output_layer, target_output_layer, sess, batch_size, optimizer)
                episode_score += next_reward
                step += 1
            print(f"Episode : {episode} Episode Score : {episode_score} Step: {step}")
            if episode % TARGET_UPDATE_STEP_FREQ == 0:
                # copy policy weights into the target network; the assign ops
                # must actually be run in the session to take effect
                for (target_var, policy_var) in zip(tf.trainable_variables(target_model), tf.trainable_variables(policy_model)):
                    sess.run(tf.assign(target_var, policy_var))
            if episode_score >= prev_episode_score:
                print("Saving .........")
                saver.save(sess, os.path.join("./homework_3/", "homework_3"))
                prev_episode_score = episode_score
Example #32
def main(argv):
    options = parse_args(argv)
    led = LED(options)
    led.run(display)
Example #33
def setup_args():

    parser = argparse.ArgumentParser(
        description=
        "Carbon footprint analysis and demographic analysis provided by the acm-climate committee for conference planning."
    )
    parser.add_argument(
        "input_participants",
        help=
        "Name of the .xml file containing the list of participants to the events. Must be located in ./input/input_participants.xml",
    )
    parser.add_argument(
        "input_events",
        help=
        "Name of the .xml file containing the list of events to be processed. Must be located in ./input/input_events.xml",
    )
    parser.add_argument(
        "output_folder",
        help=
        "Name of the destination folder to store the results of the analysis. Will be created in ./output/output_folder",
    )

    parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        help=
        "Erase the content of the given output_folder if it already exists",
    )
    parser.add_argument(
        "-r",
        "--radiative",
        type=int,
        help=
        "Set the value of the radiative forcing index. The default value used is 1.891",
    )
    parser.add_argument(
        "--no_radiative",
        action="store_true",
        help=
        "Disable accounting for radiative forcing. This is equivalent to '--radiative 1'",
    )
    parser.add_argument(
        "-m",
        "--model",
        choices=["acm", "cool"],
        help="Changes the emission model to be used. Default value is acm",
    )
    parser.add_argument(
        "--no_id",
        action="store_true",
        help=
        "Assert that participants to events are not uniquely identified, disabling overlap analyses",
    )
    parser.add_argument(
        "--east-west-coast",
        action="store_true",
        help="Distinguish the east and the west coast in demographic analyses",
    )
    parser.add_argument(
        "--multilocation",
        action="store_true",
        help=
        "Estimate the carbon gain of multilocating conferences (hardcoded for SIGPLAN)",
    )
    parser.add_argument(
        "--log",
        default="info",
        choices=["info", "debug", "warning"],
        help="Set the level of logging",
    )

    args = parser.parse_args()

    GLOB = Globals(
        args.input_events,
        args.input_participants,
        args.output_folder,
        east_west=args.east_west_coast,
    )

    log_levels = {
        "debug": logging.DEBUG,
        "info": logging.INFO,
        "warning": logging.WARNING,
    }
    logging.basicConfig(
        filename="../output/analysis.log",
        filemode="w",
        format="%(asctime)s - %(levelname)s: %(message)s",
        level=log_levels[args.log],
    )

    logging.info(
        "Analyzing the set of participants from file {} having taken part to events from file {}. The results of the analysis will be stored in folder {}."
        .format(args.input_participants, args.input_events,
                args.output_folder))

    # Checks whether the name provided for the output folder was not already taken. If not, creates the folder.
    # If taken but with the '--force' option on, erase the content of the folder.
    # Otherwise, aborts.
    exists_output = os.path.isdir(GLOB.output_prefix)
    if exists_output:
        if args.force:
            for the_file in os.listdir(GLOB.output_prefix):
                file_path = os.path.join(GLOB.output_prefix, the_file)
                try:
                    if os.path.isfile(file_path):
                        os.unlink(file_path)
                except Exception as e:
                    print(e)
        else:
            sys.exit(
                "Output folder '{}' already exists. Pick another name for the analysis or use the --force option if its contents may be erased"
                .format(args.output_folder))
    else:
        os.mkdir(GLOB.output_prefix)

    if args.radiative is not None:
        logging.info("Radiative factor index modified to {}.".format(
            args.radiative))
        GLOB.radiative_factor_index = args.radiative
    if args.no_radiative:
        logging.info("Radiative factor index set to 1.")
        GLOB.radiative_factor_index = 1
    if args.model:
        logging.info("Model {} selected".format(args.model))
        GLOB.model = args.model
    if args.no_id:
        logging.info("Disabling cross-participation analyses")
        GLOB.unique_id = False
    if args.multilocation:
        GLOB.multilocation = True

    return GLOB
Example #34
def main():
    global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog, no_journal, no_preallocj, auth, keyFile, smoke_db_prefix, smoke_server_opts, test_path
    parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
    parser.add_option(
        '--mode',
        dest='mode',
        default='suite',
        help=
        'If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)'
    )
    # Some of our tests hard-code pathnames e.g., to execute, so until
    # that changes we don't have the freedom to run from anyplace.
    # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
    parser.add_option('--test-path',
                      dest='test_path',
                      default=None,
                      help="Path to the test executables to run, "
                      "currently only used for 'client' (%default)")
    parser.add_option('--mongod',
                      dest='mongod_executable',
                      default=os.path.join(mongo_repo, 'mongod'),
                      help='Path to mongod to run (%default)')
    parser.add_option('--port',
                      dest='mongod_port',
                      default="27999",
                      help='Port the mongod will bind to (%default)')
    parser.add_option('--mongo',
                      dest='shell_executable',
                      default=os.path.join(mongo_repo, 'mongo'),
                      help='Path to mongo, for .js test files (%default)')
    parser.add_option(
        '--continue-on-failure',
        dest='continue_on_failure',
        action="store_true",
        default=False,
        help='If supplied, continue testing even after a test fails')
    parser.add_option(
        '--from-file',
        dest='File',
        help=
        "Run tests/suites named in FILE, one test per line, '-' means stdin")
    parser.add_option(
        '--smoke-db-prefix',
        dest='smoke_db_prefix',
        default=smoke_db_prefix,
        help="Prefix to use for the mongods' dbpaths ('%default')")
    parser.add_option(
        '--smoke-server-opts',
        dest='smoke_server_opts',
        default=smoke_server_opts,
        help="Additional options for the mongod server ('%default')")
    parser.add_option(
        '--small-oplog',
        dest='small_oplog',
        default=False,
        action="store_true",
        help='Run tests with master/slave replication & use a small oplog')
    parser.add_option(
        '--small-oplog-rs',
        dest='small_oplog_rs',
        default=False,
        action="store_true",
        help='Run tests with replica set replication & use a small oplog')
    parser.add_option('--nojournal',
                      dest='no_journal',
                      default=False,
                      action="store_true",
                      help='Do not turn on journaling in tests')
    parser.add_option('--nopreallocj',
                      dest='no_preallocj',
                      default=False,
                      action="store_true",
                      help='Do not preallocate journal files in tests')
    parser.add_option(
        '--auth',
        dest='auth',
        default=False,
        action="store_true",
        help='Run standalone mongods in tests with authentication enabled')
    parser.add_option(
        '--authMechanism',
        dest='authMechanism',
        default='MONGODB-CR',
        help='Use the given authentication mechanism, when --auth is used.')
    parser.add_option(
        '--keyFile',
        dest='keyFile',
        default=None,
        help=
        'Path to keyFile to use to run replSet and sharding tests with authentication enabled'
    )
    parser.add_option('--ignore',
                      dest='ignore_files',
                      default=None,
                      help='Pattern of files to ignore in tests')
    parser.add_option(
        '--only-old-fails',
        dest='only_old_fails',
        default=False,
        action="store_true",
        help='Check the failfile and only run all tests that failed last time')
    parser.add_option('--reset-old-fails',
                      dest='reset_old_fails',
                      default=False,
                      action="store_true",
                      help='Clear the failfile. Do this if all tests pass')
    parser.add_option('--with-cleanbb',
                      dest='with_cleanbb',
                      default=False,
                      action="store_true",
                      help='Clear database files from previous smoke.py runs')
    parser.add_option('--server-log',
                      dest='server_log_file',
                      default=server_log_file,
                      help="Log file for the mongod server ('%default')")
    parser.add_option('--tests-log',
                      dest='tests_log_file',
                      default='',
                      help="Log file for the tests ('%default')")
    parser.add_option('--quiet',
                      dest='quiet',
                      default=False,
                      action="store_true",
                      help='Generate a quieter report (use with --tests-log)')
    parser.add_option('--valgrind',
                      dest='valgrind',
                      default=False,
                      action="store_true",
                      help='Run mongod under valgrind')
    parser.add_option('--drd',
                      dest='drd',
                      default=False,
                      action="store_true",
                      help='Run mongod under drd')
    parser.add_option(
        '--shuffle',
        dest='shuffle',
        default=False,
        action="store_true",
        help='Shuffle tests instead of running them in alphabetical order')
    parser.add_option(
        '--skip-until',
        dest='skip_tests_until',
        default="",
        action="store",
        help='Skip all tests alphabetically less than the given name.')
    parser.add_option(
        '--dont-start-mongod',
        dest='start_mongod',
        default=True,
        action='store_false',
        help='Do not start mongod before commencing test running')
    parser.add_option(
        '--use-ssl',
        dest='use_ssl',
        default=False,
        action='store_true',
        help='Run mongo shell and mongod instances with SSL encryption')

    # Buildlogger invocation from command line
    parser.add_option('--buildlogger-builder',
                      dest='buildlogger_builder',
                      default=None,
                      action="store",
                      help='Set the "builder name" for buildlogger')
    parser.add_option('--buildlogger-buildnum',
                      dest='buildlogger_buildnum',
                      default=None,
                      action="store",
                      help='Set the "build number" for buildlogger')
    parser.add_option(
        '--buildlogger-credentials',
        dest='buildlogger_credentials',
        default=None,
        action="store",
        help='Path to Python file containing buildlogger credentials')
    parser.add_option(
        '--buildlogger-phase',
        dest='buildlogger_phase',
        default=None,
        action="store",
        help=
        'Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)'
    )

    global tests
    (options, tests) = parser.parse_args()

    set_globals(options, tests)

    buildlogger_opts = (options.buildlogger_builder,
                        options.buildlogger_buildnum,
                        options.buildlogger_credentials)
    if all(buildlogger_opts):
        os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
        os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
        os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
        os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
        if options.buildlogger_phase:
            os.environ['MONGO_PHASE'] = options.buildlogger_phase
    elif any(buildlogger_opts):
        # some but not all of the required options were set
        raise Exception(
            "you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials"
        )

    if options.File:
        if options.File == '-':
            tests = sys.stdin.readlines()
        else:
            f = open(options.File)
            tests = f.readlines()
    tests = [t.rstrip('\n') for t in tests]

    if options.only_old_fails:
        run_old_fails()
        return
    elif options.reset_old_fails:
        clear_failfile()
        return

    # If we're in suite mode, tests is a list of names of sets of tests.
    if options.mode == 'suite':
        tests = expand_suites(tests)
    elif options.mode == 'files':
        tests = [(os.path.abspath(test), True) for test in tests]

    if options.ignore_files != None:
        ignore_patt = re.compile(options.ignore_files)
        print "Ignoring files with pattern: ", ignore_patt

        def ignore_test(test):
            if ignore_patt.search(test[0]) != None:
                print "Ignoring test ", test[0]
                return False
            else:
                return True

        tests = filter(ignore_test, tests)

    if not tests:
        print "warning: no tests specified"
        return

    if options.with_cleanbb:
        dbroot = os.path.join(options.smoke_db_prefix, 'data', 'db')
        if options.quiet:
            f = open(server_log_file, "a")
            try:
                call([
                    utils.find_python(), "buildscripts/cleanbb.py", "--nokill",
                    dbroot
                ],
                     stdout=f)
            finally:
                f.close()
        else:
            call([
                utils.find_python(), "buildscripts/cleanbb.py", "--nokill",
                dbroot
            ])

    if options.shuffle:
        import random
        random.shuffle(tests)

    if options.skip_tests_until != "":
        # keep only tests whose name sorts at or after the given name within its directory
        filtered_tests = []
        for t in tests:
            name = t[0]
            if name >= name[:name.rfind('/')] + "/" + options.skip_tests_until:
                filtered_tests.append(t)
        tests = filtered_tests

    try:
        run_tests(tests)
    finally:
        add_to_failfile(fails, options)

        f = open("smoke-last.json", "wb")
        f.write(json.dumps({"results": all_test_results}))
        f.close()

        report()
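The all()/any() pair above makes the buildlogger options all-or-nothing; a minimal sketch of that validation pattern with hypothetical values:

# hypothetical (builder, buildnum, credentials) values
opts = ('builder-a', '1234', None)
if all(opts):
    print 'all buildlogger options set'
elif any(opts):
    raise Exception('you must set all of the buildlogger options')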
Ejemplo n.º 35
0
import numpy as np
import pickle as pkl
import os
import parser

import torch
from torchvision import datasets

import utils
import cs_dip
import baselines as baselines
import time

NEW_RECONS = False

args = parser.parse_args('configs.json')  # 'parser' is a project-local module; presumably it reads settings from the JSON config
print(args)

NUM_MEASUREMENTS_LIST, ALG_LIST = utils.convert_to_list(args)

dataloader = utils.get_data(args)  # get dataset of images

for num_meas in NUM_MEASUREMENTS_LIST:
    args.NUM_MEASUREMENTS = num_meas

    # init measurement matrix
    A = baselines.get_A(args.IMG_SIZE * args.IMG_SIZE * args.NUM_CHANNELS,
                        args.NUM_MEASUREMENTS)
    y = np.dot(x, A)  # NOTE: x is undefined in this excerpt; presumably the flattened images built per batch below

    for _, (batch, _, im_path) in enumerate(dataloader):
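For context, the measurement step above projects each flattened image onto the matrix A; a minimal NumPy sketch, where the dimensions and the Gaussian choice of A are assumptions (get_A is project code):

import numpy as np

n, m = 64 * 64 * 1, 200                  # assumed signal dimension and measurement count
A = np.random.randn(n, m) / np.sqrt(m)   # assumed: get_A returns a scaled Gaussian matrix
x = np.random.rand(1, n)                 # stand-in for one flattened image batch
y = x.dot(A)                             # linear measurements y = xA, shape (1, m)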
Ejemplo n.º 36
0
"""

if __name__ == "__main__":

    parser = argparse.ArgumentParser()
    parser.add_argument("--files_path",
                        type=str,
                        default="images/",
                        help="path for  images files")
    parser.add_argument("--cut_files_path",
                        type=str,
                        default="cut_images/",
                        help="path for  images  after cut")

    params_opt = parser.parse_args()

    ### walk through the data folder
    for parent, dirs, file_names in os.walk(params_opt.files_path,
                                            followlinks=True):
        for file_name in file_names:
            file_path = os.path.join(parent, file_name)
            print("parent: {}".format(parent))
            print('full path : {}'.format(file_path))
            img_path = file_path
            ## 1. extract the ROI region
            """
            img = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)  # load the jpg as grayscale
            if img is None:  # cv2.imread returns None when loading fails
                print("failed to load image!")
                exit(0)
Ejemplo n.º 37
0
    for fs in fsop.get_all_fs():
        one = _get_merged_file(fs)
        fss = _get_fss_file(fs)

        print "%-10s %10s B %10s B" % (fs, _get_size(one), _get_size(fss))


if __name__ == '__main__':
    utils.install_pdb()

    parser = optparse.OptionParser()
    parser.add_option("--linux", help="Linux kernel", default=LINUX)
    parser.add_option("--clang", help="Clang path", default=CLANG)
    parser.add_option("--outdir", help="Clang output dir", default="/tmp")
    (opts, args) = parser.parse_args()

    def __print_usage(note):
        print(note)
        for (cmd, func) in get_all_cmds():
            print("> %-20s: %s" % (cmd, func.__doc__))
        exit(0)

    if len(args) == 0:
        __print_usage("Need to specify a command")

    cmd = args[0]
    if get_cmd(cmd) is None:
        __print_usage("Can't find the command, '%s'" % cmd)

    exit(invoke_cmd(cmd, opts, args[1:]))
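The dispatch above relies on get_all_cmds/get_cmd/invoke_cmd, which the excerpt does not show; a minimal registry sketch of the shape they presumably have:

CMDS = {}

def register(func):
    # expose a function as a command under its own name
    CMDS[func.__name__] = func
    return func

def get_all_cmds():
    return sorted(CMDS.items())

def get_cmd(name):
    return CMDS.get(name)

def invoke_cmd(name, opts, args):
    # each command returns an integer exit status
    return CMDS[name](opts, args)

@register
def sizes(opts, args):
    """print merged/fss file sizes per fs"""
    return 0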
Ejemplo n.º 38
0
    pbar.close()

    gallery_features = torch.cat(gallery_features, dim=0).numpy()
    gallery_labels = torch.cat(gallery_labels, dim=0).numpy()
    gallery_cams = torch.cat(gallery_cams, dim=0).numpy()

    Cmc, mAP = Video_Cmc(gallery_features, gallery_labels, gallery_cams,
                         dataloader.dataset.query_idx, 10000)
    network.train()

    return Cmc[0], mAP


if __name__ == '__main__':
    #Parse args
    args = parser.parse_args()

    test_transform = Compose([
        Resize((256, 128)),
        ToTensor(),
        Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    ])
    print('Start dataloader...')
    num_class = 625
    test_dataloader = utils.Get_Video_test_DataLoader(
        args.test_txt, args.test_info, args.query_info, test_transform,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.num_workers, S=args.S, distractor=True)
    print('End dataloader...')

    network = nn.DataParallel(
        models.CNN(args.latent_dim,
                   model_type=args.model_type,
Ejemplo n.º 39
0
        if args.run_eval:
            for top_k in [5,10, 25,50]:
                evaluate_paths(top_k, path_file, eva_file, train_labels, test_labels)

            eva_file = open(eva_file, "a")
            eva_file.write('*' * 50)
            eva_file.write('\n')
            eva_file.close()

    # eva_file = open(eva_file, "a")
    # cur_tim = time.strftime("%Y%m%d-%H%M%S")
    # eva_file.write("current time = " + str(cur_tim))


if __name__ == '__main__':
    args = parse_args()

    args.training = 0
    args.training = (args.training == 1)  # net effect: training is forced to False (evaluation mode)

    parameter_env(args)
    
    if args.h0_embbed:
        log_dir_fodder = f"no_shu_qy_uam_{args.user_query_threshold}_{args.query_threshold}_{args.query_threshold_maximum}_lr_{args.lr}_bs_{args.batch_size}_sb_{args.sub_batch_size}_ma_{args.max_acts}_p0{args.p_hop}_{args.name}_g_aiu_{args.att_core}_{args.item_core}_{args.user_core}_qy_uam_{args.user_query_threshold}_{args.query_threshold}_{args.query_threshold_maximum}"
    else:
        log_dir_fodder = f'no_shu_qy_uam_{args.user_query_threshold}_{args.query_threshold}_{args.query_threshold_maximum}_lr_{args.lr}_bs_{args.batch_size}_sb_{args.sub_batch_size}_ma_{args.max_acts}_p1{args.p_hop}_{args.name}_g_aiu_{args.att_core}_{args.item_core}_{args.user_core}_qy_uam_{args.user_query_threshold}_{args.query_threshold}_{args.query_threshold_maximum}'
    
    args.sub_batch_size = 1
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    args.device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
Ejemplo n.º 40
0
def main(cli_args):
    parser = argparse.ArgumentParser(
        description="CSCE 496 HW 1, Classify Fashion MNIST data")
    parser.add_argument('--input_dir',
                        type=str,
                        default='/work/cse496dl/shared/homework/01',
                        help='Numpy datafile input')
    parser.add_argument(
        '--model_dir',
        type=str,
        default='./homework_1/',
        help='directory where model graph and weights are saved')
    parser.add_argument('--epoch',
                        type=int,
                        default=100,
                        help="Epoch : number of iterations for the model")
    parser.add_argument('--batch_size',
                        type=int,
                        default=32,
                        help="Batch Size")
    parser.add_argument('--model',
                        type=int,
                        help=" '1' for basic model, '2' for best model")
    parser.add_argument(
        '--stopCount',
        type=int,
        default=100,
        help="Number of times for dropping accuracy before early stopping")
    args_input = parser.parse_args(cli_args)

    if args_input.input_dir:
        input_dir = args_input.input_dir
    else:
        raise ValueError("Provide a valid input data path")

    if args_input.model_dir:
        model_dir = args_input.model_dir
    else:
        raise ValueError("Provide a valid model data path")

    if args_input.epoch:
        epochs = args_input.epoch
    else:
        raise ValueError("Epoch value cannot be null and has to be an integer")

    if args_input.batch_size:
        batch_size = args_input.batch_size
    else:
        raise ValueError(
            "Batch Size value cannot be null and has to be an integer")

    if args_input.model:
        model = args_input.model
    else:
        raise ValueError("Model selection must not be empty")

    if args_input.stopCount:
        stop_counter = args_input.stopCount
    else:
        raise ValueError("StopCount have to be an int")

    #Make output model dir
    if not os.path.exists(model_dir):
        os.mkdir(model_dir)

    #Load Data
    train_images, train_labels, test_images, test_labels, val_images, val_labels = util.load_data(
        input_dir)
    x = tf.placeholder(tf.float32, [None, 784], name='input_placeholder')
    y = tf.placeholder(tf.float32, [None, 10], name='labels')

    #Specify Model
    if model == 1:
        _, outputLayer = initiate_basic_model(x)
    elif model == 2:
        _, outputLayer = initiate_better_model(x)
    else:
        raise ValueError("Model selection must be 1 (basic) or 2 (best)")

    #Run Training with early stopping and save output
    counter = stop_counter
    prev_winner = 0
    curr_winner = 0
    optimizer = tf.train.AdamOptimizer(learning_rate=0.0001)
    cross_entropy = util.cross_entropy_op(y, outputLayer)
    global_step_tensor = util.global_step_tensor('global_step_tensor')
    train_op = util.train_op(cross_entropy, global_step_tensor, optimizer)
    conf_matrix = util.confusion_matrix_op(y, outputLayer, 10)
    saver = tf.train.Saver()
    with tf.Session() as session:
        session.run(tf.global_variables_initializer())
        for i in range(10):
            print("KFold : " + str(i))
            counter = stop_counter
            for epoch in range(epochs):
                if counter > 0:
                    print("Epoch : " + str(epoch))
                    util.training(batch_size, x, y, train_images[i],
                                  train_labels[i], session, train_op,
                                  conf_matrix, 10)
                    accuracy = util.validation(batch_size, x, y, val_images[i],
                                               val_labels[i], session,
                                               cross_entropy, conf_matrix, 10)
                    if epoch == 0:
                        prev_winner = accuracy
                    else:
                        curr_winner = accuracy
                        if (curr_winner > prev_winner) and (counter > 0):
                            prev_winner = curr_winner
                        else:
                            counter -= 1

                    test_accuracy = util.test(batch_size, x, y, test_images[i],
                                              test_labels[i], session,
                                              cross_entropy, conf_matrix, 10)
                    #Calculate the confidence interval
                    value1, value2 = util.confidence_interval(
                        test_accuracy, 1.96, test_images[i].shape[0])
                    print("Confidence Interval : " + str(value1) + " , " +
                          str(value2))
                else:
                    break
            print("Saving.......")
            saver.save(session, os.path.join("./homework_1/", "homework_1"))
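util.confidence_interval is called with z = 1.96 and the test-set size, which suggests the usual normal-approximation binomial interval; a plausible sketch of the helper (an assumption, since util is course code):

import math

def confidence_interval(acc, z, n):
    # normal approximation: acc +/- z * sqrt(acc * (1 - acc) / n)
    half = z * math.sqrt(acc * (1.0 - acc) / n)
    return acc - half, acc + half

print(confidence_interval(0.9, 1.96, 10000))  # -> (0.89412..., 0.90588...)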
Ejemplo n.º 41
0
def main():
    global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog, no_journal, no_preallocj, auth, keyFile, smoke_db_prefix, test_path
    parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
    parser.add_option(
        '--mode',
        dest='mode',
        default='suite',
        help=
        'If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)'
    )
    # Some of our tests hard-code pathnames e.g., to execute, so until
    # that changes we don't have the freedom to run from anyplace.
    # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
    parser.add_option('--test-path',
                      dest='test_path',
                      default=None,
                      help="Path to the test executables to run, "
                      "currently only used for 'client' (%default)")
    parser.add_option('--mongod',
                      dest='mongod_executable',
                      default=os.path.join(mongo_repo, 'mongod'),
                      help='Path to mongod to run (%default)')
    parser.add_option('--port',
                      dest='mongod_port',
                      default="27999",
                      help='Port the mongod will bind to (%default)')
    parser.add_option('--mongo',
                      dest='shell_executable',
                      default=os.path.join(mongo_repo, 'mongo'),
                      help='Path to mongo, for .js test files (%default)')
    parser.add_option(
        '--continue-on-failure',
        dest='continue_on_failure',
        action="store_true",
        default=False,
        help='If supplied, continue testing even after a test fails')
    parser.add_option(
        '--from-file',
        dest='File',
        help=
        "Run tests/suites named in FILE, one test per line, '-' means stdin")
    parser.add_option(
        '--smoke-db-prefix',
        dest='smoke_db_prefix',
        default=smoke_db_prefix,
        help="Prefix to use for the mongods' dbpaths ('%default')")
    parser.add_option(
        '--small-oplog',
        dest='small_oplog',
        default=False,
        action="store_true",
        help='Run tests with master/slave replication & use a small oplog')
    parser.add_option(
        '--small-oplog-rs',
        dest='small_oplog_rs',
        default=False,
        action="store_true",
        help='Run tests with replica set replication & use a small oplog')
    parser.add_option('--nojournal',
                      dest='no_journal',
                      default=False,
                      action="store_true",
                      help='Do not turn on journaling in tests')
    parser.add_option('--nopreallocj',
                      dest='no_preallocj',
                      default=False,
                      action="store_true",
                      help='Do not preallocate journal files in tests')
    parser.add_option(
        '--auth',
        dest='auth',
        default=False,
        action="store_true",
        help='Run standalone mongods in tests with authentication enabled')
    parser.add_option(
        '--keyFile',
        dest='keyFile',
        default=None,
        help=
        'Path to keyFile to use to run replSet and sharding tests with authentication enabled'
    )
    parser.add_option('--ignore',
                      dest='ignore_files',
                      default=None,
                      help='Pattern of files to ignore in tests')
    parser.add_option(
        '--only-old-fails',
        dest='only_old_fails',
        default=False,
        action="store_true",
        help='Check the failfile and only run all tests that failed last time')
    parser.add_option('--reset-old-fails',
                      dest='reset_old_fails',
                      default=False,
                      action="store_true",
                      help='Clear the failfile. Do this if all tests pass')
    parser.add_option('--with-cleanbb',
                      dest='with_cleanbb',
                      default=False,
                      action="store_true",
                      help='Clear database files from previous smoke.py runs')
    parser.add_option('--dont-start-mongod',
                      dest='start_mongod',
                      default=True,
                      action='store_false')

    # Buildlogger invocation from command line
    parser.add_option('--buildlogger-builder',
                      dest='buildlogger_builder',
                      default=None,
                      action="store",
                      help='Set the "builder name" for buildlogger')
    parser.add_option('--buildlogger-buildnum',
                      dest='buildlogger_buildnum',
                      default=None,
                      action="store",
                      help='Set the "build number" for buildlogger')
    parser.add_option(
        '--buildlogger-credentials',
        dest='buildlogger_credentials',
        default=None,
        action="store",
        help='Path to Python file containing buildlogger credentials')
    parser.add_option(
        '--buildlogger-phase',
        dest='buildlogger_phase',
        default=None,
        action="store",
        help=
        'Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)'
    )

    global tests
    (options, tests) = parser.parse_args()

    set_globals(options, tests)

    buildlogger_opts = (options.buildlogger_builder,
                        options.buildlogger_buildnum,
                        options.buildlogger_credentials)
    if all(buildlogger_opts):
        os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
        os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
        os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
        os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
        if options.buildlogger_phase:
            os.environ['MONGO_PHASE'] = options.buildlogger_phase
    elif any(buildlogger_opts):
        # some but not all of the required options were set
        raise Exception(
            "you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials"
        )

    if options.File:
        if options.File == '-':
            tests = sys.stdin.readlines()
        else:
            f = open(options.File)
            tests = f.readlines()
    tests = [t.rstrip('\n') for t in tests]

    if options.only_old_fails:
        run_old_fails()
        return
    elif options.reset_old_fails:
        clear_failfile()
        return

    # If we're in suite mode, tests is a list of names of sets of tests.
    if options.mode == 'suite':
        tests = expand_suites(tests)
    elif options.mode == 'files':
        tests = [(os.path.abspath(test), True) for test in tests]

    if options.ignore_files != None:
        ignore_patt = re.compile(options.ignore_files)
        tests = filter(lambda x: ignore_patt.search(x[0]) == None, tests)

    if not tests:
        print "warning: no tests specified"
        return

    if options.with_cleanbb:
        dbroot = os.path.join(options.smoke_db_prefix, 'data', 'db')
        call([
            utils.find_python(), "buildscripts/cleanbb.py", "--nokill", dbroot
        ])

    try:
        run_tests(tests)
    finally:
        add_to_failfile(fails, options)
        report()
Ejemplo n.º 42
0
           soup.find('span', attrs = {'class':'top-card__subline-item'}).text,
           str(experiance).replace("'",'').replace('[','').replace(']',''),
           str(education).replace("'",'').replace('[','').replace(']',''),
           str(patents).replace("'",'').replace('[','').replace(']',''),
           str(score).replace("'",'').replace('[','').replace(']','')
           ]]
    df=pd.DataFrame(data,columns=['name','profession','loc','experiance','edu','patents','score'])
'''
    print(skills)


parser = argparse.ArgumentParser()
parser.add_argument('-url',
                    '--url',
                    help='URL to the online repository of images')
args = vars(parser.parse_args())
url = args['url']
url = "https://www.linkedin.com/login"

#userid = str(input("Enter email address or number with country code: "))
#password = getpass.getpass('Enter your password: ')
firefoxProfile = webdriver.FirefoxProfile()
firefoxProfile.set_preference('permissions.default.image', 2)  # 2 = never load images

driver = webdriver.Firefox(firefoxProfile)
driver.get(url)
driver.implicitly_wait(3)
driver.find_element_by_id("username").send_keys('*****@*****.**')
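A hedged continuation of the login flow; the password element id and the submit selector are assumptions about the page, and the credential stays masked as in the original:

driver.find_element_by_id("password").send_keys("********")            # masked, as above
driver.find_element_by_css_selector('button[type="submit"]').click()   # assumed selector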
Ejemplo n.º 43
0
def main():
    ''' Main function '''
    parser = argparse.ArgumentParser(description='Implement image classification on ImageNet datset using pytorch')
    parser.add_argument('--arch', default='bam', type=str, help='Attention Model (bam, cbam)')
    parser.add_argument('--backbone', default='resnet50', type=str, help='backbone classification model (resnet18/34/50/101/152)')
    parser.add_argument('--epoch', default=1, type=int, help='start epoch')
    parser.add_argument('--n_epochs', default=350, type=int, help='number of total epochs to run')
    parser.add_argument('--batch', default=256, type=int, help='mini batch size (default: 1024)')
    parser.add_argument('--lr', default=0.1, type=float, help='initial learning rate')
    parser.add_argument('--momentum', default=0.9, type=float, help='momentum')
    parser.add_argument('--save_directory', default='trained.chkpt', type=str, help='path to latest checkpoint')
    parser.add_argument('--workers', default=0, type=int, help='num_workers')
    parser.add_argument('--resume', action='store_true', help='resume from checkpoint')
    parser.add_argument('--datasets', default='CIFAR100', type=str, help='classification dataset  (CIFAR10, CIFAR100, ImageNet)')
    parser.add_argument('--weight_decay', default=5e-4, type=float, help='weight_decay')
    parser.add_argument('--save', default='trained', type=str, help='trained.chkpt')
    parser.add_argument('--save_multi', default='trained_multi', type=str, help='trained_multi.chkpt')
    parser.add_argument('--evaluate', action='store_true', help='evaluate only')
    parser.add_argument('--reduction_ratio', default=16, type=int, help='reduction_ratio')
    parser.add_argument('--dilation_value', default=4, type=int, help='dilation_value')
    args = parser.parse_args()
    args.arch = args.arch.lower()
    args.backbone = args.backbone.lower()
    args.datasets = args.datasets.lower()

    if not os.path.isdir('checkpoints'):
        os.mkdir('checkpoints')
    # To-do: Write a code relating to seed.

    # use gpu or multi-gpu or not.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    use_multi_gpu = torch.cuda.device_count() > 1
    print('[Info] device:{} use_multi_gpu:{}'.format(device, use_multi_gpu))

    if args.datasets == 'cifar10':
        num_classes = 10
    elif args.datasets == 'cifar100':
        num_classes = 100
    elif args.datasets == 'imagenet':
        num_classes = 1000

    # load the data.
    print('[Info] Load the data.')
    train_loader, valid_loader, train_size, valid_size = prepare_dataloaders(args)

    # load the model.
    print('[Info] Load the model.')

    if args.backbone == 'resnet18':
        model = resnet.resnet18(num_classes=num_classes, atte=args.arch, ratio=args.reduction_ratio, dilation = args.dilation_value)
    elif args.backbone == 'resnet34':
        model = resnet.resnet34(num_classes=num_classes, atte=args.arch, ratio=args.reduction_ratio, dilation = args.dilation_value)
    elif args.backbone == 'resnet50':
        model = resnet.resnet50(num_classes=num_classes, atte=args.arch, ratio=args.reduction_ratio, dilation = args.dilation_value)
    elif args.backbone == 'resnet101':
        model = resnet.resnet101(num_classes=num_classes, atte=args.arch, ratio=args.reduction_ratio, dilation = args.dilation_value)
    elif args.backbone == 'resnet152':
        model = resnet.resnet152(num_classes=num_classes, atte=args.arch, ratio=args.reduction_ratio, dilation = args.dilation_value)


    model = model.to(device)
    if use_multi_gpu : model = torch.nn.DataParallel(model)
    print('[Info] Total parameters {} '.format(count_parameters(model)))
    # define loss function.
    criterion = torch.nn.CrossEntropyLoss().to(device)

    # define optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    if args.resume:
        # Load the checkpoint (the path is the same regardless of GPU count).
        print('[Info] Loading checkpoint.')
        checkpoint = load_checkpoint(args.save)

        backbone = checkpoint['backbone']
        args.epoch = checkpoint['epoch']
        state_dict = checkpoint['state_dict']
        model.load_state_dict(state_dict)
        print('[Info] epoch {} backbone {}'.format(args.epoch, backbone))

    # run evaluate.
    if args.evaluate:
        _ = run_epoch(model, 'valid', [args.epoch, args.epoch], criterion, optimizer, valid_loader, valid_size, device)
        return

    # run train.
    best_acc1 = 0.
    for e in range(args.epoch, args.n_epochs + 1):
        adjust_learning_rate(optimizer, e, args)

        # train for one epoch
        _ = run_epoch(model, 'train', [e, args.n_epochs], criterion, optimizer, train_loader, train_size, device)

        # evaluate on validation set
        with torch.no_grad():
            acc1 = run_epoch(model, 'valid', [e, args.n_epochs], criterion, optimizer, valid_loader, valid_size, device)

        # Save checkpoint.
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)
        save_checkpoint({
            'epoch': e,
            'backbone': args.backbone,
            'state_dict': model.state_dict(),
            'best_acc1': best_acc1,
            'optimizer': optimizer.state_dict(),
        }, is_best, args.save)

        if use_multi_gpu:
            save_checkpoint({
                'epoch': e,
                'backbone': args.backbone,
                'state_dict': model.module.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
            }, is_best, args.save_multi)

        print('[Info] acc1 {} best@acc1 {}'.format(acc1, best_acc1))
Ejemplo n.º 44
0
    test_mrp_dataset(dev_data)

    logger.info("processing test set")
    test_data = readFeaturesInput(opt.testCompanionFilesPath)
    mergeWithAnnotatedGraphs(test_data, opt.testFilesPath)
    test_mrp_dataset(test_data)


def main(opt):
    test_mrp_to_graph()


if __name__ == "__main__":
    global opt
    parser = arg_parser()
    opt = parser.parse_args()
    suffix = opt.suffix
    opt.trainFolderPath = opt.build_folder + "/training/"
    opt.trainingFilesPath = folder_to_files_path(opt.trainFolderPath, suffix)
    opt.trainingCompanionFilesPath = folder_to_files_path(
        opt.trainFolderPath, opt.companion_suffix)

    opt.devFolderPath = opt.build_folder + "/dev/"
    opt.devFilesPath = folder_to_files_path(opt.devFolderPath, suffix)
    opt.devCompanionFilesPath = folder_to_files_path(opt.devFolderPath,
                                                     opt.companion_suffix)

    opt.testFolderPath = opt.build_folder + "/test/"
    opt.testFilesPath = folder_to_files_path(opt.testFolderPath, suffix)
    opt.testCompanionFilesPath = folder_to_files_path(opt.testFolderPath,
                                                      opt.companion_suffix)
Ejemplo n.º 45
0
            cfg["model"]["num_filters"],
            cfg["model"]["convs"],
            cfg["model"]["fc_dim"],
        )
    else:
        assert "layers" in cfg["model"], const.WRONG_CONFIG_FILE(mtype)
        return conv_seq.ConvSeq(
            len(const.IDX_TO_LBL),
            cfg["corpus"]["voc_len"],
            cfg["corpus"]["emb_dim"],
            cfg["model"]["layers"],
        )


if __name__ == "__main__":
    args = parser.parse_args(const.TRAIN_MODE)

    assert args.config

    with open(args.config, "r") as file:
        cfg = yaml.safe_load(file)

    out = Path(args.out)
    if out.is_dir():
        utils.clear_dir(out)
    else:
        out.mkdir(parents=True, exist_ok=True)

    train = Path(args.train)
    dev = Path(args.dev)
Ejemplo n.º 46
0
def main():
    args = parse_args()

    ## Create an output dir
    output_dir_path = args.od + args.en
    if not os.path.exists(output_dir_path):
        os.makedirs(output_dir_path)
        dir_name = output_dir_path
        tb_dirname = output_dir_path + '/tb_logdir'
    else:
        counter = 1
        dir_name = output_dir_path
        new_dir_name = dir_name
        while os.path.exists(new_dir_name):
            new_dir_name = dir_name + "_" + str(counter)
            counter += 1
        os.makedirs(new_dir_name)
        dir_name = new_dir_name
        tb_dirname = dir_name + "/tb_logdir"

    print("===>> Output folder = {}".format(dir_name))
    
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)

    if args.cuda:
        torch.backends.cudnn.benchmark = True
        torch.cuda.manual_seed(args.seed)
    
    loaders = Cifar10Loaders()
    train_loader = loaders.train_loader()
    test_loader = loaders.test_loader()

    if args.netqat:
        base_model = cnn()
        model=cnn(qat_mode=True)

        ## Loading checkpoints here
        checkpoint = torch.load(args.load_ckpt)

        ##making sure that the checkpoint was loaded from disk correctly
        base_model.load_state_dict(checkpoint['model_state_dict'],strict=True)
        base_model_sd = base_model.state_dict()
        ##renaming the keys only to match the model with qat nodes. Only convs and BNs's will be changed
        base_model_sd_new = map_ckpt_names(base_model_sd)

        ##Updating the values of keys for qat model 
        for k in base_model_sd_new.keys():
            try:
                model.state_dict()[k].copy_(base_model_sd_new[k])
                print("{} successfully loaded".format(k))
            except (KeyError, RuntimeError):
                print("{} didn't load".format(k))

    else:
        model=cnn()

    ## Instantiate tensorboard logs
    writer = SummaryWriter(tb_dirname)
    images, labels = next(iter(train_loader))
    img_grid = torchvision.utils.make_grid(images)
    writer.add_image('cifar-10', img_grid)
    #writer.add_graph(model,images)

    if args.cuda:
        model = model.cuda()
        if args.parallel:
            model = nn.DataParallel(model,device_ids=range(torch.cuda.device_count()))

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
    best_test_accuracy=0
    print("===>> Training started")

    if args.netqat:
        # optimizer/epoch/loss state exists only when a checkpoint was loaded above
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        epoch = checkpoint['epoch']
        loss = checkpoint['loss']
        print("===>>> Checkpoint loaded successfully from {} at epoch {} ".format(args.load_ckpt, epoch))

    print(model)
    for epoch in range(args.start_epoch, args.start_epoch + args.num_epochs):
        running_loss=0.0
        start=time.time()
        model.train()
        for i, data in enumerate(train_loader,0):
            inputs, labels = data

            if args.cuda:
                inputs = inputs.cuda()
                labels = labels.cuda()

            optimizer.zero_grad()
            
            outputs = model(inputs)
            loss = criterion(outputs,labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            if i % 1000 == 999:
                writer.add_scalar('train_loss',running_loss/1000, epoch * len(train_loader) + i)
                writer.add_scalar('learning_rate',optimizer.param_groups[0]['lr'],epoch * len(train_loader) + i)
        
        if epoch > 0 and epoch % args.lrdt == 0:
            print("===>> decaying learning rate at epoch {}".format(epoch))
            for param_group in optimizer.param_groups:
                param_group['lr'] = param_group['lr'] * 0.94


        running_loss /= len(train_loader)
        end = time.time()
        test_accuracy = calculate_accuracy(model,test_loader)
        writer.add_scalar('test_accuracy',test_accuracy, epoch)

        ## Adding histograms after every epoch
        for tag, value in model.named_parameters():
            tag = tag.replace('.','/')
            writer.add_histogram(tag,value.data.cpu().numpy(),epoch)

        print("Epoch: {0} | Loss: {1} | Test accuracy: {2}| Time Taken (sec): {3} ".format(epoch+1, np.around(running_loss,6), test_accuracy, np.around((end-start),4)))

        best_ckpt_filename = dir_name + "/ckpt_" + str(epoch) +'.pth'
        ##Save the best checkpoint
        if test_accuracy > best_test_accuracy:
            best_test_accuracy = test_accuracy
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'loss': running_loss,
                }, best_ckpt_filename)
    writer.close()
    print("Training finished")
Ejemplo n.º 47
0
def main():
	# parsing the input argument
	parser = argparse.ArgumentParser(
		description='determine the part of FLAT dataset to download.'
	)
	parser.add_argument(\
		'-n','--name', 
		metavar='dataset_name',
		type=str, 
		default = ['test'],
		nargs = '+',
		help='list the part of FLAT dataset to download'
	)
	parser.add_argument(\
		'-c','--category', 
		metavar='category_name',
		type=str, 
		default = 'kinect',
		help='list the hardware or category of FLAT dataset to download'
	)
	args = parser.parse_args()

	# create the local folder for the dataset
	folder = './FLAT/'
	if not os.path.exists(folder):
		os.mkdir(folder)

	# load the directory list of the flat dataset
	file_dir_name = 'file_dir.pickle'
	with open(file_dir_name, 'rb') as f:
		file_dir = pickle.load(f)

	# indicate the hardware or trans_render category one wants to download
	flat_flg = args.category
	lists = [key for key in file_dir.keys() if (flat_flg in key) and ('.txt' in key)]
	if 'all' in args.name:
		list_flgs = lists
	else:
		list_flgs = []
		for i in range(len(args.name)):
			list_flgs += [key for key in file_dir.keys() \
			if (flat_flg in key) and ('.txt' in key) and (args.name[i] in key)]

	os.chdir(folder)
	lists = []

	# if one needs to download the trans_rendering file
	if flat_flg == 'trans_render':
		# download the files in the datafolder
		keys = [key for key in file_dir.keys() \
			if (key.split('/')[1] == flat_flg)
		]
		batch_download(keys, file_dir)
	else:
		# download the certain list indicated by the flg
		for i in range(len(list_flgs)):
			filename = list_flgs[i]
			if filename in file_dir.keys():
				batch_download([filename],file_dir)

				# load the list file and read its contents
				with open(filename, 'r') as f:
					message = f.read()
				files = message.split('\n')
				data_list = files[0:-1]

				# download the images in the list folder
				filename = filename[:-4]+'/'
				keys = [key for key in file_dir.keys() if filename in key]
				batch_download(keys, file_dir)

				# download the files in the datafolder
				keys = [key for key in file_dir.keys() \
					if (key.split('/')[-1] in data_list) \
					and (key.split('/')[1] == flat_flg)
				]
				batch_download(keys, file_dir)

	# download a trans_render model for data augmentation
	param_id = '1CEghNRT-Y_uNzFTkIUXQHaB61kXN0Kt6'
	key = './trans_render/static/'
	for i in range(1,len(key.split('/'))):
		folder = '/'.join(key.split('/')[0:i])+'/'
		if not os.path.exists(folder):
			os.mkdir(folder)
	if not os.path.isfile(folder+'1499455750460059.pickle'):
		gdd.download_file_from_google_drive(
			file_id=param_id,
			dest_path=folder+'1499455750460059.pickle',
			unzip=True,
		)


	# download the parameters
	param_id = '1qXvprK-vmS4eJJA4GimjjuqNoNPuAvpw'
	os.chdir('../')
	folder = './params/'
	if not os.path.exists(folder):
		os.mkdir(folder)
	if not os.path.isfile(folder+'params.zip'):
		gdd.download_file_from_google_drive(
			file_id=param_id,
			dest_path=folder+'params.zip',
			unzip=True,
		)

	# download the models
	param_id = '1gVFmJ4mXkcnjjNHfgQ_BKM4v7woMUYWa'
	os.chdir('./pipe/')
	folder = './models/'
	if not os.path.exists(folder):
		os.mkdir(folder)
	if not os.path.isfile(folder+'kinect.zip'):
		gdd.download_file_from_google_drive(
			file_id=param_id,
			dest_path=folder+'kinect.zip',
			unzip=True
		)
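gdd here is presumably the googledrivedownloader package; if so, the calls above correspond to an import and signature like this (the package identification is an assumption):

from google_drive_downloader import GoogleDriveDownloader as gdd

# download_file_from_google_drive(file_id, dest_path, overwrite=False, unzip=False)
gdd.download_file_from_google_drive(file_id='1qXvprK-vmS4eJJA4GimjjuqNoNPuAvpw',
                                    dest_path='./params/params.zip',
                                    unzip=True)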
Ejemplo n.º 48
0
    "-i",
    "--brint",
    help="pass the connection of br-int to br-tun port, or use default '1' ",
    type=str)
parser.add_argument(
    "-c",
    "--control",
    help="pass the connection of br-tun to contoller, or use default '2' ",
    type=str)
parser.add_argument(
    "-n",
    "--node",
    help="pass the connection of br-int to the other node, or use default '3' ",
    type=str)  # TODO: support multiple nodes and the port corresponding to each node

args = parser.parse_args()  # pass the arguments to the parser
# default values for ports
server = get_ip()

if args.compute:  # parse the server adress passed
    compute = args.compute
if args.brint:
    brintport = args.brint
else:
    brintport = '1'
if args.control:
    brcontrol = args.control
else:
    brcontrol = '2'
if args.node:
    brnode = args.node
Ejemplo n.º 49
0
def argparser():
    parser = argparse.ArgumentParser()
    parser.add_argument('-F',
                        type=str,
                        required=True,
                        help='input microscopy image path')
    parser.add_argument(
        '--mode',
        type=int,
        required=True,
        help='0: single image processing;\n 1: multiple image processing')
    parser.add_argument(
        '--type',
        type=int,
        default=0,
        help='setting type: 0: common setting;\n 1: including large cells')
    parser.add_argument(
        '--choose',
        type=str,
        default='0',
        help='GPU id to use for training; -1 means the GPU is not used')
    parser.add_argument('--class',
                        type=int,
                        default=2,
                        help='number of classes')
    parser.add_argument('--cardinality',
                        default=32,
                        type=int,
                        help='ResNeXt cardinality')
    parser.add_argument('--batch_size',
                        type=int,
                        default=128,
                        help='batch size for training')
    parser.add_argument('-M',
                        type=str,
                        default="best_model/ema_best.pth.tar",
                        help='trained model path of OC_Finder'
                        )  # File path for our MAINMAST code
    parser.add_argument('--width',
                        type=int,
                        default=50,
                        help="Width of classification image")
    parser.add_argument('--height',
                        type=int,
                        default=50,
                        help="Height of classification image")
    parser.add_argument('--num_workers',
                        type=int,
                        default=4,
                        help="number of workers for the dataloader")
    parser.add_argument(
        '--resize',
        default=0,
        type=int,
        help=
        "whether to resize the input microscopy image. default: 0 (no resizing)"
    )
    parser.add_argument(
        '--resize_height',
        default=200,
        type=int,
        help="The resized image height used for the segmentation")
    parser.add_argument(
        '--resize_width',
        default=200,
        type=int,
        help="The resized image width used for the segmentation")
    parser.add_argument(
        '--filter_size',
        default=3,
        type=int,
        help=
        "filter size for segmentation; adjust it to get different segmentation results. default: 3"
    )
    parser.add_argument(
        '--threshold',
        default=195,
        type=int,
        help=
        "Threshold used to do image segmentation (Suggested 150-210 for big cell cases)"
    )
    parser.add_argument(
        '--remove_pixel',
        default=500,
        type=int,
        help="number of pixels to remove small segmented regions. default: 500"
    )
    args = parser.parse_args()
    params = vars(args)
    return params
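Because argparser() returns vars(args), callers receive a plain dict rather than a Namespace; a hypothetical single-image run (the script name and path are illustrative):

# e.g. invoked as: python oc_finder_main.py -F images/cell.png --mode 0
params = argparser()
print(params['mode'], params['width'], params['height'])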
Ejemplo n.º 50
0
def main():
    import os
    import json
    import pickle
    import re
    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option('-q',
                      '--ip-api',
                      dest='ip_api',
                      default='http://127.0.0.1:8080',
                      help='HTTP API to retrieve IP info.')
    parser.add_option('-f',
                      '--file',
                      default='route.pickle',
                      help='Use the pickled routes')
    parser.add_option('-u',
                      '--update',
                      action="store_true",
                      default=False,
                      help='Update routes')
    parser.add_option('--source-network',
                      dest='source_network',
                      action='append',
                      help='Regex of source network pattern')
    parser.add_option('--target-network',
                      dest='target_network',
                      action='append',
                      help='Regex of target network pattern')

    (options, args) = parser.parse_args()

    logging.basicConfig(
        level=logging.INFO,
        format=
        '[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d] %(message)s')

    if not args:
        import explore
        args = explore.targets

    ip_parser = lambda ip: parse_ip(options.ip_api, ip)

    source_network = []
    for pattern in options.source_network or []:
        logging.info('Using source pattern: ' + pattern)
        source_network.append(re.compile(pattern, re.IGNORECASE))

    target_network = []
    for pattern in options.target_network or []:
        logging.info('Using target pattern: ' + pattern)
        target_network.append(re.compile(pattern, re.IGNORECASE))

    routes = []
    if not options.update:
        try:
            routes = pickle.load(open(options.file, 'rb'))
        except Exception as e:
            logging.error(e, exc_info=True)

    if not routes:
        logging.info("Fetching routes.")
        for ip in args:
            for route in get_routes(ip, ip_parser):
                routes.append(route)

        logging.info('Dumping routes into {}'.format(options.file))
        tmp_filename = options.file + '.tmp'
        tmp = open(tmp_filename, 'wb')
        pickle.dump(routes, tmp, protocol=2)
        os.rename(tmp_filename, options.file)

    geo_json = GeoJSON()
    for route in routes:
        if not match(source_network, route[1]) or not match(
                target_network, route[3]):
            continue

        geo_json.add_route(*route)

    print(json.dumps(geo_json.to_object()))
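The dump above writes to a .tmp file and then renames it over the target, the standard atomic-replace pattern; isolated as a helper (the helper name is ours):

import os
import pickle

def atomic_pickle_dump(obj, path):
    # rename is atomic on POSIX, so readers never observe a half-written file
    tmp_path = path + '.tmp'
    with open(tmp_path, 'wb') as f:
        pickle.dump(obj, f, protocol=2)
    os.rename(tmp_path, path)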
Ejemplo n.º 51
0
def main():
    global mongod_executable, mongod_port, shell_executable, continue_on_failure, small_oplog, no_journal, set_parameters, no_preallocj, auth, keyFile, smoke_db_prefix, test_path
    parser = OptionParser(usage="usage: smoke.py [OPTIONS] ARGS*")
    parser.add_option('--mode', dest='mode', default='suite',
                      help='If "files", ARGS are filenames; if "suite", ARGS are sets of tests (%default)')
    # Some of our tests hard-code pathnames e.g., to execute, so until
    # that changes we don't have the freedom to run from anyplace.
    # parser.add_option('--mongo-repo', dest='mongo_repo', default=None,
    parser.add_option('--test-path', dest='test_path', default=None,
                      help="Path to the test executables to run, "
                      "currently only used for 'client' (%default)")
    parser.add_option('--mongod', dest='mongod_executable', default=os.path.join(mongo_repo, 'mongod'),
                      help='Path to mongod to run (%default)')
    parser.add_option('--port', dest='mongod_port', default="27999",
                      help='Port the mongod will bind to (%default)')
    parser.add_option('--mongo', dest='shell_executable', default=os.path.join(mongo_repo, 'mongo'),
                      help='Path to mongo, for .js test files (%default)')
    parser.add_option('--continue-on-failure', dest='continue_on_failure',
                      action="store_true", default=False,
                      help='If supplied, continue testing even after a test fails')
    parser.add_option('--from-file', dest='File',
                      help="Run tests/suites named in FILE, one test per line, '-' means stdin")
    parser.add_option('--smoke-db-prefix', dest='smoke_db_prefix', default=smoke_db_prefix,
                      help="Prefix to use for the mongods' dbpaths ('%default')")
    parser.add_option('--small-oplog', dest='small_oplog', default=False,
                      action="store_true",
                      help='Run tests with master/slave replication & use a small oplog')
    parser.add_option('--small-oplog-rs', dest='small_oplog_rs', default=False,
                      action="store_true",
                      help='Run tests with replica set replication & use a small oplog')
    parser.add_option('--nojournal', dest='no_journal', default=False,
                      action="store_true",
                      help='Do not turn on journaling in tests')
    parser.add_option('--nopreallocj', dest='no_preallocj', default=False,
                      action="store_true",
                      help='Do not preallocate journal files in tests')
    parser.add_option('--auth', dest='auth', default=False,
                      action="store_true",
                      help='Run standalone mongods in tests with authentication enabled')
    parser.add_option('--authMechanism', dest='authMechanism', default='MONGODB-CR',
                      help='Use the given authentication mechanism, when --auth is used.')
    parser.add_option('--keyFile', dest='keyFile', default=None,
                      help='Path to keyFile to use to run replSet and sharding tests with authentication enabled')
    parser.add_option('--ignore', dest='ignore_files', default=None,
                      help='Pattern of files to ignore in tests')
    parser.add_option('--only-old-fails', dest='only_old_fails', default=False,
                      action="store_true",
                      help='Check the failfile and only run all tests that failed last time')
    parser.add_option('--reset-old-fails', dest='reset_old_fails', default=False,
                      action="store_true",
                      help='Clear the failfile. Do this if all tests pass')
    parser.add_option('--with-cleanbb', dest='with_cleanbb', default=False,
                      action="store_true",
                      help='Clear database files from previous smoke.py runs')
    parser.add_option('--dont-start-mongod', dest='start_mongod', default=True,
                      action='store_false',
                      help='Do not start mongod before commencing test running')
    parser.add_option('--use-ssl', dest='use_ssl', default=False,
                      action='store_true',
                      help='Run mongo shell and mongod instances with SSL encryption')
    parser.add_option('--set-parameters', dest='set_parameters', default="",
                      help='Adds --setParameter for each passed in items in the csv list - ex. "param1=1,param2=foo" ')
    # Buildlogger invocation from command line
    parser.add_option('--buildlogger-builder', dest='buildlogger_builder', default=None,
                      action="store", help='Set the "builder name" for buildlogger')
    parser.add_option('--buildlogger-buildnum', dest='buildlogger_buildnum', default=None,
                      action="store", help='Set the "build number" for buildlogger')
    parser.add_option('--buildlogger-credentials', dest='buildlogger_credentials', default=None,
                      action="store", help='Path to Python file containing buildlogger credentials')
    parser.add_option('--buildlogger-phase', dest='buildlogger_phase', default=None,
                      action="store", help='Set the "phase" for buildlogger (e.g. "core", "auth") for display in the webapp (optional)')
    parser.add_option('--report-file', dest='report_file', default=None,
                      action='store',
                      help='Path to generate detailed json report containing all test details')


    global tests
    (options, tests) = parser.parse_args()

    set_globals(options, tests)

    buildlogger_opts = (options.buildlogger_builder, options.buildlogger_buildnum, options.buildlogger_credentials)
    if all(buildlogger_opts):
        os.environ['MONGO_USE_BUILDLOGGER'] = 'true'
        os.environ['MONGO_BUILDER_NAME'] = options.buildlogger_builder
        os.environ['MONGO_BUILD_NUMBER'] = options.buildlogger_buildnum
        os.environ['BUILDLOGGER_CREDENTIALS'] = options.buildlogger_credentials
        if options.buildlogger_phase:
            os.environ['MONGO_PHASE'] = options.buildlogger_phase
    elif any(buildlogger_opts):
        # some but not all of the required options were set
        raise Exception("you must set all of --buildlogger-builder, --buildlogger-buildnum, --buildlogger-credentials")

    if options.File:
        if options.File == '-':
            tests = sys.stdin.readlines()
        else:
            f = open(options.File)
            tests = f.readlines()
    tests = [t.rstrip('\n') for t in tests]

    if options.only_old_fails:
        run_old_fails()
        return
    elif options.reset_old_fails:
        clear_failfile()
        return

    # If we're in suite mode, tests is a list of names of sets of tests.
    if options.mode == 'suite':
        tests = expand_suites(tests)
    elif options.mode == 'files':
        tests = [(os.path.abspath(test), start_mongod) for test in tests]

    if options.ignore_files != None :
        ignore_patt = re.compile( options.ignore_files )
        print "Ignoring files with pattern: ", ignore_patt

        def ignore_test( test ):
            if ignore_patt.search( test[0] ) != None:
                print "Ignoring test ", test[0]
                return False
            else:
                return True

        tests = filter( ignore_test, tests )

    if not tests:
        print "warning: no tests specified"
        return

    if options.with_cleanbb:
        dbroot = os.path.join(options.smoke_db_prefix, 'data', 'db')
        call([utils.find_python(), "buildscripts/cleanbb.py", "--nokill", dbroot])

    test_report["start"] = time.time()
    try:
        run_tests(tests)
    finally:
        add_to_failfile(fails, options)

        test_report["end"] = time.time()
        test_report["elapsed"] = test_report["end"] - test_report["start"]
        test_report["failures"] = len(losers.keys())
        if report_file:
            f = open( report_file, "wb" )
            f.write( json.dumps( test_report ) )
            f.close()

        report()
Ejemplo n.º 52
0
# -*- coding: utf-8 -*-
from crawler import Crawler
from cmdParser import parser

def main():
    '''
    The program's main entrance; this method creates an instance of
    class Crawler and runs it.
    '''
    try:
        args = parser.parse_args()
        crawler = Crawler(args)
        crawler.entry()
    except KeyboardInterrupt:
        print 'Process aborted by user.'

if __name__ == '__main__':
    main()
Ejemplo n.º 53
0
Archivo: ars.py Proyecto: zhan0903/ARS
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--env_name', type=str, default='HalfCheetah-v1')
    parser.add_argument('--n_iter', '-n', type=int, default=1000)
    parser.add_argument('--n_directions', '-nd', type=int, default=8)
    parser.add_argument('--deltas_used', '-du', type=int, default=8)
    parser.add_argument('--step_size', '-s', type=float, default=0.02)
    parser.add_argument('--delta_std', '-std', type=float, default=.03)
    parser.add_argument('--n_workers', '-e', type=int, default=18)
    parser.add_argument('--rollout_length', '-r', type=int, default=1000)

    # for Swimmer-v1 and HalfCheetah-v1 use shift = 0
    # for Hopper-v1, Walker2d-v1, and Ant-v1 use shift = 1
    # for Humanoid-v1 used shift = 5
    parser.add_argument('--shift', type=float, default=0)
    parser.add_argument('--seed', type=int, default=237)
    parser.add_argument('--policy_type', type=str, default='linear')
    parser.add_argument('--dir_path', type=str, default='data')

    # for ARS V1 use filter = 'NoFilter'
    parser.add_argument('--filter', type=str, default='MeanStdFilter')

    local_ip = socket.gethostbyname(socket.gethostname())
    ray.init(redis_address=local_ip + ':6379')
    
    args = parser.parse_args()
    params = vars(args)
    run_ars(params)
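Following the shift guidance in the comments above, hypothetical invocations per environment might look like:

#   python ars.py --env_name HalfCheetah-v1 --shift 0
#   python ars.py --env_name Walker2d-v1 --shift 1
#   python ars.py --env_name Humanoid-v1 --shift 5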

Ejemplo n.º 54
0
import os
from pyspark.sql import SparkSession
from pyspark.ml.recommendation import ALS
from pyspark.ml.evaluation import RegressionEvaluator
from pyspark.sql.types import *
import time
import parser

def string_to_list(x):
    x = x.replace(' ', '').split(',')
    return [float(y) for y in x]


parser = parser.build_parser()
(options, args) = parser.parse_args()

APP_DIRECTORY = options.app_directory
SAMPLE_PERCENT = options.sample_percent
DATA_DIRECTORY = os.path.join(APP_DIRECTORY, 'data')
MODELS_DIRECTORY = os.path.join(APP_DIRECTORY, 'models')

RANKS = string_to_list(options.ranks)
MAX_ITERS = string_to_list(options.max_iters) 
REG_PARAMS = string_to_list(options.reg_params)

spark = SparkSession.builder.getOrCreate()

interaction_schema = StructType(
    [
        StructField('user_id', IntegerType(), True),
        StructField('book_id', IntegerType(), True),