Example #1
def run_main():
    global configfile
    global maintenance_job, legacy_sync_job
    global main_xmlrpc_handler, stats_resource, web_root, web_static

    parser = argparse.ArgumentParser(description="DenyHosts sync server")
    parser.add_argument("-c", "--config", default="/etc/denyhosts-server.conf", help="Configuration file")
    parser.add_argument("--recreate-database", action='store_true', help="Wipe and recreate the database")
    parser.add_argument("--evolve-database", action='store_true', help="Evolve the database to the latest schema version")
    parser.add_argument("--purge-legacy-addresses", action='store_true',
       help="Purge all hosts downloaded from the legacy server. DO NOT USE WHEN DENYHOSTS-SERVER IS RUNNING!")
    parser.add_argument("--purge-reported-addresses", action='store_true',
        help="Purge all hosts that have been reported by clients. DO NOT USE WHEN DENYHOSTS-SERVER IS RUNNING!")
    parser.add_argument("--purge-ip", action='store',
        help="Purge ip address from both legacy and reported host lists. DO NOT USE WHEN DENYHOSTS-SERVER IS RUNNING!")
    parser.add_argument("--check-peers", action="store_true", 
        help="Check if all peers are responsive, and if they agree about the peer list")
    parser.add_argument("--bootstrap-from-peer", action="store", metavar="PEER_URL",
        help="First wipe database and then bootstrap database from peer. DO NOT USE WHEN DENYHOSTS-SERVER IS RUNNING!")
    parser.add_argument("-f", "--force", action='store_true',
        help="Do not ask for confirmation, execute action immediately")
    args = parser.parse_args()

    configfile = args.config

    try:
        config.read_config(args.config)
    except ConfigParser.NoSectionError as e:
        print("Error in reading the configuration file from \"{}\": {}.".format(args.config, e))
        print("Please review the configuration file. Look at the supplied denyhosts-server.conf.example for more information.")
        sys.exit()
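For orientation, a hedged sketch of how this entry point might be invoked; the executable name is an assumption inferred from the config default, not confirmed by the source:

# denyhosts-server --config /etc/denyhosts-server.conf --check-peers
# denyhosts-server --purge-ip 203.0.113.7 --force   # only while the server is stopped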
Example #2
    def do_set(self, args):
        """
        Set config for cloudmonkey. For example, options can be:
        url, auth, log_file, history_file
        You may also edit your ~/.cloudmonkey_config instead of using set.

        Example:
        set url http://localhost:8080/client/api
        set prompt 🐵 cloudmonkey>
        set log_file /var/log/cloudmonkey.log
        """
        args = args.strip().partition(" ")
        key, value = (args[0].strip(), args[2].strip())
        if not key:
            return
        if not value:
            print "Blank value of %s is not allowed" % key
            return

        self.prompt = self.get_prompt()
        setattr(self, key, value)
        if key in ['host', 'port', 'path', 'protocol']:
            key = 'url'
            self.url = "%s://%s:%s%s" % (self.protocol, self.host,
                                         self.port, self.path)
            print "This option has been deprecated, please set 'url' instead"
            print "This server url will be used:", self.url
        write_config(self.get_attr, self.config_file)
        read_config(self.get_attr, self.set_attr, self.config_file)
        self.init_credential_store()
        if key.strip() == 'profile':
            print "\nLoaded server profile '%s' with options:" % value
            for option in default_profile.keys():
                print "    %s = %s" % (option, self.get_attr(option))
            print
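A brief, hedged usage sketch of do_set; in cloudmonkey this normally runs through the interactive `set` command, so the direct calls below are purely illustrative:

# self.do_set('url http://localhost:8080/client/api')  # key='url', value='http://localhost:8080/client/api'
# self.do_set('host 127.0.0.1')                        # deprecated path: rebuilds self.url and prints a warning
# self.do_set('profile prod')                          # reloads that server profile and prints its options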
Example #3
def test():
    import cairo
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,100,100)
    context = cairo.Context(surface)
    config.read_config(path("/home/jose/.astronex"))
    zod = Zodiac(context)
    for z in zod.zod: 
        print z.__dict__
    print
    for z in zod.plan:
        print z.__dict__
Example #4
def install_nifi():

	logger.info('Attempting to install NiFi to the cluster')
	if not is_ambari_installed():
		logger.error('Ambari must be installed to install NiFi as well.')
		raise EnvironmentError('You must install the demo on the same node as the Ambari server. Install Ambari here or move to another node with Ambari installed before continuing')
	
	
	if not is_hdp_select_installed():
		installed = install_hdp_select()
		if not installed:
			logger.error('hdp-select must be installed to install NiFi')
			raise EnvironmentError('hdp-select could not be installed. Please install it manually and then re-run the setup.')

	conf = config.read_config('service-installer.conf')
	cmds = json.loads(conf['NIFI']['install-commands'])
	
	sh = Shell()
	logger.info('Getting HDP Version')
	version = sh.run(cmds[0])
	logger.info('HDP Version: ' + version[0])
	fixed_copy = cmds[2].replace('$VERSION', str(version[0])).replace('\n', '')
	fixed_remove = cmds[1].replace('$VERSION', str(version[0])).replace('\n', '')
	logger.info('NiFi Copy Command: ' + fixed_copy)
	logger.info('NiFi Clean Command: ' + fixed_remove)
	remove = sh.run(fixed_remove)
	copy = sh.run(fixed_copy)
	logger.info('Attempting to restart Ambari...')
	restart = sh.run(cmds[3])

	print("Please open the Ambari Interface and manually deploy the NiFi Service.")
	raw_input("Press enter twice to continue...")
	raw_input("Press enter once to continue...")
	
#	 We've copied the necessary files. Once that completes we need to add it to Ambari
	logger.info('Waiting for user to install service in Ambari to continue')
	print('Checking to make sure service is installed')
	ambari = config.read_config('global.conf')['AMBARI']
	installed = check_ambari_service_installed('NIFI', ambari)
	cont = ''
	if not installed:
		print('Unable to contact Ambari Server. Unsure whether or not NiFi was installed')
		while not (cont == 'y' or cont == 'n'):
			cont = raw_input('Continue attempt to set up NiFi for demo?(y/n)')
			if not (cont == 'y' or cont == 'n'):
				print('Please enter "y" or "n"')
	else:
		logger.info('NiFi installed successfully')
		cont = 'y'
	
	if cont == 'n':
		return False
	elif cont == 'y':
		return True
Example #5
def general(sqlsession):
    active = 'settings'

    devices = sqlsession.query(models.Device).all()
    return render_template(
        'settings_general.html',
        active=active,
        category='general',
        flash_device=config.read_config('flash_device'),
        default_door_device=config.read_config('default_door_device'),
        devices=devices,
        semester_end_dates=config.read_config('semester_end_dates')
    )
Example #6
def slice(jobinfo):
        # this will be replaced with proper error checking code later
        assert(jobinfo.status == 'slicing')
        conf = config.read_config()
        
        # read and store config information
        # this allows us to have different version of slicer stored in different places
        slice_command = conf['Slicer']['Path']
        slice_ini = conf['Slicer']['Config']
        
        # get the last file worked on (validated file)
        infile = jobinfo.previous_file()

        # CK - Modified to work on my linux VM - passes
        # Infile OK as is, must do some work for correct outfile
        # Remove original file extension and add .gcode extension
        # AW - fixed to work with file paths that contain periods
        # put together the input and output filenames
        root, ext = os.path.splitext(infile)
        outfile = root+'.gcode'

        # construct the actual command line call
        # CK - Removed quotes around infile and outfile names
        command = '{0} {1} --output {2} --load {3}'.format(slice_command, infile, outfile, slice_ini)
        
        # execute the command line call.
        # output contains anything written to stdout by the program
        # CK - Uncommented line
        output = os.popen(command).read()
        
        # this is a debugging message. remove on release
        slice_logger.log("$ "+command)
        
        # store the newly created file into the printjob
        jobinfo['slicing'] = outfile
Example #7
def authorize_token(user):
    config = read_config()
    if config.get('GitHub', 'token'):
        print("The token already exists.")
        sys.exit()

    password = getpass('Password for {0}: '.format(user))

    note = 'OCA (odoo community association) Maintainers Tools'
    note_url = 'https://github.com/OCA/maintainers-tools'
    scopes = ['repo', 'read:org', 'write:org', 'admin:org']

    try:
        auth = github3.authorize(user, password, scopes, note, note_url)
    except github3.GitHubError as err:
        if err.code == 422:
            for error in err.errors:
                if error['code'] == 'already_exists':
                    msg = ("The 'OCA (odoo community association) Maintainers "
                           "Tools' token already exists. You will find it at "
                           "https://github.com/settings/applications and can "
                           "revoke it or set the token manually in the "
                           "configuration file.")
                    sys.exit(msg)
        raise

    config.set("GitHub", "token", auth.token)
    write_config(config)
    print("Token stored in configuration file")
Example #8
 def head(self):
     if not self.config:
         self.configure(config.read_config())
     if not self.blockchain or 'HEAD' not in self.blockchain:
         self._initialize_blockchain()
     ptr = self.blockchain.get('HEAD')
     return blocks.get_block(self.blockchain, ptr)
Example #9
def print_tabulate(res, offering_type="compute", noheader=False, short=False):
    config = read_config()
    c = conn(config)
    tbl = []
    for i in res:
        if short:
            tbl.append([i.name])
        else:
            if offering_type == "compute":
                tbl.append([
                    i.name,
                    i.displaytext,
                    i.storagetype,
                    ])
            elif offering_type == "network":
                tbl.append([
                    i.name,
                    i.displaytext,
                    "%s Mbit/s"% i.networkrate,
                    ])

    tbl = sorted(tbl, key=operator.itemgetter(0))
    if (noheader or short):
        print tabulate(tbl, tablefmt="plain")
    else:
        if offering_type == 'compute':
            tbl.insert(0, ['name', 'description', 'storagetype'])
        else:
            tbl.insert(0, ['name', 'description', 'external networkrate'])

        print tabulate(tbl, headers="firstrow")
Example #10
def post_notebook(notebook_path, session_cookie=''):
  '''Add a single notebook to a Zeppelin installation via REST API
  
  Must have a file called ``global.conf`` inside of the configuration directory.
  
  Inside that configuration file we use protocol://server:port to connect to a Zeppelin instance and use the API
  
  Args:
    notebook_path (str): Full file path to the Zeppelin note
    session_cookie (str, optional): The cookie used after authentication in order to make authorized API calls. Required on the 2.5 TP Sandbox
    
  Returns:
    bool: True if the upload was successful (received 201 created), or False otherwise.
  
  '''
  conf = config.read_config('global.conf')['ZEPPELIN']
  client = CurlClient(proto=conf['protocol'], server=conf['server'], port=int(conf['port']))
  path = '/api/notebook'
  
  logger.info('Attempting to POST notebook at ' + client.proto + '://' + client.server + ':' + str(client.port))
  opts = '-i -b \'' + session_cookie + '\' -H "Content-Type: application/json" -d @' + notebook_path
  output = client.make_request('POST', path, options=opts)
  if '201 created' in output[0].lower():
    logger.info('Note posted successfully')
    return True
  else:
    logger.info('Note failed to be added. User will need to add manually')
    return False
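A hedged usage sketch combining post_notebook with get_zeppelin_session (shown later in this collection); the credentials and note path are illustrative, and global.conf is assumed to carry a [ZEPPELIN] section with protocol, server and port:

session = get_zeppelin_session('admin', 'admin-password')  # illustrative credentials
if post_notebook('/tmp/notes/demo_note.json', session_cookie=session):
  logger.info('Demo note uploaded')
else:
  logger.info('Demo note was not uploaded; add it manually in the Zeppelin UI')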
Example #11
def main(nzb_files, options):
    if not nzb_files:
        parser.print_help()
        return

    settings = config.read_config(options.config_file)
    if not settings:
        print '%s has no settings!' % options.config_file
        return

    if options.debug:
        settings['debug'] = options.debug
        print settings

    if options.pattern:
        settings['skip_regex'] = options.pattern
        settings['invert'] = True
    elif options.par2:
        settings['skip_regex'] = ['\.par2']
        settings['invert'] = True

    nzbs = [nzb for nzb in helper.get_nzb_file(nzb_files)]
    for nzb in nzbs:
        nzb_name = os.path.split(nzb)[1]
        new_dir = helper.get_download_path(settings.get('download_dir'), nzb_name)
        settings['download_path'] = new_dir
        if not os.path.exists(new_dir):
            if settings.get('debug'):
                print 'made new dir %s ' % new_dir
            os.mkdir(new_dir)

        if settings.get('debug'):
            print settings['download_path']

        download.start(nzb, settings)
Example #12
def add_zeppelin_notebooks():
  '''Add all Zeppelin notes to the current Zeppelin installation
  
  Looks inside ``configuration/zeppelin/notes`` for any ``*.json`` files.
  
  Each JSON file will be attempted to be added to the current Zeppelin instance
  
  Returns:
    bool: True if every Note was added successfully. True is returned on an empty directory. False otherwise.
  
  
  '''
  logger.info('Adding zeppelin notebooks to installation')
  all_success = True
  note_dir = config.get_conf_dir() + 'zeppelin/notes'
  
  # First need to authenticate to Zeppelin and retrieve JSESSIONID
  conf = config.read_config('global.conf')['ZEPPELIN']
  session = get_zeppelin_session(conf['username'], conf['password'])
  
  for item in os.listdir(note_dir):
    logger.debug('Found item in zeppelin/notes: ' + item)
    item_path = note_dir + '/' + item
    if os.path.isfile(item_path) and str(item).endswith('.json'):
      logger.info('POSTing ' + item + ' to Zeppelin')
      result = post_notebook(item_path, session)
      if not result:
        logger.error('Not all notebooks were added to Zeppelin successfully')
        all_success = False
  return all_success
Example #13
def print_tabulate(res, noheader=False, short=False):
    config = read_config()
    c = conn(config)
    tbl = []
    for i in res:
        if not i.__dict__.has_key("associatednetworkname"):
            if i.__dict__.has_key("vpcid"):
                i.__setattr__("associatednetworkname", "%s (VPC)" % c.list_vpcs(id=i.vpcid)[0].name)
        if i.isstaticnat:
            vmname = i.virtualmachinename
        else:
            vmname = ""
        if i.issourcenat:
            snat = "Yes"
        else:
            snat = "No"
        if short:
            tbl.append([i.name])
        else:
            tbl.append([i.ipaddress, i.zonename, i.associatednetworkname, vmname, snat, i.allocated])

    tbl = sorted(tbl, key=operator.itemgetter(2))
    if noheader or short:
        print tabulate(tbl, tablefmt="plain")
    else:
        tbl.insert(0, ["ip", "zone", "network", "staticnat", "sourcenat", "allocated"])
        print tabulate(tbl, headers="firstrow")
Example #14
def get_driver():
    driver = config.read_config('driver')
    if driver == 'chrome':
        browser = webdriver.Chrome(config.read_config('chromedriver_path'))
        browser.maximize_window()
        browser.implicitly_wait(30)
    elif driver == 'android':
        desired_capabilities = {'aut': config.read_config('android_app_id')}
        return webdriver.Remote(
            desired_capabilities=desired_capabilities
        )
    else:
        browser = webdriver.Firefox()
        browser.maximize_window()
        browser.implicitly_wait(30)
    return browser
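For reference, a hedged summary of the config keys get_driver() consults, plus a trivial call; the concrete values are illustrative and not taken from the source project:

# driver            = chrome | android | anything else (falls back to Firefox)
# chromedriver_path = /usr/local/bin/chromedriver   (read only when driver == chrome)
# android_app_id    = com.example.app               (read only when driver == android)
browser = get_driver()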
Example #15
def print_tabulate(res, noheader=False, short=False, t='list'):
    config = read_config()
    c = conn(config)
    tbl = []
    for i in res:
        if t == 'list':
            if not i.__dict__.has_key('virtualmachinename'):
                i.__setattr__('virtualmachinename', 'None')
            if short:
                tbl.append([i.id])
            else:
                tbl.append([
                    i.ipaddress,
                    c.list_networks(id=i.networkid)[0].name,
                    i.publicport,
                    i.privateport,
                    i.protocol,
                    i.virtualmachinename,
                    i.state,
                    i.id,
                ])

    tbl = sorted(tbl, key=operator.itemgetter(0))
    if (noheader or short):
        print tabulate(tbl, tablefmt="plain")
    else:
        tbl.insert(0, ['ip', 'network', 'public port', 'private port', 'proto',
            'vmname', 'state', 'uuid'])
        print tabulate(tbl, headers="firstrow")
Example #16
def print_tabulate(res, noheader=False, short=False):
    config = read_config()
    c = conn(config)
    tbl = []
    for i in res:
        if not i.__dict__.has_key('passwordenabled'):
            i.__setattr__('passwordenabled', 0)
        if not i.__dict__.has_key('created'):
            i.__setattr__('created', '')
        if i.passwordenabled == 1:
            passw = "Yes"
        else:
            passw = "No"
        if short:
            tbl.append(["%s/%s" % (i.account, i.name)])
        else:
            tbl.append([
                "%s/%s" % (i.account, i.name),
                i.zonename,
                i.ostypename,
                i.created,
                passw,
                ])

    tbl = sorted(tbl, key=operator.itemgetter(0))
    if (noheader or short):
        print tabulate(tbl, tablefmt="plain")
    else:
        tbl.insert(0, ['name', 'zone', 'ostype', 'created', 'passwordenabled'])
        print tabulate(tbl, headers="firstrow")
Example #17
def get_zeppelin_session(username, password):
  '''Gets the JSESSIONID cookie by POSTing to /api/login
  
  This is required to retrieve the JSESSIONID on Zeppelin instances that have logins and user permissions. Used when POSTing notebooks as well.
  
  Returned in the form of ``JSESSIONID=<value>``
  
  Args:
    username (str): The username to login with
    password (str): The password to login with
    
  Returns:
    str: ``''`` If the request was unsuccessful, or the cookie wasn't present. Otherwise returns the cookie as a string.
  '''
  conf = config.read_config('global.conf')['ZEPPELIN']
  client = CurlClient(proto=conf['protocol'], server=conf['server'], port=int(conf['port']))
  path = '/api/login'
  opts = '-i -d \'userName=' + username + '&password=' + password + '\''
  res = client.make_request('POST', path, options=opts)
  lines = res[0].split('\n')
  for line in lines:
    if 'Set-Cookie:'.lower() in line.lower() and 'JSESSIONID' in line.upper():
      cookie = line.split('Set-Cookie: ')[1]
      print 'COOKIE: ' + cookie
      return cookie.strip()
  return ''
Example #18
def env(environ, content):
    "show the environment (debugging/tracing)"
    configpath = config.get_configpath()
    ctimef = config.get_modified_configpath_fs(configpath)
    sctimef = config.fmt_ts(ctimef) if ctimef else "None"
    ctimeu = config.get_modified_configpath_db(configpath)
    sctimeu = config.fmt_ts(ctimeu) if ctimeu else "never"
    content.append(u"""<table>
    <tr><td>%s</td><td>%s</td></tr>
    <tr><td>%s</td><td>%s</td></tr>
    <tr><td>%s</td><td>%s</td></tr>
    </table>""" % ("configpath", configpath,
                   "modified", sctimef,
                   "modified of read", sctimeu))
    content.append(u"<br /><strong>contents of %s </strong><br />" % configpath)
    for entry in config.read_config(configpath):
        content.append(str(entry) + "<br />")
    content.append(u"<br /> <br /><strong>environ</strong>")
    # the most important ones first
    important = ("wsgi.multithread", "wsgi.multiprocess")
    for key in important:
        content.append(_envvar(environ, key))
    # everything else
    for key in environ:
        if key not in important:
            content.append(_envvar(environ, key))
Example #19
def main():
	# read the config file. We'll need this later.
	# also caches the file for stages
	configrc = config.read_config()
	
	#load the logger
	plogger = logger.Logger('pipeline')
	
	if len(sys.argv) == 1:
		plogger.log("Error: no model given to pipeline")
		return
	infile = sys.argv[1]
	
	job = pipeline.PrintJob(infile, "*****@*****.**")

	job.status = 'converting'
	converter.convert(job)
	
	#replaced by slic3r automatic validation
	#job.status = 'validating'
	#validator.validate(job)
	
	job.status = 'slicing'	
	slicer.slice(job)
	
	job.status = 'printing'
	printer.send_job(job)
Example #20
def copy_inputs(config_file, InputsDir):

    config_dict = read_config(config_file)

    config = SafeConfigParser()
    config.optionxform = str
    config.read(config_file)

    new_config = os.path.join(InputsDir, os.path.split(config_file)[1])

    # ---------------------------------------------------------------- #
    # copy the inputs
    for key, section in config_dict.iteritems():
        if 'FILE_NAME' in section.keys():
            new_file_name = os.path.join(InputsDir,
                                         os.path.split(section['FILE_NAME'])[1])

            copyfile(section['FILE_NAME'], new_file_name)

            # update the config file for an easy restart
            config.set(key, 'FILE_NAME',
                       os.path.join(InputsDir,
                                    os.path.split(section['FILE_NAME'])[1]))

            # update the config_dict with the new value
            config_dict[key]['FILE_NAME'] = new_file_name
    # ---------------------------------------------------------------- #

    # ---------------------------------------------------------------- #
    # write the new configuration file
    with open(new_config, 'w') as configfile:
        config.write(configfile)
    # ---------------------------------------------------------------- #

    return config_dict
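A minimal sketch of the kind of INI content copy_inputs() expects, assuming read_config() returns each section as a dict of options; the section name, file name and paths are hypothetical:

# [FORCING]
# FILE_NAME = /data/inputs/forcing.nc
#
# copy_inputs('run.cfg', '/scratch/run1/inputs') would copy forcing.nc into
# /scratch/run1/inputs and rewrite FILE_NAME in the copied config to point at the copy.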
Example #21
def convert(jobinfo):
	# this should be replaced by error handling code later
	assert(jobinfo.status == 'converting')
	
	#read  config file information
	conf = config.read_config()
	conv_command = conf['Converter']['Path']
	
	# determine input and output filenames
	infile = jobinfo.previous_file()
	root, ext = os.path.splitext(infile)
	if ext not in conf['Mailfetch']['extensions']:
		conv_logger.error(jobinfo, 'File is in an unsupported format {0}'.format(ext))
	
	outfile = None
	# don't bother converting if you already have an stl
	if ext != '.stl':
		outfile = root+'.stl'
		
		# run command
		command = '{0} {1} -c stl -o {2}.stl'.format(conv_command, infile, root)
		
		#output = os.popen(command).read()
		conv_logger.log("$ "+command)
	else:
		conv_logger.log("$ Converter: {0} is already an stl file".format(infile))
		
	# if you did not convert, this is set to None
	# this will simply tell the pipeline that no file was created in this stage
	jobinfo['converting'] = outfile
Example #22
def decrypt_config(path="./config"):
    """
    Decrypt all json.encrypt files to .json files
    """
    title_screen()
    password = utils.get_master_password()
    
    print "Processing folder `%s`" % path
     
    print(green("Decrypting..."))
    
    for root, dirs, files in os.walk(path):
        for file in files:
            if file.endswith(".encrypt"):
                encrypt_file_path = os.path.join(root, file)
                with open(encrypt_file_path) as ciphertext:
                    file_path = encrypt_file_path[:-8]
                    try:
                        config_data = config.read_config(file_path)
                        
                        config.write_config(file_path,
                                            config_data)
                        print(green("File `%s` decrypted." % encrypt_file_path))
                        os.remove(encrypt_file_path)
                    except:
                        print(red("Cannot decrypt file... `%s` skipping..." % encrypt_file_path))
Example #23
 def __init__(self, logfilepath = "%s/var/log/certmaster/certmaster.log" % conf_dir):
     config_file = '%s/etc/certmaster/minion.conf' % conf_dir
     self.config = read_config(config_file, CMConfig)
     self.loglevel = logging._levelNames[self.config.log_level]
     self._setup_logging()
     if self._no_handlers:
         self._setup_handlers(logfilepath=logfilepath)
Example #24
def main():
    config = read_config()
    c = conn(config)
    args = docopt(__doc__, version='%s %s' % (__cli_name__, __version__))
    if args['list']:
        res = c.list_sshkeypairs()
    if args['generate']:
        try:
            res = c.create_sshkeypair(name=args['NAME'])
            print res.privatekey
            sys.exit(0)
        except molnctrl.csexceptions.ResponseError as e:
            print "Unable to create ssh key: %s" % e[1]
            sys.exit(1)
    if args['delete']:
        try:
            res = c.delete_sshkeypair(name=args['NAME'])
            print "deleted SSH key: %s" % args['NAME']
            sys.exit(0)
        except Exception as e:
            print "Unable to delete SSK keypair: %s" % e[1]
            sys.exit(1)
    if res:
        print_tabulate(res, noheader=args['--noheader'], short=args['--short'])
    else:
        print "Unable to execute command"
        sys.exit(1)
Example #25
def print_tabulate(res, noheader=False, short=False):
    config = read_config()
    c = conn(config)
    tbl = []
    for i in res:
        if not i.__dict__.has_key('networkofferingname'):
            i.__setattr__('networkofferingname', 'VPC')
        if short:
            tbl.append([i.name])
        else:
            tbl.append([
                i.name,
                i.zonename,
                i.networkofferingname,
                i.cidr,
                i.networkdomain,
                get_snat(c, i.id),
                has_vpn(c, i.id),
                i.state
                ])

    tbl = sorted(tbl, key=operator.itemgetter(0))
    if (noheader or short):
        print tabulate(tbl, tablefmt="plain")
    else:
        tbl.insert(0, ['name', 'zone', 'offering', 'cidr', 'domain', 'snat', 'vpn', 'state'])
        print tabulate(tbl, headers="firstrow")
Example #26
def print_tabulate(res, noheader=False, short=False, t="vpn"):
    config = read_config()
    c = conn(config)
    tbl = []
    for i in res:
        if t == 'vpn':
            if short:
                tbl.append([i.publicip])
            else:
                tbl.append([
                    get_network_for_ip(c, i.publicipid),
                    i.publicip,
                    i.presharedkey,
                    i.state,
                    ])
        if t == 'vpnusers':
            if short:
                tbl.append([i.username])
            else:
                tbl.append([
                    i.username,
                    i.state,
                    ])
    
    tbl = sorted(tbl, key=operator.itemgetter(0))
    if (noheader or short):
        print tabulate(tbl, tablefmt="plain")
    else:
        if t == 'vpn':
            tbl.insert(0, ['network', 'endpoint', 'preshared key', 'state'])
        elif t == 'vpnusers':
            tbl.insert(0, ['username', 'state'])
        print tabulate(tbl, headers="firstrow")
Example #27
def before_feature(context, scenario):
    driver = config.read_config('driver')
    if driver == 'android':
        pass
    else:
        context.execute_steps(u'''
            given login to eTools
        ''')
Example #28
def after_feature(context, scenario):
    driver = config.read_config('driver')
    if driver == 'android':
        pass
    else:
        context.execute_steps(u'''
            given logout from eTools
        ''')
Example #29
def before_all(context):
    setup_debug_on_error(context.config.userdata)

    context.browser = get_driver()
    context.util = Utils(context.browser)
    context.base_url = config.read_config('baseurl')
    context.verificationErrors = []
    context.accept_next_alert = True
Example #30
    def update(self, config_file = None):
        self._apps = dict()
        self._windows = []

        # Load running apps
        config.read_config(config_file)
        apps = elements.get_accessible_applications(config.IGNORED_BUNDLES)
        for app in apps:
            self._apps[app.title] = app
            app._element.set_callback(_accessibility_notifications_callback)
            app._element.watch('AXWindowMiniaturized', 'AXWindowCreated')
            for win in app._windows:
                self._add_window(win)

        logging.info('The window manager is now aware of: %s', ', '.join(self._apps.keys()))

        self._layout = config.LAYOUT
Example #31
#%%
import os
import sys

# sys.path.append(os.getcwd().replace(f"\classification", ""))
from classification.model_evaluation import print_cross_val_scores

print(f"Working directory: {os.getcwd()}")
for path in sys.path:
    print(path)

#%%
import config
from dataset import dataset_loader
dataset = dataset_loader.load_dataset(config.read_config(''))

#%% Add targets to features as a new column
# dataset['features']['targets'] = dataset['targets']
#%% --
print(dataset['features'].info())
print(dataset['targets'].value_counts())
#%% Create histogram of the features to see their shape --
import matplotlib.pyplot as plt
dataset['features'].hist(bins=50, figsize=(10, 7))
plt.show()

#%%
dataset['targets'].hist(bins=50, figsize=(5, 3))
plt.show()

#%% Looking for correlations
Example #32
from model_rvrd_kepler import lnlike, lnprior, preprocess
import config
import numpy as np

nplanets = 0

modelpath = 'configfiles/hd40307_dace.py'

parnames, datadict, priordict, fixedpardict = config.read_config(
    modelpath, nplanets, False)

theta = [31334.88, 0.81, -0.16, -0.04]

print(parnames)

likelihood = lnlike(theta, parnames, fixedpardict, datadict, {})

print(f'Log(L) = {-likelihood}')
Example #33
 def do_POST(self):
     if (self.path == "/get") or (self.path == "/train"):
         self.send_response(200)
     else:
         self.send_response(404)
         self.end_headers()
         return
     form = cgi.FieldStorage(fp=self.rfile,
                             headers=self.headers,
                             environ={
                                 "REQUEST_METHOD": "POST",
                                 "CONTENT_TYPE":
                                 self.headers["Content-Type"]
                             })
     field_item = form["file"]
     if (field_item == None):
         self.send_response(403)
         self.end_headers()
         return
     filename = field_item.filename
     fileValue = field_item.value
     fileSize = len(fileValue)
     print("[UPLOAD]", "file: ", filename, ", size:", len(fileValue),
           "bytes")
     if (len(fileValue) > 20000000) or not (
             filename.endswith(".jpg") or filename.endswith(".png")
             or filename.endswith(".jpeg") or filename.endswith(".gif")):
         self.send_response(403)
         self.end_headers()
         return
     else:
         self.send_header("Access-Control-Allow-Origin", "*")
         self.send_header("Content-type", "application/json")
         self.end_headers()
         if self.path == "/get":
             with open("upload/" + filename, "wb") as f:
                 f.write(fileValue)
             crop.ssd_process(img_path="upload/" + filename,
                              crop_path="crop",
                              show=False)
             os.remove("upload/" + filename)
             files = glob.glob(crop_path + "/*")
             pred_result = []
             pred_probabilities = []
             for index, f in enumerate(files):
                 img = image.load_img(f, target_size=(256, 256))
                 image_tensor = image.img_to_array(img)
                 image_tensor = np.expand_dims(image_tensor, axis=0)
                 image_tensor /= 255.
                 model = load_model(
                     "model/trash.h5",
                     custom_objects={"leaky_relu": leaky_relu})
                 pred = model.predict(image_tensor)
                 pred_class = np.argmax(pred, axis=1)
                 pred_result.append(pred)
                 pred_probabilities.append(pred)
                 print("[PREDICT] Crop name:", f)
                 print("[PREDICT] Raw prediction data:", pred)
                 print("[PREDICT] Raw prediction class data: ", pred_class)
             response = {
                 "status": "success",
                 "result": pred_result,
                 "probabilities": pred_probabilities
             }
         elif self.path == "/train":
             with open(
                     "train/" + labels_index[int(form["type"].value)] +
                     "/" + filename, "wb") as f:
                 f.write(fileValue)
             response = {"status": "success"}
             if int(time.time()) - conf.read_config()["last"] > 86400:
                 train_path = list(pathlib.Path("./train").glob("*/*"))
                 train_count = len(train_path)
                 if train_count > int(conf.read_config()["in"]):
                     conf.write_config("last", int(time.time()))
                     conf.write_config("in", weight.get_new_n(train_count))
                     train.do_train()
                     for item in train_path:
                         os.remove(item)
         responseBody = json.dumps(response)
         self.wfile.write(responseBody.encode("utf-8"))
     print()
Example #34
def run_main():
    global configfile
    global maintenance_job, legacy_sync_job
    global main_xmlrpc_handler, stats_resource, web_root, web_static

    parser = argparse.ArgumentParser(description="DenyHosts sync server")
    parser.add_argument("-c",
                        "--config",
                        default="/etc/denyhosts-server.conf",
                        help="Configuration file")
    parser.add_argument("--recreate-database",
                        action='store_true',
                        help="Wipe and recreate the database")
    parser.add_argument(
        "--evolve-database",
        action='store_true',
        help="Evolve the database to the latest schema version")
    parser.add_argument(
        "--purge-legacy-addresses",
        action='store_true',
        help=
        "Purge all hosts downloaded from the legacy server. DO NOT USE WHEN DENYHOSTS-SERVER IS RUNNING!"
    )
    parser.add_argument(
        "--purge-reported-addresses",
        action='store_true',
        help=
        "Purge all hosts that have been reported by clients. DO NOT USE WHEN DENYHOSTS-SERVER IS RUNNING!"
    )
    parser.add_argument(
        "--purge-ip",
        action='store',
        help=
        "Purge ip address from both legacy and reported host lists. DO NOT USE WHEN DENYHOSTS-SERVER IS RUNNING!"
    )
    parser.add_argument(
        "--check-peers",
        action="store_true",
        help=
        "Check if all peers are responsive, and if they agree about the peer list"
    )
    parser.add_argument(
        "--bootstrap-from-peer",
        action="store",
        metavar="PEER_URL",
        help=
        "First wipe database and then bootstrap database from peer. DO NOT USE WHEN DENYHOSTS-SERVER IS RUNNING!"
    )
    parser.add_argument(
        "-f",
        "--force",
        action='store_true',
        help="Do not ask for confirmation, execute action immediately")
    args = parser.parse_args()

    configfile = args.config

    try:
        config.read_config(args.config)
    except ConfigParser.NoSectionError as e:
        print(
            "Error in reading the configuration file from \"{}\": {}.".format(
                args.config, e))
        print(
            "Please review the configuration file. Look at the supplied denyhosts-server.conf.example for more information."
        )
        sys.exit()
Example #35
filepath = os.path.dirname(__file__)
modelpath = os.path.join(
    filepath, 'models/lognormal_prior/nathan_newmodel{}.py'.format(model))

# Generate dictionaries
datafile = args_params.dfile
data_path = os.path.join(filepath, 'data/data_2gen/')
if args_params.cluster:
    data_path = '/home/spectro/nunger/codigo/src/nathan/data/data_2gen/'
data_files = subprocess.check_output('ls {}'.format(data_path),
                                     shell=True).decode('utf-8').split('\n')
data_files.remove('')  # Remove last empty item
assert datafile in range(1, 201), \
    "Incorrect datafile, has to be one of {}".format(range(1, 201))
parnames, datadict, priordict, fixedpardict = config.read_config(
    modelpath, data_path, datafile)

if rank == 0:
    print('Datafile: {}'.format(data_files[datafile - 1]))
    print('\n Parameter names and order:')
    print(parnames)

# Number of dimensions is the number of parameters in the model
nDims = len(parnames)
nDerived = 0


def logLikelihood(theta):
    """ Log Likelihood for the planet model. """

    result = lnlike(theta, parnames, fixedpardict, datadict)
    for run in runs:
        dates.append(run[-9:])
    dates.sort()
    for run in runs:
        if dates[-1] in run:
            prev_run = run
    print('Corrida a resumir: {}'.format(prev_run))
    # Extract the number of planets analyzed in previous run
    nplanets = int(prev_run.split('_')[1][0])

# Assign modelpath
filepath = os.path.dirname(__file__)
modelpath = os.path.join(filepath, 'configfiles/hd40307_model_newdata.py')

# Generate dictionaries
parnames, datadict, priordict, fixedpardict = config.read_config(
    modelpath, nplanets, args_params.narrow)

# Filepath for the polychord data and storing the samples
timecode = time.strftime("%m%d_%H%M")
folder_path = 'hd40307_{}a_'.format(nplanets) + timecode
if args_params.narrow:
    folder_path = 'hd40307_{}b_'.format(nplanets) + timecode
if not args_params.save:
    # Save the samples in a dump folder if the data shouldn't be saved
    # This overwrites any data saved before of the same model
    folder_path = 'dump'

base_dir = os.path.join(dirname, folder_path)

if rank == 0:
    print('\n Parameter names and order:')
Example #37
from flask import Flask, request
from flask_restful import Resource, Api
import config
from scrape import ListApartment, SearchApartment
import os

app = Flask(__name__)
api = Api(app)

app_config = config.read_config()

api.add_resource(SearchApartment, '/api/apartments/')
api.add_resource(ListApartment, '/api/apartments/<int:zipcode>')

app.run(port=app_config['application_port'], host='0.0.0.0', debug=True)
Example #38
 def __init__(self):
     config = read_config()
     self.__pg_connect_str = config["PostgreSQLConnectString"]
     self.__max_query_time = int(config["MaxQueryTimeSeconds"]) * 1000
Example #39
def main():
    location = get_user_input()
    configured_data = read_config()
    create_folders_if_necessary(list(configured_data.keys()), location)
    mapping_dictionary = get_mapping_dictionary(configured_data)
    move_files(mapping_dictionary, location)
Example #40
# You should have received a copy of the GNU General
# Public License along with Navidoc; if not, write to the Free
# Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA  02111-1307  USA
# 

#$Id: rst2any.py,v 1.34 2003/07/22 11:03:50 humppake Exp $

#
# Written by Asko Soukka
#

__docformat__ = 'reStructuredText'

import config
config.read_config(config, config.navidoc_conf)

import sys, os, getopt
import docutils.core

import navidoc.directives
import navidoc.writers
import navidoc.languages
import navidoc.transforms
import navidoc.modules

from navidoc.util.path import *

# Import parser "plugins" from 'navidoc/link' and 'navidoc/mp'
config.mp_includes = listdir('navidoc/mp',['mp'],dirs=0)
dirlist = listdir('navidoc/mp',['py'],dirs=0)
Example #41
def main():
    """Top-level function."""
    parser = argparse.ArgumentParser(description=__doc__.strip())

    parser.add_argument('ledger', help="Beancount ledger file")
    parser.add_argument('config',
                        action='store',
                        help='Configuration for accounts and reports.')
    parser.add_argument('output',
                        help="Output directory to write all output files to.")

    parser.add_argument(
        'filter_reports',
        nargs='*',
        help="Optional names of specific subset of reports to analyze.")

    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Verbose mode')

    parser.add_argument('-d',
                        '--days-price-threshold',
                        action='store',
                        type=int,
                        default=5,
                        help="The number of days to tolerate price latency.")

    parser.add_argument('-e',
                        '--end-date',
                        action='store',
                        type=datetime.date.fromisoformat,
                        help="The end date to compute returns up to.")

    parser.add_argument('--pdf',
                        '--pdfs',
                        action='store_true',
                        help="Render as PDFs. Default is HTML directories.")

    parser.add_argument('-j',
                        '--parallel',
                        action='store_true',
                        help="Run report generation concurrently.")

    parser.add_argument(
        '-E',
        '--check-explicit-flows',
        action='store_true',
        help=("Enables comparison of the general categorization method "
              "with the explicit one with specialized explicit  handlers "
              "per signature."))

    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(levelname)-8s: %(message)s')
        logging.getLogger('matplotlib.font_manager').disabled = True

    # Figure out end date.
    end_date = args.end_date or datetime.date.today()

    # Load the example file.
    logging.info("Reading ledger: %s", args.ledger)
    entries, _, options_map = loader.load_file(args.ledger)
    accounts = getters.get_accounts(entries)
    dcontext = options_map['dcontext']

    # Load, filter and expand the configuration.
    config = configlib.read_config(args.config, args.filter_reports, accounts)
    os.makedirs(args.output, exist_ok=True)
    with open(path.join(args.output, "config.pbtxt"), "w") as efile:
        print(config, file=efile)

    # Extract data from the ledger.
    account_data_map = investments.extract(
        entries, dcontext, config, end_date, args.check_explicit_flows,
        path.join(args.output, "investments"))

    # Generate output reports.
    output_reports = path.join(args.output, "reports")
    pricer = reports.generate_reports(account_data_map, config,
                                      prices.build_price_map(entries),
                                      end_date, output_reports, args.parallel,
                                      args.pdf)

    # Generate price reports.
    output_prices = path.join(args.output, "prices")
    reports.generate_price_pages(account_data_map,
                                 prices.build_price_map(entries),
                                 output_prices)

    # Output required price directives (to be filled in the source ledger by
    # fetching prices).
    reports.write_price_directives(
        path.join(output_prices, "prices.beancount"), pricer,
        args.days_price_threshold)
Exemple #42
0
def connect_to_db():
    db_conn_params = read_config(filename=POSTGRESQL_CONFIG_FILE_NAME,
                                 section="postgresql")
    conn = psycopg2.connect(**db_conn_params)
    conn.autocommit = False
    return conn
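A hedged example of the [postgresql] section connect_to_db() relies on; the parsed options are passed straight to psycopg2.connect(), so any of its standard connection keywords would work (the values below are placeholders):

# [postgresql]
# host = localhost
# port = 5432
# dbname = mydb
# user = app_user
# password = secret
conn = connect_to_db()  # autocommit is off, so commit explicitly with conn.commit()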
Example #43
def main(config_file, num_opt_eval):
    conf = read_config(config_file)

    train_df, test_df = get_train_test(conf)
    feats = [
        f for f in train_df.columns if f not in (
            ['TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index'])
    ]
    print(f"use {len(feats)} features.")

    parameter_space = {
        "learning_rate": hp.quniform("learning_rate", 0.01, 0.02, 0.001),
        "max_bin": hp.quniform("max_bin", 5, 500, 5),
        "num_leaves": hp.quniform("num_leaves", 16, 128, 1),
        "min_child_samples": hp.quniform("min_child_samples", 1, 300, 1),
        "colsample_bytree": hp.quniform("colsample_bytree", 0.01, 0.2, 0.01),
        "subsample": hp.quniform("subsample", 0.01, 0.8, 0.01),
        "min_gain_to_split": hp.quniform("min_gain_to_split", 0.0, 5.0, 0.1),
        "reg_alpha": hp.quniform("reg_alpha", 0, 100, 0.1),
        "reg_lambda": hp.quniform("reg_lambda", 0, 100, 0.1)
    }

    def objective(params):
        conf.model = {
            **conf.model, "clf_params": {
                "learning_rate": float(params["learning_rate"]),
                "max_bin": int(params["max_bin"]),
                "num_leaves": int(params["num_leaves"]),
                "min_child_samples": int(params["min_child_samples"]),
                "colsample_bytree": float(params["colsample_bytree"]),
                "subsample": float(params["subsample"]),
                "min_gain_to_split": float(params["min_gain_to_split"]),
                "reg_alpha": float(params["reg_alpha"]),
                "reg_lambda": float(params["reg_lambda"]),
                "boosting_type": "dart",
                "n_estimators": 10000,
                "max_depth": -1,
                "nthread": -1,
                "scale_pos_weight": 1,
                "is_unbalance": False,
                "silent": -1,
                "verbose": -1,
                "random_state": 0
            }
        }
        pprint(conf.model.clf_params)

        model = LightGBM()
        score = model.train_and_predict_kfold(train_df, test_df, feats,
                                              'TARGET', conf)
        return {'loss': -1.0 * score, 'status': STATUS_OK}

    print("====== optimize lgbm parameters ======")
    trials = hyperopt.Trials()
    best = fmin(objective,
                parameter_space,
                algo=tpe.suggest,
                max_evals=num_opt_eval,
                trials=trials,
                verbose=1)
    print("====== best estimate parameters ======")
    pprint(best)
    # for key, val in best.items():
    #     print(f"    {key}: {val}")
    print("============= best score =============")
    best_score = -1.0 * trials.best_trial['result']['loss']
    print(best_score)
    pickle.dump(
        trials.trials,
        open(
            os.path.join(
                conf.dataset.output_directory,
                f'{datetime.now().strftime("%m%d%H%M")}_{conf.config_file_name}_trials_score{best_score}.pkl'
            ), 'wb'))
Example #44
def main():

    parser = argparse.ArgumentParser(prog="sir")
    parser.add_argument("-d", "--debug", action="store_true")
    parser.add_argument("--sqltimings", action="store_true")
    subparsers = parser.add_subparsers()

    reindex_parser = subparsers.add_parser("reindex",
                                           help="Reindexes all or a single "
                                           "entity type")
    reindex_parser.set_defaults(func=reindex)
    reindex_parser.add_argument('--entity-type',
                                action='append',
                                help="Which entity types to index.",
                                choices=SCHEMA.keys())

    generate_trigger_parser = subparsers.add_parser("triggers",
                                                    help="Generate triggers")
    generate_trigger_parser.set_defaults(func=generate_func)
    generate_trigger_parser.add_argument('-t',
                                         '--trigger-file',
                                         action="store",
                                         default="sql/CreateTriggers.sql",
                                         help="The filename to save the "
                                         "triggers into")
    generate_trigger_parser.add_argument('-f',
                                         '--function-file',
                                         action="store",
                                         default="sql/CreateFunctions.sql",
                                         help="The filename to save the "
                                         "functions into")
    generate_trigger_parser.add_argument('-bid',
                                         '--broker-id',
                                         action="store",
                                         default="1",
                                         help="ID of the AMQP broker row "
                                         "in the database.")

    generate_extension_parser = subparsers.add_parser(
        "extension", help="Generate extension")
    generate_extension_parser.set_defaults(func=generate_extension)
    generate_extension_parser.add_argument('-e',
                                           '--extension-file',
                                           action="store",
                                           default="sql/CreateExtension.sql",
                                           help="The filename to save the "
                                           "extension into")

    amqp_setup_parser = subparsers.add_parser("amqp_setup",
                                              help="Set up AMQP exchanges and "
                                              "queues")
    amqp_setup_parser.set_defaults(func=setup_rabbitmq)

    amqp_watch_parser = subparsers.add_parser("amqp_watch",
                                              help="Watch AMQP queues for "
                                              "changes")
    amqp_watch_parser.set_defaults(func=watch)

    args = parser.parse_args()
    if args.debug:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    loghandler = logging.StreamHandler()
    if args.debug:
        formatter = logging.Formatter(fmt="%(processName)s %(asctime)s  "
                                      "%(levelname)s: %(message)s")
    else:
        formatter = logging.Formatter(fmt="%(asctime)s: %(message)s")
    loghandler.setFormatter(formatter)
    logger.addHandler(loghandler)

    mplogger = multiprocessing.get_logger()
    mplogger.setLevel(logging.ERROR)
    mplogger.addHandler(loghandler)

    if args.sqltimings:
        from sqlalchemy import event
        from sqlalchemy.engine import Engine
        import time

        sqltimelogger = logging.getLogger("sqltimer")
        sqltimelogger.setLevel(logging.DEBUG)
        sqltimelogger.addHandler(loghandler)

        @event.listens_for(Engine, "before_cursor_execute")
        def before_cursor_execute(conn, cursor, statement, parameters, context,
                                  executemany):
            conn.info.setdefault('query_start_time', []).append(time.time())
            sqltimelogger.debug("Start Query: %s", statement)
            sqltimelogger.debug("With Parameters: %s", parameters)

        @event.listens_for(Engine, "after_cursor_execute")
        def after_cursor_execute(conn, cursor, statement, parameters, context,
                                 executemany):
            total = time.time() - conn.info['query_start_time'].pop(-1)
            sqltimelogger.debug("Query Complete!")
            sqltimelogger.debug("Total Time: %f", total)

    config.read_config()
    try:
        init_raven_client(config.CFG.get("sentry", "dsn"))
    except ConfigParser.Error as e:
        logger.info(
            "Skipping Raven client initialization. Configuration issue: %s", e)
    func = args.func
    args = vars(args)
    func(args)
Example #45
        body = request.urlopen(url=cfg["check_url"], timeout=10).read()
        return "synology" not in str(body).lower()
    except Exception as e:
        logging.warning("check url FAILED, error: %s", str(e))
        return True


def ask_exit(_sig_name):
    logging.warning('got signal {}: exit'.format(_sig_name))
    loop.stop()


if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)-8s : %(message)s')
    logging.info('start...')
    read_config()
    check_config()
    cfg['record_id'] = get_record_id(cfg['domain'], cfg['sub_domain'])
    logging.info("get record_id: %s" % str(cfg['record_id']))
    logging.info("watching ip for ddns: %s.%s" % (cfg['sub_domain'], cfg['domain']))

    loop = asyncio.get_event_loop()
    for sig_name in ('SIGINT', 'SIGTERM'):
        try:
            loop.add_signal_handler(getattr(signal, sig_name), functools.partial(ask_exit, sig_name))
        except NotImplementedError:
            pass  # for Windows compatibility
    try:
        loop.run_until_complete(main())
    except (KeyboardInterrupt, RuntimeError):
        logging.info('stop...')
Example #46
        config_file = options['config_path']
    else:
        config_file += 'server.cfg'

    return config_file


#--------------------------------------------------------------
# config affairs
# look for default config name in lib/config.py
args = sys.argv
args = args[1:]

options = command_line.parse_command_line(args)

conf = config.read_config(get_config_filename(options))

override_config_with_command_line_options(conf, options)

conf['GENERAL']['VERSION'] = '0.9.9.0.2'

print 'NTLM authorization Proxy Server v%s' % conf['GENERAL']['VERSION']
print 'Copyright (C) 2001-2012 by Tony Heupel, Dmitry Rozmanov, and others.'

config = config_affairs.arrange(conf)

#--------------------------------------------------------------
# let's run it
serv = server.AuthProxyServer(config)
serv.run()
Example #47
import cv2 as cv
import numpy as np
import os
import os.path
import config
import logging
from camera_setup import SingleCameraSetup, StereoCameraSetup

# Load config
cfg = config.read_config()

# Create logger
logger = logging.getLogger(__name__)
logging.basicConfig(format=cfg["log"]["format"], level=cfg["log"]["level"])


def mark_calibration_points(image, image_marker=None, type="checkerboard"):
    """
    Draws the checkerboard corners on the image
    :param image: the image to process in grayscale
    :param image_marker: optional. Draws the markers on this image, can be the same as image
    :return: the corners found
    """

    # Mark calibration points
    dimension = cfg["calibration"][type]["dimension"]
    if type == "checkerboard":
        found, corners = cv.findChessboardCorners(image, dimension, None, flags=cv.CALIB_CB_ADAPTIVE_THRESH)
    else:
        found, corners = cv.findCirclesGrid(image, dimension, flags=cv.CALIB_CB_ASYMMETRIC_GRID)
Example #48
def pipeline():
    config_dict = config.read_config()

    selected_sources = st.sidebar.multiselect('Data Source: ',
                                              list(config_dict.keys()))

    if not selected_sources:
        st.subheader(
            "Click the arrow in top left to open the sidebar and select a data source!"
        )

    selected_config = {
        selected_key: config_dict[selected_key]
        for selected_key in selected_sources
    }

    show_config = st.sidebar.checkbox('Show config', value=False, key='key1')
    show_load_function_output = st.sidebar.checkbox(
        'Show load function output', value=True, key='key2')
    filtered_region_level = st.sidebar.radio('Filter by region levels',
                                             [1, 2, 3, 'All'])

    for k in selected_config:
        st.header('Data source: ' + k)
        params = config_dict[k]
        data_params = params['data']
        data_keys = list(data_params.keys())
        filtered_data_types = st.sidebar.multiselect(
            'Filter ' + k + ' by data type:', data_keys)
        if show_config:
            st.subheader('Config: ')
            st.write(params)
        load_func_name = params['load']['function']
        if load_func_name == 'None':
            st.write('No load function')
        else:
            load_func = getattr(load_functions, load_func_name)
            df = load_func(params)
            if show_load_function_output:
                st.subheader('Load function output:')
                st.write(df)
            st.subheader('Filtered data:')
            #show_regions = st.checkbox('Filter by region level', value=False, key='key3')
            #filtered_region_level = st.checkbox('Filter by region level', value=False, key='key3')
            filtered_df = get_filtered_df(df, data_params, filtered_data_types,
                                          filtered_region_level)
            st.write(filtered_df)
        plot_data_types = st.sidebar.multiselect(
            'Plot ' + k + ' by data type:', data_keys)
        plot_data_cols = selected_data_types_to_cols(plot_data_types,
                                                     data_params)
        unique_regions = df.region_code.unique()
        selected_regions = st.sidebar.multiselect('Select regions:',
                                                  unique_regions)
        for region in unique_regions:
            st.write(region)
            region_df = df[df['region_code'] == region]
            source_df = plot_utils.melt_and_filter_data(
                plot_data_cols, region_df)
            st.altair_chart(plot_utils.ruled_altair_chart(source_df),
                            use_container_width=True)
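
# A hedged sketch of the configuration shape the pipeline above expects, inferred
# from how `config_dict` and `params` are accessed; the source name, data-type keys
# and load-function name are illustrative placeholders.
example_config_dict = {
    "example_source": {
        "load": {"function": "load_example_csv"},  # 'None' disables loading
        "data": {
            "cases": {},   # keys are the data types offered in the sidebar filters
            "deaths": {},
        },
    },
}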
Exemple #49
0
                                         'data/exports/cc_by/LICENSE')
EXPORT_PATH_CC_BY_SA_LICENSE = os.path.join(ROOT_DIR,
                                            'data/exports/cc_by_sa/LICENSE')

sys.path.append(PIPELINE_DIR)

import config

cc_by_header = '''The file `aggregated_cc_by.csv` is licensed under Creative Commons Attribution 4.0 International.\n
It includes content under the following licenses:\n\n'''
cc_by_sa_header = '''The file `aggregated_cc_by_sa.csv` is licensed under Creative Commons Attribution-ShareAlike 4.0 International.\n
It includes content under the following licenses:\n\n'''

complete_texts = '''Complete license texts for each unique license are available below:\n\n'''

config_dict_cc_by = config.read_config(cc_by_sa=False)
config_dict_cc_by_sa = config.read_config(cc_by_sa=True)

all_license_files_cc_by = ['docs/license_files/cc-by-4.0']
all_license_files_cc_by_sa = ['docs/license_files/cc-by-sa-4.0']

for (key, source_params) in config_dict_cc_by.items():
    if 'license' in source_params:
        license_params = source_params['license']
        if 'file' in license_params:
            path = license_params['file']
            if path not in all_license_files_cc_by:
                all_license_files_cc_by.append(path)
print(all_license_files_cc_by)

for (key, source_params) in config_dict_cc_by_sa.items():
Exemple #50
0
def main():
    config = read_config()
    c = conn(config, method='post')
    args = docopt(__doc__, version='%s %s' % (__cli_name__, __version__))
    res = False
    res_get = False
    if args['deploy']:
        try:
            res = deploy_vm(c, args)
        except Exception as e:
            print "error deploying instance: %s" % e
            sys.exit(1)
    elif args['destroy']:
        try:
            id = c.list_virtualmachines(name=args['INSTANCE'])
            if len(id) == 1:
                res = destroy_vm(c, id[0].id)
            else:
                # Multiple VMs returned
                if args['--network']:
                    id = c.list_virtualmachines(name=args['INSTANCE'],
                                                networkid=get_network(
                                                    c,
                                                    args['--network'])[0].id)
                    res = destroy_vm(c, id[0].id)
                else:
                    print "Multiple instances with name: %s found, please supply a network name" % args[
                        'INSTANCE']
        except Exception as e:
            print "Error destroying instance: %s" % e
    elif args['stop']:
        try:
            id = c.list_virtualmachines(name=args['INSTANCE'])
            if len(id) == 1:
                res = stop_vm(c, id[0].id)
            else:
                # Multiple VMs returned
                if args['--network']:
                    id = c.list_virtualmachines(name=args['INSTANCE'],
                                                networkid=get_network(
                                                    c,
                                                    args['--network'])[0].id)
                    res = stop_vm(c, id[0].id)
                else:
                    print "Multiple instances with name: %s found, please supply a network name" % args[
                        'INSTANCE']
        except Exception as e:
            print "Error stopping instance: %s" % e
    elif args['start']:
        try:
            id = c.list_virtualmachines(name=args['INSTANCE'])
            if len(id) == 1:
                res = start_vm(c, id[0].id)
            else:
                # Multiple VMs returned
                if args['--network']:
                    id = c.list_virtualmachines(name=args['INSTANCE'],
                                                networkid=get_network(
                                                    c,
                                                    args['--network'])[0].id)
                    res = start_vm(c, id[0].id)
                else:
                    print "Multiple instances with name: %s found, please supply a network name" % args[
                        'INSTANCE']
        except Exception as e:
            print "Error starting instance: %s" % e
    elif args['list']:
        res = list_vms(c, args)
    elif args['get']:
        res = c.list_virtualmachines(name=args['INSTANCE'])
    else:
        print "Unable to execute command"
        sys.exit(1)

    if res:
        print_tabulate(res, noheader=args['--noheader'], short=args['--short'])
    else:
        print "No virtual machines found, deploy new machines using `rbc-instances deploy`"
        sys.exit(1)
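
# The destroy/stop/start branches above repeat the same name-plus-network lookup.
# A sketch of a helper that factors it out, using only calls already present in
# the snippet (list_virtualmachines, get_network); raising on an ambiguous match
# is an assumption, not behaviour of the original script.
def find_vm(c, args):
    vms = c.list_virtualmachines(name=args['INSTANCE'])
    if len(vms) == 1:
        return vms[0]
    if args['--network']:
        networkid = get_network(c, args['--network'])[0].id
        return c.list_virtualmachines(name=args['INSTANCE'], networkid=networkid)[0]
    raise LookupError("Multiple instances with name: %s found, please supply a network name"
                      % args['INSTANCE'])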
Exemple #51
0
import copy
import sys
from datetime import datetime
from shutil import copyfile

from jinja2 import Environment, FileSystemLoader
from tridesclous import DataIO, CatalogueConstructor, Peeler
import numpy as np
from tridesclous import metrics
import matplotlib.pyplot as plt
import os

from config import read_config
from spike_sorting.plot import heatmap, annotate_heatmap, plot_clusters_summary

cfg = read_config()

_cluster_merge_template = """
Cluster {new_cluster_label}, Cell {new_cell_label}
Mapped to cluster {old_cluster_label}, Cell {old_cell_label}
Similarity : {similarity}

"""


def run_compare_catalogues(subject, date, similarity_threshold=0.7):
    bin_min = 0
    bin_max = 100
    bin_size = 1.
    bins = np.arange(bin_min, bin_max, bin_size)
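
# A minimal sketch of how _cluster_merge_template can be rendered when two
# catalogues are compared; the label and similarity values are illustrative.
print(_cluster_merge_template.format(new_cluster_label=3,
                                     new_cell_label=12,
                                     old_cluster_label=1,
                                     old_cell_label=7,
                                     similarity=0.85))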
Exemple #52
0
import os
import glob
import threading
import train
import config

if __name__ == '__main__':
    configurations_dir = "/home/rqa/renato/git/A-Convolutional-Neural-Network-Approach-for-Speech-Quality-Assessment/configurations"

    if os.path.isdir(configurations_dir):
        os.chdir(configurations_dir)
        files = glob.glob("*.json")
        for filename in files:
            print('----------------------------------------------')
            print('Running configuration: ' + filename)
            print('----------------------------------------------')
            FLAGS, _unparsed = config.set_flags(config.read_config(filename))
            processThread = threading.Thread(
                target=train.main,
                args=[[FLAGS, os.path.splitext(filename)[0]]])
            processThread.start()
            processThread.join()
            print('----------------------------------------------')
            print('Ending configuration: ' + filename)
            print('----------------------------------------------')
            print('')
Exemple #53
0
        config.vprint("Upgraded from hamstall file version 4 to 5.")
    try:
        file_version = prog_manage.get_file_version('file')
    except KeyError:
        file_version = 1
    config.write_db()

if prog_manage.get_file_version(
        'prog'
) == 1:  # Online update broke between prog versions 1 and 2 of hamstall
    print(
        'Please manually update hamstall! You can back up your directories in ~/.hamstall !'
    )
    generic.leave()

if config.read_config("AutoInstall"):  # Auto-update, if enabled
    prog_manage.update(True)

if args.remove_lock:  # Argument parsing
    print("Lock doesn't exist, so not removed!")
    generic.leave()

elif args.install is not None:
    if not config.exists(args.install):
        print("File does not exist!")
        generic.leave()
    program_internal_name = config.name(args.install)  # Get the program name
    if program_internal_name in config.db["programs"]:  # Reinstall check
        reinstall = generic.get_input(
            "Application already exists! Would you like to reinstall/overwrite? [r/o/N]",
            ["r", "o", "n"], "n")  # Ask to reinstall
Exemple #54
0
import argparse

from dataloader import prepare_data, create_dataloader
from config import read_config
from training import *
from sklearn.metrics import accuracy_score, classification_report, matthews_corrcoef
from transformers import AutoModel

parser = argparse.ArgumentParser()
parser.add_argument('--config',
                    default="config.ini",
                    help="INI file for model configuration")
parser.add_argument(
    '--model',
    help="Which model to run",
    choices=["baseline", "vanilla-bert", "bert-linear", "bert-bilstm"],
    default="bert-linear")

PATHS = read_config(filename="config.ini", section="DATASET")
HYPERPARAMS = read_config(filename="config.ini", section="HYPERPARAMS")


def predict_labels_baseline(classifier, test_dataloader):
    model = AutoModel.from_pretrained("allenai/scibert_scivocab_uncased",
                                      output_hidden_states=True)

    model.to(device)
    test_embeddings = []
    test_labels = []

    for idx, batch in enumerate(test_dataloader):
        if idx % 20 == 0:
            print(f"Done with {idx} of {len(test_dataloader)} batches")
        b_input_ids = batch[0].to(device)
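
# A hedged sketch of how a test batch could be turned into [CLS] embeddings for
# the baseline classifier above; the batch layout (input ids, attention mask) and
# the use of the last hidden state are assumptions, not from the original source.
import torch

def batch_to_cls_embeddings(model, batch, device):
    b_input_ids = batch[0].to(device)
    b_attention_mask = batch[1].to(device)  # assumed batch layout
    with torch.no_grad():
        outputs = model(b_input_ids, attention_mask=b_attention_mask)
    # one [CLS] vector per example, taken from the last hidden layer
    return outputs.last_hidden_state[:, 0, :].cpu().numpy()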
Exemple #55
0
def main(config_file, num_opt_eval):
    conf = read_config(config_file)
    print("config:")
    pprint(conf)

    train_df, test_df = get_train_test(conf)
    feats = [
        f for f in train_df.columns if f not in (
            ['TARGET', 'SK_ID_CURR', 'SK_ID_BUREAU', 'SK_ID_PREV', 'index'])
    ]
    print(f"use {len(feats)} features.")

    pbar = tqdm(total=num_opt_eval, desc="optimize blend")

    def objective(params):
        pbar.update()
        warnings.simplefilter('ignore')

        s = sum([v for k, v in params.items() if k in feats])
        for p in feats:
            params[p] = params[p] / s

        test_pred_proba = pd.Series(np.zeros(train_df.shape[0]),
                                    index=train_df.index)

        for f in feats:
            test_pred_proba += (train_df[f]**params["power"]) * params[f]

        score = roc_auc_score(train_df['TARGET'], test_pred_proba)
        pbar.write(f"power {params['power']}: {score:.6f}")
        return {'loss': -1.0 * score, 'status': STATUS_OK}

    parameter_space = {"power": hp.quniform("power", 1, 8, 1)}
    for c in feats:
        parameter_space[c] = hp.quniform(c, 0, 1, 0.001)

    print("====== optimize blending parameters ======")
    trials = hyperopt.Trials()
    best = fmin(objective,
                parameter_space,
                algo=tpe.suggest,
                max_evals=num_opt_eval,
                trials=trials,
                verbose=1)
    pbar.close()

    s = sum([v for k, v in best.items() if k in feats])
    for p in feats:
        best[p] = best[p] / s

    print("====== best weights to blend ======")
    pprint(best)
    # for key, val in best.items():
    #     print(f"    {key}: {val}")
    print("============= best score =============")
    best_score = -1.0 * trials.best_trial['result']['loss']
    print(best_score)

    output_directory = os.path.join(
        conf.dataset.output_directory,
        f"{datetime.now().strftime('%m%d%H%M')}_{conf.config_file_name}_{best_score:.6f}"
    )
    print(f"write results to {output_directory}")
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    with open(os.path.join(output_directory, "result.json"), "w") as fp:
        results = {
            "weights": best,
            "score": best_score,
            "eval_num": num_opt_eval
        }
        print(results)
        json.dump(results, fp, indent=2)

    pickle.dump(trials.trials,
                open(os.path.join(output_directory, f'trials.pkl'), 'wb'))

    sub_preds = np.zeros(test_df.shape[0])
    for f in feats:
        sub_preds += (test_df[f]**best["power"]) * best[f]
    test_df['TARGET'] = sub_preds
    test_df[['SK_ID_CURR',
             'TARGET']].to_csv(os.path.join(output_directory,
                                            'submission.csv'),
                               index=False)
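
# A small numeric illustration of the blend computed above: each model's
# predictions are raised to the tuned power, multiplied by its normalised weight
# and summed. The weights and power below are illustrative values.
import numpy as np

preds = {"model_a": np.array([0.2, 0.9]), "model_b": np.array([0.4, 0.7])}
weights = {"model_a": 0.6, "model_b": 0.4}  # already normalised to sum to 1
power = 2
blended = sum((preds[f] ** power) * weights[f] for f in preds)
print(blended)  # -> [0.088 0.682]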
Exemple #56
0
import logging

import args_utils
import export_utils
import doc_utils
import config
import path_utils

args = args_utils.get_parser().parse_args()
path_utils.root_dir = args.publish_dir

if not args.allowlist:
    logging.warning(
        'RUNNING WITHOUT THE ALLOWLIST! DO NOT MAKE A PULL REQUEST WITH THE OUTPUT!'
    )

sources_all = config.read_config(cc_by=True,
                                 cc_by_sa=True,
                                 cc_by_nc=True,
                                 google_tos=True,
                                 filter_not_approved=args.allowlist)
sources_cc_by = config.read_config(cc_by=True,
                                   cc_by_sa=False,
                                   cc_by_nc=False,
                                   google_tos=False,
                                   filter_not_approved=args.allowlist)
sources_cc_by_sa = config.read_config(cc_by=True,
                                      cc_by_sa=True,
                                      cc_by_nc=False,
                                      google_tos=False,
                                      filter_not_approved=args.allowlist)
sources_cc_by_nc = config.read_config(cc_by=True,
                                      cc_by_sa=False,
                                      cc_by_nc=True,
Exemple #57
0
def main():
    parser = argparse.ArgumentParser(
        description='Micro-seismic earthquake detection: "Training" Step')
    parser.add_argument('--config',
                        required=True,
                        metavar='FILE',
                        dest='config_file',
                        help='configuration file to use')
    parser.add_argument('--batch-size',
                        type=int,
                        default=32,
                        metavar='N',
                        help='input batch size for training (default: 32)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=100,
                        metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument(
        '--num-workers',
        type=int,
        default=1,
        metavar='W',
        help='how many subprocesses to use for data loading (default: 1)')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.0001,
                        metavar='LR',
                        help='learning rate (default: 0.0001)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        dest='use_cuda',
                        action='store_false',
                        default=True,
                        help='disables CUDA training')
    parser.add_argument(
        '--train-split-ratio',
        type=float,
        default=0.8,
        metavar='R',
        help='the portion of the dataset used for training (default: 0.8)')
    parser.add_argument('--random-split',
                        action='store_true',
                        default=False,
                        help='shuffle the dataset before splitting')
    parser.add_argument('--seed',
                        type=int,
                        default=None,
                        metavar='S',
                        help='seed Python and Pytorch RNGs (default: None)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument(
        '--test-model',
        action='store_true',
        default=False,
        help='test final model and save predictions and ground-truth arrays')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='save the current (trained) model')
    parser.add_argument(
        '--save-costs',
        action='store_true',
        default=False,
        help='write train and test cost values to an ascii column file')
    parser.add_argument(
        '--summary',
        action='store_true',
        default=False,
        help='write summarized info on current model to a plain text file')
    parser.add_argument(
        '--logfile',
        action='store_true',
        default=False,
        help='write logfile for current run to a plain text file')

    args = parser.parse_args()

    # ----------
    tic = datetime.now()
    tic_str = tic.strftime('%Y-%m-%d_%H-%M')

    # CUDA for PyTorch
    use_cuda = args.use_cuda and torch.cuda.is_available()
    device = torch.device("cuda:0" if use_cuda else "cpu")

    # Set seed for RNGs
    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)  # for current GPU
        torch.cuda.manual_seed_all(args.seed)  # for all GPUs

    # Improve performance by enabling benchmarking feature
    # see: https://pytorch.org/docs/stable/notes/randomness.html
    cudnn.benchmark = True

    # ----------
    # Read dataset config file
    config = read_config(args.config_file)
    images_dirname = pathlib.Path(config.images_dirname)
    images_paths_pattern = str(
        images_dirname.joinpath(config.images_paths_pattern))
    images_filename_tmpl = str(
        images_dirname.joinpath(config.images_filename_tmpl))

    targets_filename = config.norm_params_filename

    # ----------
    # kwargs for DataLoader
    dataloader_kwargs = {'num_workers': args.num_workers, 'shuffle': True}

    if use_cuda:
        dataloader_kwargs['pin_memory'] = True

    # Dataset partitioning + prepare targets
    partition = get_train_test_partition(images_paths_pattern,
                                         args.train_split_ratio,
                                         args.random_split)

    train_targets, test_targets = \
        get_train_test_targets(targets_filename, partition)

    # Training-set generator
    train_loader = DataLoader(dataset=CustomDataset(images_filename_tmpl,
                                                    partition['train'],
                                                    train_targets),
                              batch_size=args.batch_size,
                              **dataloader_kwargs)

    # Test-set generator
    test_loader = DataLoader(dataset=CustomDataset(images_filename_tmpl,
                                                   partition['test'],
                                                   test_targets),
                             batch_size=args.test_batch_size,
                             **dataloader_kwargs)

    # ----------
    # Create the model object
    model = MultiTaskConvNet().to(device)

    # Adam optimizer
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Loss function
    class_criterion = nn.BCEWithLogitsLoss()
    bbox_criterion = nn.MSELoss()

    train_costs, test_costs = [], []
    for epoch in range(1, args.epochs + 1):
        # Training step
        train_cost = train(model, optimizer, class_criterion, bbox_criterion,
                           train_loader, device, epoch, args)

        # Evaluation step
        test_cost = evaluate(model, class_criterion, bbox_criterion,
                             test_loader, device)

        print('\nTrain-set epoch cost : {:.9f}'.format(train_cost))
        print('Test-set average loss: {:.9f}\n'.format(test_cost))

        train_costs.append(train_cost)
        test_costs.append(test_cost)

    # ----------
    fstem_dated = '{}_{}'.format(g_output_fstem, tic_str)

    # Save trained model
    if args.save_model:
        filename_model = fstem_dated + '_model.pth'
        torch.save(model.state_dict(), filename_model)

    # Save cost values
    if args.save_costs:
        filename_costs = fstem_dated + '_cost.nc'
        costs = xr.DataArray(np.vstack((train_costs, test_costs)).T,
                             dims=['epoch_index', 'epoch_cost'],
                             coords={'epoch_cost': ['train', 'test']})
        costs.to_netcdf(filename_costs)

    # Save model predictions
    if args.test_model:
        predictions, groundtruth = test(model, test_loader, device, args)
        predictions.to_netcdf(fstem_dated + '_pred.nc')
        groundtruth.to_netcdf(fstem_dated + '_true.nc')

    # ----------
    toc = datetime.now()

    # Write log-file
    if args.logfile:
        filename_logfile = fstem_dated + '_log.txt'
        with open(filename_logfile, 'w') as fid:
            fid.write('\n'.join(sys.argv[1:]))
            fid.write('\n')

            if args.save_model:
                fid.write('Trained model: {}\n'.format(filename_model))

            fid.write('Started at: {}\n'.format(tic))
            fid.write('Finished at: {}\n'.format(toc))

    # Write model summary
    if args.summary:
        dummy_model = MultiTaskConvNet().to(device)
        filename_summary = fstem_dated + '_summary.txt'
        original_stdout = sys.stdout

        with open(filename_summary, 'w') as fid:
            fid.write(str(dummy_model))
            fid.write('\n' * 3)

            # NOTE: `device` arg should be `str` not `torch.device`
            sys.stdout = fid
            images_batch, labels_batch = next(iter(test_loader))
            input_shape = images_batch.shape[1:]
            summary(dummy_model,
                    input_shape,
                    batch_size=args.batch_size,
                    device=device.type)

            sys.stdout = original_stdout
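
# An illustrative invocation of the training entry point above; the script and
# configuration file names are assumptions, the flags match the parser defined
# in main():
#
#     python train.py --config dataset.cfg --epochs 20 --lr 0.0001 \
#         --save-model --save-costs --logfile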
Exemple #58
0
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)

    ## create formatter
    formatter = logging.Formatter(
        '%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s'
    )

    ## add formatter to ch
    ch.setFormatter(formatter)

    ## add ch to logger
    logger.addHandler(ch)

    logger.info('Reading configuration.')
    try:
        conf = config.read_config()
    except Exception as e:
        logger.error('Exception while reading configuration: %s' % e)
        raise  # the kit cannot run without a valid configuration

    logger.info('Creating AstroPlant network client.')
    api_client = astroplant_client.Client(conf["api"]["root"],
                                          conf["websockets"]["url"])

    logger.info('Authenticating AstroPlant network client.')
    api_client.authenticate(conf["auth"]["serial"], conf["auth"]["secret"])

    logger.info('Initialising kit.')
    kit = Kit(api_client, conf["debug"])
    kit.run()
Exemple #59
0
#
# You should have received a copy of the GNU General Public License
# along with the sofware; see the file COPYING. If not, write to the
# Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
#
import __init__

import sys

import server, config, config_affairs

#--------------------------------------------------------------
# config affairs
# look for default config name in lib/config.py
conf = config.read_config(
    config.findConfigFileNameInArgv(sys.argv, __init__.ntlmaps_dir + '/'))

conf['GENERAL']['VERSION'] = '0.9.9'

#--------------------------------------------------------------
print 'NTLM authorization Proxy Server v%s' % conf['GENERAL']['VERSION']
print 'Copyright (C) 2001-2004 by Dmitry Rozmanov and others.'

config = config_affairs.arrange(conf)

#--------------------------------------------------------------
# let's run it
serv = server.AuthProxyServer(config)
serv.run()
Exemple #60
0
#!/usr/bin/python2
import config
import sys

cfg = dict()
if len(sys.argv) > 1:
    cfg = config.read_config(sys.argv[1])
else:
    cfg = config.read_config(config.DEFAULT_CONFIG_FILE)

scriptfname = "gscript"
if len(sys.argv) > 2:
    scriptfname = sys.argv[2]

prefix = cfg['output_prefix']
outdir = cfg['output_dir']

points = open(outdir + "/points_" + prefix + "_all")
size = len(points.readlines())
points.close()

script = "set terminal png size 2048,2048\n"
bigscript = "plot \"" + outdir + "/lines_" + prefix + "_all\" with lines "
for i in range(0,size):
    script = script + "set output \"" + outdir + "/chunks_" + prefix + "_" + str(i) + ".png\"\n"
    script = script + "plot \"" + outdir + "/points_" + prefix + "_" + str(i) + "\" with points, \"" + outdir + "/chunks_" + prefix + "_" + str(i) + "\" with lines\n"
    script = script + "set output \"" + outdir + "/chunks_raw_" + prefix + "_" + str(i) + ".png\"\n"
    script = script + "plot \"" + outdir + "/points_" + prefix + "_" + str(i) + "\" with points" 
    script = script + ",\"" + outdir + "/chunks_raw_" + prefix + "_" + str(i) + "\" with lines\n"
    bigscript = bigscript + ",\"" + outdir + "/points_" + prefix + "_" + str(i) + "\" with points, \"" + outdir + "/chunks_" + prefix + "_" + str(i) + "\" with lines"