コード例 #1
0
def startup():
    """Configure logging for the pay_bills job, then hand off to main()."""
    log_options = {
        'logname': 'pay_bills',
        'log_level': logging.INFO,
        'file_level': logging.INFO,
        'console_level': logging.DEBUG,
    }
    cfg.configure_logging(logger, **log_options)
    main()
コード例 #2
0
def main():
    """Read suspects as JSON lines from stdin, compute the requested
    features for each, and write the enriched records to stdout as JSON.

    Command-line options:
        --progress N       log progress every N suspects
        --list_features    print all available features and exit
        --feature F        feature to evaluate (may be repeated)
    """
    parser = argparse.ArgumentParser(parents=[config.logging_cli()])
    parser.add_argument('--progress',
                        help='log progress every N suspects',
                        type=int,
                        metavar='N')
    parser.add_argument('--list_features',
                        action='store_true',
                        help='print all available features and exit')
    parser.add_argument('--feature',
                        action='append',
                        dest='features',
                        # BUG FIX: the help text used to reference
                        # "--list-features", but the flag defined above is
                        # spelled "--list_features".
                        help='''Feature to evaluate.  May be repeated.
                        Features are evaluated in
                        the order given; it is the responsibility of
                        the user to include the required dependencies,
                        and in the right order.  If no features are
                        specified, defaults to all features.  See
                        --list_features to get a list of available
                        features.''')

    args = parser.parse_args()

    if args.list_features:
        print_features()
        return

    # No --feature given: fall back to the full default feature set.
    features = args.features or ['reg_time',
                                 'first_contrib_time',
                                 'first_contrib_interval',
                                 'live_edit_count',
                                 'deleted_edit_count',
                                 'block_count']
    check_valid_features(features)

    config.configure_logging(args)
    logger = logging.getLogger('get_features')

    logger.info("Starting work")
    logger.info("job-name = %s", args.job_name)
    logger.info("Using features: %s", features)
    start_time = datetime.datetime.now()

    db_connection = toolforge.connect('enwiki')

    count = 0
    for line in sys.stdin:
        initial_data = json.loads(line)

        suspect = Suspect(db_connection, initial_data)
        suspect.add_features(features)
        print(json.dumps(suspect.clean_data()))

        count += 1
        if args.progress and (count % args.progress == 0):
            logger.info("Processed %s suspects", count)

    finish_time = datetime.datetime.now()
    elapsed_time = finish_time - start_time
    logger.info("Processed %d suspects in %s", count, elapsed_time)
コード例 #3
0
ファイル: app.py プロジェクト: AAFC-MBB/galaxy-1
 def __init__( self, **kwargs ):
     """Build the reports application: configuration, logging, database
     engine/ORM, and security helper."""
     log.debug( "python path is: %s", ", ".join( sys.path ) )
     self.name = "reports"
     # Read the configuration and fail fast on errors.
     self.config = config.Configuration( **kwargs )
     self.config.check()
     config.configure_logging( self.config )
     # Use the configured connection string, falling back to local SQLite.
     db_url = (self.config.database_connection or
               "sqlite:///%s?isolation_level=IMMEDIATE" % self.config.database)
     # Set up the database engine and ORM.
     self.model = galaxy.model.mapping.init( self.config.file_path,
                                             db_url,
                                             self.config.database_engine_options,
                                             create_tables=True )
     # True only when an explicit connection string targets MySQL.
     self.targets_mysql = ( bool( self.config.database_connection ) and
                            'mysql' in self.config.database_connection )
     # Security helper
     self.security = security.SecurityHelper( id_secret=self.config.id_secret )
     # used for cachebusting -- refactor this into a *SINGLE* UniverseApplication base.
     self.server_starttime = int(time.time())
コード例 #4
0
 def __init__(self, **kwargs):
     """Bootstrap the reports application.

     Reads and validates the configuration supplied in ``kwargs``,
     configures logging, initializes the database engine and ORM
     (SQLite fallback when no connection string is configured), and
     sets up the security helper.
     """
     # BUG FIX: ``print >> sys.stderr`` is Python-2-only syntax and a
     # SyntaxError under Python 3; sys.stderr.write() behaves identically
     # (same text plus trailing newline) on both.
     sys.stderr.write("python path is: " + ", ".join(sys.path) + "\n")
     self.name = "reports"
     # Read config file and check for errors
     self.config = config.Configuration(**kwargs)
     self.config.check()
     config.configure_logging(self.config)
     # Determine the database url
     if self.config.database_connection:
         db_url = self.config.database_connection
     else:
         db_url = "sqlite:///%s?isolation_level=IMMEDIATE" % self.config.database
     # Setup the database engine and ORM
     self.model = galaxy.model.mapping.init(
         self.config.file_path,
         db_url,
         self.config.database_engine_options,
         create_tables=True)
     # True only when an explicit connection string targets MySQL.
     if not self.config.database_connection:
         self.targets_mysql = False
     else:
         self.targets_mysql = 'mysql' in self.config.database_connection
     # Security helper
     self.security = security.SecurityHelper(
         id_secret=self.config.id_secret)
     # used for cachebusting -- refactor this into a *SINGLE* UniverseApplication base.
     self.server_starttime = int(time.time())
コード例 #5
0
 def __init__(self, **kwd):
     """Bootstrap the Tool Shed application from the configuration in ``kwd``.

     Order matters here: config and logging first, then registries, then
     the database (verified and mapped), then the security, tag, tool-data
     and repository components that depend on the model.

     NOTE(review): uses Python 2 ``print >>`` statements -- this snippet
     predates a Python 3 migration.
     """
     print >> sys.stderr, "python path is: " + ", ".join(sys.path)
     self.name = "tool_shed"
     # Read the tool_shed.ini configuration file and check for errors.
     self.config = config.Configuration(**kwd)
     self.config.check()
     config.configure_logging(self.config)
     # Initialize the  Galaxy datatypes registry.
     self.datatypes_registry = galaxy.datatypes.registry.Registry()
     self.datatypes_registry.load_datatypes(self.config.root,
                                            self.config.datatypes_config)
     # Initialize the Tool Shed repository_types registry.
     self.repository_types_registry = tool_shed.repository_types.registry.Registry(
     )
     # Initialize the RepositoryGridFilterManager.
     self.repository_grid_filter_manager = RepositoryGridFilterManager()
     # Determine the Tool Shed database connection string.
     if self.config.database_connection:
         db_url = self.config.database_connection
     else:
         # Fall back to a local SQLite database file.
         db_url = "sqlite:///%s?isolation_level=IMMEDIATE" % self.config.database
     # Initialize the Tool Shed database and check for appropriate schema version.
     from galaxy.webapps.tool_shed.model.migrate.check import create_or_verify_database
     create_or_verify_database(db_url, self.config.database_engine_options)
     # Set up the Tool Shed database engine and ORM.
     from galaxy.webapps.tool_shed.model import mapping
     self.model = mapping.init(self.config.file_path, db_url,
                               self.config.database_engine_options)
     # Initialize the Tool Shed security helper.
     self.security = security.SecurityHelper(
         id_secret=self.config.id_secret)
     # initialize the Tool Shed tag handler.
     self.tag_handler = CommunityTagManager(self)
     # Initialize the Tool Shed tool data tables.  Never pass a configuration file here
     # because the Tool Shed should always have an empty dictionary!
     self.tool_data_tables = galaxy.tools.data.ToolDataTableManager(
         self.config.tool_data_path)
     self.genome_builds = GenomeBuilds(self)
     # Citation manager needed to load tools.
     from galaxy.managers.citations import CitationsManager
     self.citations_manager = CitationsManager(self)
     # The Tool Shed makes no use of a Galaxy toolbox, but this attribute is still required.
     self.toolbox = tools.ToolBox([], self.config.tool_path, self)
     # Initialize the Tool Shed security agent.
     self.security_agent = self.model.security_agent
     # The Tool Shed makes no use of a quota, but this attribute is still required.
     self.quota_agent = galaxy.quota.NoQuotaAgent(self.model)
     # TODO: Add OpenID support
     self.openid_providers = OpenIDProviders()
     # Initialize the baseline Tool Shed statistics component.
     self.shed_counter = self.model.shed_counter
     # Let the Tool Shed's HgwebConfigManager know where the hgweb.config file is located.
     self.hgweb_config_manager = self.model.hgweb_config_manager
     self.hgweb_config_manager.hgweb_config_dir = self.config.hgweb_config_dir
     # Initialize the repository registry.
     self.repository_registry = tool_shed.repository_registry.Registry(self)
     #  used for cachebusting -- refactor this into a *SINGLE* UniverseApplication base.
     self.server_starttime = int(time.time())
     print >> sys.stderr, "Tool shed hgweb.config file is: ", self.hgweb_config_manager.hgweb_config
コード例 #6
0
ファイル: app.py プロジェクト: AAFC-MBB/galaxy-1
 def __init__( self, **kwd ):
     """Bootstrap the Tool Shed application from the configuration in ``kwd``.

     Sequencing: config and logging first, then the datatype/repository
     registries, then the database (verified and mapped), then every
     component that depends on the model (security agent, quota, counters,
     hgweb config, repository registry).
     """
     log.debug( "python path is: %s", ", ".join( sys.path ) )
     self.name = "tool_shed"
     # Read the tool_shed.ini configuration file and check for errors.
     self.config = config.Configuration( **kwd )
     self.config.check()
     config.configure_logging( self.config )
     # Initialize the  Galaxy datatypes registry.
     self.datatypes_registry = galaxy.datatypes.registry.Registry()
     self.datatypes_registry.load_datatypes( self.config.root, self.config.datatypes_config )
     # Initialize the Tool Shed repository_types registry.
     self.repository_types_registry = tool_shed.repository_types.registry.Registry()
     # Initialize the RepositoryGridFilterManager.
     self.repository_grid_filter_manager = RepositoryGridFilterManager()
     # Determine the Tool Shed database connection string.
     if self.config.database_connection:
         db_url = self.config.database_connection
     else:
         # Fall back to a local SQLite database file.
         db_url = "sqlite:///%s?isolation_level=IMMEDIATE" % self.config.database
     # Initialize the Tool Shed database and check for appropriate schema version.
     from galaxy.webapps.tool_shed.model.migrate.check import create_or_verify_database
     create_or_verify_database( db_url, self.config.database_engine_options )
     # Set up the Tool Shed database engine and ORM.
     from galaxy.webapps.tool_shed.model import mapping
     self.model = mapping.init( self.config.file_path,
                                db_url,
                                self.config.database_engine_options )
     # Initialize the Tool Shed security helper.
     self.security = security.SecurityHelper( id_secret=self.config.id_secret )
     # initialize the Tool Shed tag handler.
     self.tag_handler = CommunityTagManager( self )
     # Initialize the Tool Shed tool data tables.  Never pass a configuration file here
     # because the Tool Shed should always have an empty dictionary!
     self.tool_data_tables = galaxy.tools.data.ToolDataTableManager( self.config.tool_data_path )
     self.genome_builds = GenomeBuilds( self )
     from galaxy import auth
     self.auth_manager = auth.AuthManager( self )
     # Citation manager needed to load tools.
     from galaxy.managers.citations import CitationsManager
     self.citations_manager = CitationsManager( self )
     # The Tool Shed makes no use of a Galaxy toolbox, but this attribute is still required.
     self.toolbox = tools.ToolBox( [], self.config.tool_path, self )
     # Initialize the Tool Shed security agent.
     self.security_agent = self.model.security_agent
     # The Tool Shed makes no use of a quota, but this attribute is still required.
     self.quota_agent = galaxy.quota.NoQuotaAgent( self.model )
     # TODO: Add OpenID support
     self.openid_providers = OpenIDProviders()
     # Initialize the baseline Tool Shed statistics component.
     self.shed_counter = self.model.shed_counter
     # Let the Tool Shed's HgwebConfigManager know where the hgweb.config file is located.
     self.hgweb_config_manager = self.model.hgweb_config_manager
     self.hgweb_config_manager.hgweb_config_dir = self.config.hgweb_config_dir
     # Initialize the repository registry.
     self.repository_registry = tool_shed.repository_registry.Registry( self )
     #  used for cachebusting -- refactor this into a *SINGLE* UniverseApplication base.
     self.server_starttime = int(time.time())
     log.debug( "Tool shed hgweb.config file is: %s", self.hgweb_config_manager.hgweb_config )
コード例 #7
0
def main():
    """Entry point: configure logging, then run the requested subcommand."""
    configure_logging()
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('command', choices=['create', 'load'])
    options = arg_parser.parse_args()
    # argparse guarantees command is one of the two choices.
    dispatch = {
        'create': create_session_file,
        'load': load_session_file,
    }
    dispatch[options.command]()
コード例 #8
0
 def __init__( self, **kwargs ):
     """Bootstrap the demo-sequencer application: configuration, logging,
     sequencer-actions registry, and security helper.
     """
     # BUG FIX: ``print >> sys.stderr`` is Python-2-only syntax and a
     # SyntaxError under Python 3; sys.stderr.write() produces the same
     # output (text plus trailing newline) on both.
     sys.stderr.write( "python path is: " + ", ".join( sys.path ) + "\n" )
     # Read config file and check for errors
     self.config = config.Configuration( **kwargs )
     self.config.check()
     config.configure_logging( self.config )
     # Set up sequencer actions registry
     self.sequencer_actions_registry = galaxy.webapps.demo_sequencer.registry.Registry( self.config.root, self.config.sequencer_actions_config )
     # Security helper
     self.security = security.SecurityHelper( id_secret=self.config.id_secret )
コード例 #9
0
def main():
    """Collect and analyze tweet sentiment for the top-20 currencies and
    store one record per tweet in the Cloudant ``sentiments`` database.
    """
    config.configure_logging('analyze.log')

    # Load top 20 (symbol, name) pairs from file.
    # (BUG FIX: comment previously said "top 25"; the data is the top 20.)
    with open(config.top_20_save_file, 'r') as f:
        freader = csv.reader(f)
        top_20 = [(row[0], row[1]) for row in freader]

    # Authorize tweepy
    with open(config.tweepy_credentials_file, 'r') as f:
        creds = json.load(f)
        auth = OAuthHandler(creds['consumerKey'], creds['consumerSecret'])
        auth.set_access_token(creds['accessToken'], creds['accessTokenSecret'])

    api = tweepy.API(auth)

    # Instantiate analyzer
    analyzer = SentimentIntensityAnalyzer()

    # Connect to database
    with open(config.cloudant_credentials_File, 'r') as f:
        creds = json.load(f)
        conn = cloudant.Cloudant(creds['username'],
                                 creds['password'],
                                 url='https://' + creds['host'],
                                 connect=True)

    db = conn['sentiments']

    total = 0
    for symb, name in top_20:
        # search for and analyze tweets regarding current currency
        tweets = search_and_analyze(api, analyzer, symb, name)
        for t in tweets:
            logging.info('Adding record: %s...', str(t))
            data = {'datetime': t[0], 'symbol': t[1], 'sentiment': t[2]}

            # don't exceed rate limit: pause briefly every 10th insert
            if (total + 1) % 10 == 0:
                time.sleep(1)

            tweet_doc = db.create_document(data)
            # BUG FIX: removed an unused ``dt = datetime.utcnow()`` local
            # that was computed here and never read.
            if tweet_doc.exists():
                logging.info('SUCCESS.')
            else:
                logging.error('FAILED.')
            total += 1

    logging.info('Total records inserted: %d', total)
コード例 #10
0
ファイル: notify.py プロジェクト: codycuellar/bill_tracker
def startup():
    """Parse CLI flags, configure logging accordingly, then run main()."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-d', '--debug', action='store_true', default=False,
                            help='Enable debug mode.')
    args = arg_parser.parse_args()

    # Console output is always DEBUG; the overall and file levels follow
    # the --debug flag.
    level = logging.DEBUG if args.debug else logging.INFO
    cfg.configure_logging(logger, logname='notify',
                          log_level=level,
                          file_level=level,
                          console_level=logging.DEBUG)
    main(DEBUG=args.debug)
コード例 #11
0
 def __init__( self, **kwargs ):
     """Bootstrap the application: configuration, logging, database engine
     and ORM, and security helper.
     """
     # BUG FIX: ``print >> sys.stderr`` is Python-2-only syntax;
     # sys.stderr.write() produces the same output on both Python 2 and 3.
     sys.stderr.write( "python path is: " + ", ".join( sys.path ) + "\n" )
     # Read config file and check for errors
     self.config = config.Configuration( **kwargs )
     self.config.check()
     config.configure_logging( self.config )
     # Determine the database url
     if self.config.database_connection:
         db_url = self.config.database_connection
     else:
         # BUG FIX: a relative SQLite file path needs three slashes
         # ("sqlite:///<path>"); "sqlite://<path>" parses <path> as a
         # network host.  Sibling apps in this codebase use "sqlite:///".
         db_url = "sqlite:///%s?isolation_level=IMMEDIATE" % self.config.database
     # Setup the database engine and ORM
     self.model = galaxy.model.mapping.init( self.config.file_path,
                                             db_url,
                                             self.config.database_engine_options,
                                             create_tables=True )
     # Security helper
     self.security = security.SecurityHelper( id_secret=self.config.id_secret )
コード例 #12
0
ファイル: app.py プロジェクト: Pelonza/Learn2Mine-Main
 def __init__( self, **kwargs ):
     """Bootstrap the tool shed application from the configuration in
     ``kwargs``: config and logging, registries, database (verified and
     mapped), then security/quota/counter components that need the model.

     NOTE(review): uses Python 2 ``print >>`` statements -- this snippet
     predates a Python 3 migration.
     """
     print >> sys.stderr, "python path is: " + ", ".join( sys.path )
     self.name = "tool_shed"
     # Read config file and check for errors
     self.config = config.Configuration( **kwargs )
     self.config.check()
     config.configure_logging( self.config )
     # Set up datatypes registry
     self.datatypes_registry = galaxy.datatypes.registry.Registry()
     self.datatypes_registry.load_datatypes( self.config.root, self.config.datatypes_config )
     # Set up the repository_types registry.
     self.repository_types_registry = tool_shed.repository_types.registry.Registry()
     # Determine the database url
     if self.config.database_connection:
         db_url = self.config.database_connection
     else:
         # Fall back to a local SQLite database file.
         db_url = "sqlite:///%s?isolation_level=IMMEDIATE" % self.config.database
     # Initialize database / check for appropriate schema version
     from galaxy.webapps.tool_shed.model.migrate.check import create_or_verify_database
     create_or_verify_database( db_url, self.config.database_engine_options )
     # Setup the database engine and ORM
     from galaxy.webapps.tool_shed.model import mapping
     self.model = mapping.init( self.config.file_path,
                                db_url,
                                self.config.database_engine_options )
     # Security helper
     self.security = security.SecurityHelper( id_secret=self.config.id_secret )
     # Tag handler
     self.tag_handler = CommunityTagHandler()
     # Tool data tables - never pass a config file here because the tool shed should always have an empty dictionary!
     self.tool_data_tables = galaxy.tools.data.ToolDataTableManager( self.config.tool_data_path )
     # The tool shed has no toolbox, but this attribute is still required.
     self.toolbox = tools.ToolBox( [], self.config.tool_path, self )
     # Load security policy
     self.security_agent = self.model.security_agent
     self.quota_agent = galaxy.quota.NoQuotaAgent( self.model )
     # TODO: Add OpenID support
     self.openid_providers = OpenIDProviders()
     self.shed_counter = self.model.shed_counter
     # Let the HgwebConfigManager know where the hgweb.config file is located.
     self.hgweb_config_manager = self.model.hgweb_config_manager
     self.hgweb_config_manager.hgweb_config_dir = self.config.hgweb_config_dir
     print >> sys.stderr, "Tool shed hgweb.config file is: ", self.hgweb_config_manager.hgweb_config
コード例 #13
0
ファイル: app.py プロジェクト: iamciera/galaxy-dist
 def __init__(self, **kwargs):
     """Bootstrap the community (tool shed) application from ``kwargs``:
     config and logging, datatypes registry, database (verified and
     mapped), then security/quota/counter components that need the model.

     NOTE(review): uses Python 2 ``print >>`` statements -- this snippet
     predates a Python 3 migration.
     """
     print >> sys.stderr, "python path is: " + ", ".join(sys.path)
     self.name = "community"
     # Read config file and check for errors
     self.config = config.Configuration(**kwargs)
     self.config.check()
     config.configure_logging(self.config)
     # Set up datatypes registry
     self.datatypes_registry = galaxy.datatypes.registry.Registry()
     self.datatypes_registry.load_datatypes(self.config.root,
                                            self.config.datatypes_config)
     # Determine the database url
     if self.config.database_connection:
         db_url = self.config.database_connection
     else:
         # Fall back to a local SQLite database file.
         db_url = "sqlite:///%s?isolation_level=IMMEDIATE" % self.config.database
     # Initialize database / check for appropriate schema version
     from galaxy.webapps.community.model.migrate.check import create_or_verify_database
     create_or_verify_database(db_url, self.config.database_engine_options)
     # Setup the database engine and ORM
     from galaxy.webapps.community.model import mapping
     self.model = mapping.init(self.config.file_path, db_url,
                               self.config.database_engine_options)
     # Security helper
     self.security = security.SecurityHelper(
         id_secret=self.config.id_secret)
     # Tag handler
     self.tag_handler = CommunityTagHandler()
     # Tool data tables - never pass a config file here because the tool shed should always have an empty dictionary!
     self.tool_data_tables = galaxy.tools.data.ToolDataTableManager(
         self.config.tool_data_path)
     # The tool shed has no toolbox, but this attribute is still required.
     self.toolbox = tools.ToolBox([], self.config.tool_path, self)
     # Load security policy
     self.security_agent = self.model.security_agent
     self.quota_agent = galaxy.quota.NoQuotaAgent(self.model)
     # TODO: Add OpenID support
     self.openid_providers = OpenIDProviders()
     self.shed_counter = self.model.shed_counter
     # Let the HgwebConfigManager know where the hgweb.config file is located.
     self.hgweb_config_manager = self.model.hgweb_config_manager
     self.hgweb_config_manager.hgweb_config_dir = self.config.hgweb_config_dir
     print >> sys.stderr, "Tool shed hgweb.config file is: ", self.hgweb_config_manager.hgweb_config
コード例 #14
0
def main():
    """Prepare the Cloudant databases for a new collection run.

    Ensures ``summaries`` exists, rolls any existing ``sentiments`` data
    into summaries before recreating it, and refreshes the top-20 file.
    """
    config.configure_logging('prepare.log')

    logging.info('Beginning preparation of databases.')

    # connect to server
    try:
        logging.info('Connecting to Cloudant server...')
        with open(config.cloudant_credentials_File, 'r') as cred_file:
            credentials = json.load(cred_file)

        conn = cloudant.Cloudant(credentials['username'],
                                 credentials['password'],
                                 url='https://' + credentials['host'],
                                 connect=True)
    except Exception:
        # connection failure -- nothing else can proceed
        logging.exception('Error connecting to Cloudant server.')
        sys.exit('Exiting.')

    logging.info('Finished connecting to Cloudant server.')

    # create the summaries db only if it is missing
    if not check_db(conn, 'summaries'):
        create_db_summaries(conn)

    # if sentiments db exists, summarize its data, then delete it
    if check_db(conn, 'sentiments'):
        summary.create_summaries(conn)
        logging.info('Deleting database "sentiments"...')
        conn.delete_database('sentiments')
        logging.info('Finished deleting database.')
    create_db_sentiments(conn)

    # refresh the top-20 file
    top_20.get_top_20()

    logging.info('Finished preparation of databases.')
    logging.info('Ready for sentiment records.')
コード例 #15
0
 def __init__( self, **kwargs ):
     """Bootstrap the community (tool shed) application: configuration,
     logging, datatypes registry, database, and security/quota components.
     """
     # BUG FIX: ``print >> sys.stderr`` is Python-2-only syntax;
     # sys.stderr.write() produces the same output on both Python 2 and 3.
     sys.stderr.write( "python path is: " + ", ".join( sys.path ) + "\n" )
     # Read config file and check for errors
     self.config = config.Configuration( **kwargs )
     self.config.check()
     config.configure_logging( self.config )
     # Set up datatypes registry
     self.datatypes_registry = galaxy.datatypes.registry.Registry()
     # TODO: Handle datatypes included in repositories - the following will only load datatypes_conf.xml.
     self.datatypes_registry.load_datatypes( self.config.root, self.config.datatypes_config )
     # Determine the database url
     if self.config.database_connection:
         db_url = self.config.database_connection
     else:
         # BUG FIX: a relative SQLite file path needs three slashes
         # ("sqlite:///<path>"); "sqlite://<path>" parses <path> as a
         # network host.  Sibling apps in this codebase use "sqlite:///".
         db_url = "sqlite:///%s?isolation_level=IMMEDIATE" % self.config.database
     # Initialize database / check for appropriate schema version
     from galaxy.webapps.community.model.migrate.check import create_or_verify_database
     create_or_verify_database( db_url, self.config.database_engine_options )
     # Setup the database engine and ORM
     from galaxy.webapps.community.model import mapping
     self.model = mapping.init( self.config.file_path,
                                db_url,
                                self.config.database_engine_options )
     # Security helper
     self.security = security.SecurityHelper( id_secret=self.config.id_secret )
     # Tag handler
     self.tag_handler = CommunityTagHandler()
     # Tool data tables
     self.tool_data_tables = galaxy.tools.data.ToolDataTableManager( self.config.tool_data_table_config_path )
     # The tool shed has no toolbox, but this attribute is still required.
     self.toolbox = None
     # Load security policy
     self.security_agent = self.model.security_agent
     self.quota_agent = galaxy.quota.NoQuotaAgent( self.model )
     # TODO: Add OpenID support
     self.openid_providers = OpenIDProviders()
     self.shed_counter = self.model.shed_counter
コード例 #16
0
def bootstrap():
    """Wire application dependencies into `inject`, then configure logging."""
    env = os.environ

    def _bind_dependencies(binder):
        # Resolve AWS settings from the environment.
        aws_config = config.get_aws_config(env)
        s3_config = config.get_aws_s3_config(env)
        sqs_config = config.get_aws_sqs_config(env)

        s3_client = boto3.client('s3',
                                 endpoint_url=s3_config['endpoint_url'],
                                 **aws_config)
        sqs_resource = boto3.resource(
            'sqs', endpoint_url=sqs_config['endpoint_url'], **aws_config)

        file_storage = dao.FileStorageClient(
            client=s3_client, bucket_name=s3_config['bucket_name'])
        binder.bind("file_storage_client", file_storage)

        queue = dao.QueueClient(
            resource=sqs_resource, queue_name=sqs_config['queue_name'])
        binder.bind("queue_client", queue)

        web_client = web.WebClient()
        binder.bind("web_client", web_client)

        # Competitor scrapers keyed by market identifier.
        binder.bind("competitors_map", {
            'nl_fonq': competitors.FonqCompetitor(),
            'nl_flinders': competitors.FlindersCompetitor(),
            'nl_bolia': competitors.BoliaCompetitor(web_client=web_client),
        })

    inject.configure(_bind_dependencies)

    config.configure_logging(env)
コード例 #17
0
ファイル: app.py プロジェクト: jmchilton/cloudman
    def __init__(self, **kwargs):
        """Bootstrap the CloudMan application.

        Builds the cloud interface, loads user data, reads and validates the
        configuration, wires up logging, and finally attempts to recover an
        existing cluster's persistent data (``persistent_data.yaml``) from
        the cluster bucket.

        NOTE(review): uses the Python 2 ``print`` statement -- this snippet
        predates a Python 3 migration.
        """
        print "Python version: ", sys.version_info[:2]
        self.PERSISTENT_DATA_VERSION = 3  # Current expected and generated PD version
        self.DEPLOYMENT_VERSION = 2
        cc = CloudConfig(app=self)
        # Get the type of cloud currently running on
        self.cloud_type = cc.get_cloud_type()
        # Create an appropriate cloud connection
        self.cloud_interface = cc.get_cloud_interface(self.cloud_type)
        # Load user data into a local field through a cloud interface
        self.ud = self.cloud_interface.get_user_data()
        # From user data determine if object store (S3) should be used.
        self.use_object_store = self.ud.get("use_object_store", True)
        # From user data determine if block storage (EBS/nova-volume) should be used.
        # (OpenNebula and dummy clouds do not support volumes yet so skip those)
        self.use_volumes = self.ud.get(
            "use_volumes", self.cloud_type not in ['opennebula', 'dummy'])
        # Read config file and check for errors
        self.config = config.Configuration(**kwargs)
        self.config.init_with_user_data(self.ud)
        self.config.check()
        # Setup logging
        self.logger = CMLogHandler(self)
        # 'testflag'/'localflag' in user data switch the handler to DEBUG;
        # note the later 'localflag' branch overrides the level set by
        # the 'testflag' branch.
        if "testflag" in self.ud:
            self.TESTFLAG = bool(self.ud['testflag'])
            self.logger.setLevel(logging.DEBUG)
        else:
            self.TESTFLAG = False
            self.logger.setLevel(logging.INFO)

        if "localflag" in self.ud:
            self.LOCALFLAG = bool(self.ud['localflag'])
            self.logger.setLevel(logging.DEBUG)
        else:
            self.LOCALFLAG = False
            self.logger.setLevel(logging.INFO)
        log.addHandler(self.logger)
        config.configure_logging(self.config, self.ud)
        log.debug("Initializing app")
        log.debug("Running on '{0}' type of cloud in zone '{1}' using image '{2}'."
                  .format(self.cloud_type, self.cloud_interface.get_zone(),
                          self.cloud_interface.get_ami()))

        # App-wide object to store messages that need to travel between the back-end
        # and the UI.
        # TODO: Ideally, this should be stored some form of more persistent
        # medium (eg, database, file, session) and used as a simple module (vs. object)
        # but that's hopefully still forthcoming.
        self.msgs = messages.Messages()

        # Check that we actually got user creds in user data and inform user
        if not ('access_key' in self.ud or 'secret_key' in self.ud):
            self.msgs.error("No access credentials provided in user data. "
                            "You will not be able to add any services.")
        # Update user data to include persistent data stored in cluster's bucket, if it exists
        # This enables cluster configuration to be recovered on cluster re-
        # instantiation
        self.manager = None
        if self.use_object_store and 'bucket_cluster' in self.ud:
            log.debug("Getting pd.yaml")
            if misc.get_file_from_bucket(self.cloud_interface.get_s3_connection(),
               self.ud['bucket_cluster'], 'persistent_data.yaml', 'pd.yaml'):
                pd = misc.load_yaml_file('pd.yaml')
                self.ud = misc.merge_yaml_objects(self.ud, pd)
                self.ud = misc.normalize_user_data(self, self.ud)
            else:
                log.debug("Setting deployment_version to {0}".format(self.DEPLOYMENT_VERSION))
                # This is a new cluster so default to the current version
                self.ud['deployment_version'] = self.DEPLOYMENT_VERSION
0
ファイル: app.py プロジェクト: ddavidovic/cloudman
    def __init__(self, **kwargs):
        print "Python version: ", sys.version_info[:2]
        self.PERSISTENT_DATA_VERSION = 3  # Current expected and generated PD version
        self.DEPLOYMENT_VERSION = 2
        # Instance persistent data file. This file gets created for
        # test/transient cluster types and stores the cluster config. In case
        # of a reboot, read the file to automatically recreate the services.
        self.INSTANCE_PD_FILE = '/mnt/persistent_data-current.yaml'
        cc = CloudConfig(app=self)
        # Get the type of cloud currently running on
        self.cloud_type = cc.get_cloud_type()
        # Create an appropriate cloud connection
        self.cloud_interface = cc.get_cloud_interface(self.cloud_type)
        # Read config file and check for errors
        self.config = config.Configuration(self, kwargs, self.cloud_interface.get_user_data())
        # From user data determine if object store (S3) should be used.
        self.use_object_store = self.config.get("use_object_store", True)
        # From user data determine if block storage (EBS/nova-volume) should be used.
        # (OpenNebula and dummy clouds do not support volumes yet so skip those)
        self.use_volumes = self.config.get(
            "use_volumes", self.cloud_type not in ['opennebula', 'dummy'])
#         self.config.init_with_user_data(self.ud)
        self.config.validate()
        # Setup logging
        self.logger = CMLogHandler()
        if "testflag" in self.config:
            self.TESTFLAG = bool(self.config['testflag'])
            self.logger.setLevel(logging.DEBUG)
        else:
            self.TESTFLAG = False
            self.logger.setLevel(logging.INFO)

        if "localflag" in self.config:
            self.LOCALFLAG = bool(self.config['localflag'])
            self.logger.setLevel(logging.DEBUG)
        else:
            self.LOCALFLAG = False
            self.logger.setLevel(logging.INFO)
        log.addHandler(self.logger)
        config.configure_logging(self.config)
        log.debug("Initializing app")
        log.debug("Running on '{0}' type of cloud in zone '{1}' using image '{2}'."
                  .format(self.cloud_type, self.cloud_interface.get_zone(),
                          self.cloud_interface.get_ami()))

        # App-wide object to store messages that need to travel between the back-end
        # and the UI.
        # TODO: Ideally, this should be stored some form of more persistent
        # medium (eg, database, file, session) and used as a simple module (vs. object)
        # but that's hopefully still forthcoming.
        self.msgs = messages.Messages()

        # App-wide consecutive number generator. Starting at 1, each time `next`
        # is called, get the next integer.
        self.number_generator = misc.get_a_number()

        # Check that we actually got user creds in user data and inform user
        if not ('access_key' in self.config or 'secret_key' in self.config):
            self.msgs.error("No access credentials provided in user data. "
                            "You will not be able to add any services.")
        # Update user data to include persistent data stored in cluster's bucket, if it exists
        # This enables cluster configuration to be recovered on cluster re-
        # instantiation
        self.manager = None
        pd = None
        if self.use_object_store and 'bucket_cluster' in self.config:
            log.debug("Looking for existing cluster persistent data (PD).")
            validate = True if self.cloud_type == 'ec2' else False
            if not self.TESTFLAG and misc.get_file_from_bucket(
                    self.cloud_interface.get_s3_connection(),
                    self.config['bucket_cluster'],
                    'persistent_data.yaml', 'pd.yaml',
                    validate=validate):
                        log.debug("Loading bucket PD file pd.yaml")
                        pd = misc.load_yaml_file('pd.yaml')
        # Have not found the file in the cluster bucket, look on the instance
        if not pd:
            if os.path.exists(self.INSTANCE_PD_FILE):
                log.debug("Loading instance PD file {0}".format(self.INSTANCE_PD_FILE))
                pd = misc.load_yaml_file(self.INSTANCE_PD_FILE)
        if pd:
            self.config.user_data = misc.merge_yaml_objects(self.config.user_data, pd)
            self.config.user_data = misc.normalize_user_data(self, self.config.user_data)
        else:
            log.debug("No PD to go by. Setting deployment_version to {0}."
                      .format(self.DEPLOYMENT_VERSION))
            # This is a new cluster so default to the current deployment version
            self.config.user_data['deployment_version'] = self.DEPLOYMENT_VERSION
Code example #19
0
File: app.py — Project: JCVI-Cloud/cloudman
 def __init__( self, **kwargs ):
     """Bootstrap the CloudMan application (Python 2 code path).

     Detects the cloud type, builds a cloud interface, loads user data,
     configures logging, optionally merges persistent cluster data from
     the cluster's S3 bucket, and finally starts a master or worker
     console manager depending on the 'role' key in user data.
     """
     print "Python version: ", sys.version_info[:2]
     cc = CloudConfig(app=self)
     # Get the type of cloud currently running on
     self.cloud_type = cc.get_cloud_type()
     # Create an appropriate cloud connection
     self.cloud_interface = cc.get_cloud_interface(self.cloud_type)
     # Load user data into a local field through a cloud interface
     self.ud = self.cloud_interface.get_user_data()
     # From user data determine if object store (S3) should be used.
     self.use_object_store = self.ud.get("use_object_store", True)
     # Read config file and check for errors
     self.config = config.Configuration( **kwargs )
     self.config.check()
     # Setup logging
     self.logger = CMLogHandler(self)
     # 'testflag' in user data switches the handler to DEBUG verbosity.
     if self.ud.has_key("testflag"):
         self.TESTFLAG = bool(self.ud['testflag'])
         self.logger.setLevel(logging.DEBUG)
     else:
         self.TESTFLAG = False
         self.logger.setLevel(logging.INFO)

     # 'localflag' does the same; note it overrides the level chosen above.
     if self.ud.has_key("localflag"):
         self.LOCALFLAG = bool(self.ud['localflag'])
         self.logger.setLevel(logging.DEBUG)
     else:
         self.LOCALFLAG = False
         self.logger.setLevel(logging.INFO)
     log.addHandler(self.logger)
     config.configure_logging(self.config)
     log.debug( "Initializing app" )
     log.debug("Running on '{0}' type of cloud.".format(self.cloud_type))

     # App-wide object to store messages that need to travel between the back-end
     # and the UI.
     # TODO: Ideally, this should be stored some form of more persistent
     # medium (eg, database, file, session) and used as a simple module (vs. object)
     # but that's hopefully still forthcoming.
     self.msgs = messages.Messages()

     # Check that we actually got user creds in user data and inform user
     if not ('access_key' in self.ud or 'secret_key' in self.ud):
         self.msgs.error("No access credentials provided in user data. "
             "You will not be able to add any services.")
     # Update user data to include persistent data stored in cluster's bucket, if it exists
     # This enables cluster configuration to be recovered on cluster re-instantiation
     self.manager = None
     if self.use_object_store and self.ud.has_key('bucket_cluster'):
         log.debug("Getting pd.yaml")
         if misc.get_file_from_bucket(self.cloud_interface.get_s3_connection(), self.ud['bucket_cluster'], 'persistent_data.yaml', 'pd.yaml'):
             pd = misc.load_yaml_file('pd.yaml')
             self.ud = misc.merge_yaml_objects(self.ud, pd)
     # Dispatch on role: master and worker managers are imported lazily to
     # avoid pulling in the other role's dependencies.
     if self.ud.has_key('role'):
         if self.ud['role'] == 'master':
             log.info( "Master starting" )
             from cm.util import master
             self.manager = master.ConsoleManager(self)
         elif self.ud['role'] == 'worker':
             log.info( "Worker starting" )
             from cm.util import worker
             self.manager = worker.ConsoleManager(self)
         # NOTE(review): if 'role' is present but neither 'master' nor
         # 'worker', self.manager is still None here and this line raises
         # AttributeError — confirm whether other role values can occur.
         self.manager.console_monitor.start()
     else:
         log.error("************ No ROLE in %s - this is a fatal error. ************" % paths.USER_DATA_FILE)
コード例 #20
0
def main():
    """Select random enwiki users as (non-sock) control samples.

    Draws random user_ids until --count candidates have been examined,
    filtering out duplicates, non-existent users, users below --min-edits,
    blocked users, and names that fail UTF-8 decoding.  Each surviving
    candidate is written to stdout as a one-line JSON record.
    """
    #pylint: disable=R0914
    parser = argparse.ArgumentParser(parents=[config.logging_cli()])
    parser.add_argument(
        '--count',
        help='''Number of candidate users to select (default: 100).
                        The actual number of users produced will (almost
                        certainly) be less than this.''',
        type=int,
        default=100)
    parser.add_argument(
        '--min-edits',
        help='Minimum number of edits (default: 0) for selected users.',
        type=int,
        default=0)
    parser.add_argument('--progress',
                        help='log progress every N candidates',
                        type=int,
                        default=1000,
                        metavar='N')
    args = parser.parse_args()
    config.configure_logging(args)

    logger = logging.getLogger('get_controls')
    logger.info("Starting work, job-name = %s", args.job_name)
    t_start = datetime.datetime.now()

    #pylint: disable=C0103
    db = toolforge.connect('enwiki')

    # Upper bound for random user_id sampling.
    with db.cursor() as cur:
        cur.execute("select max(user_id) from user")
        (highest_user_id,) = cur.fetchone()

    candidates_seen = 0
    controls_emitted = 0
    dupes = 0
    missing_users = 0
    blocked_users = 0
    bad_unicode = 0
    low_edit_users = 0
    sampled_ids = set()
    while candidates_seen < args.count:
        candidates_seen += 1
        if candidates_seen % args.progress == 0:
            logger.info("processed %d candidates, %d valid control users",
                        candidates_seen, controls_emitted)
        candidate_id = random.randint(1, highest_user_id)
        if candidate_id in sampled_ids:
            dupes += 1
            continue
        sampled_ids.add(candidate_id)
        # Look up the candidate's name and edit count.
        with db.cursor() as cur:
            cur.execute(
                """
            select user_name, user_editcount
            from user
            where user_id = %(user_id)s
            """, {'user_id': candidate_id})
            rows = cur.fetchall()
        if not rows:
            missing_users += 1
            continue
        raw_name, edit_total = rows[0]
        if edit_total < args.min_edits:
            low_edit_users += 1
            continue
        # Reject anyone with a block on record.
        with db.cursor() as cur:
            cur.execute(
                "select count(*)from ipblocks where ipb_user = %(user_id)s",
                {'user_id': candidate_id})
            (block_total,) = cur.fetchone()
        if block_total:
            blocked_users += 1
            continue
        try:
            decoded_name = raw_name.decode("utf-8")
        except UnicodeError as err:
            logger.error("Failed to decode %r as utf-8: %s", raw_name, err)
            bad_unicode += 1
            continue
        print(json.dumps({
            'user': decoded_name,
            'is_sock': False,
        }))
        controls_emitted += 1

    elapsed = datetime.datetime.now() - t_start
    logger.info(
        "Processed %d users (%d duplicates, %d non-existant, %d blocked, %d too few edits) in %s",
        controls_emitted, dupes, missing_users, blocked_users,
        low_edit_users, elapsed)
    if bad_unicode:
        logger.error("There were %d unicode errors!", bad_unicode)
コード例 #21
0
ファイル: qsystem.py プロジェクト: sjrumsby/queue-management
# Add a logout link to the admin UI; `flask_admin` and `admin` are created
# earlier in this module (outside this excerpt).
flask_admin.add_link(
    admin.LogoutMenuLink(name='Logout', category='', url="/api/v1/logout/"))

# Attach flask-login session management to the application.
login_manager = LoginManager()
login_manager.init_app(application)
# Imported for its side effects — presumably registers auth callbacks/routes.
import app.auth

# Enable HTTP response compression for the application.
compress = Compress()
compress.init_app(application)

#   Get long running query logger.
logger = logging.getLogger("myapp.sqltime")
logger.setLevel(logging.DEBUG)

#   Configure all logging except basic logging
configure_logging(application)

#  Code to determine all db.engine properties and sub-properties, as necessary.
#  NOTE(review): deliberately dead debug code — flip `if False` to True to
#  dump every attribute of db.engine at startup.
if False:
    print("==> All DB Engine options")
    for attr in dir(db.engine):
        print("    --> db.engine." + attr + " = " +
              str(getattr(db.engine, attr)))
        # print("db.engine.%s = %s") % (attr, getattr(db.engine, attr))

#  See whether options took.
#  `print_flag` is presumably set from configuration earlier in the file —
#  TODO confirm.
if print_flag:
    print("==> DB Engine options")
    print("    --> pool size:    " + str(db.engine.pool.size()))
    print("    --> max overflow: " + str(db.engine.pool._max_overflow))
    print("    --> echo:         " + str(db.engine.echo))
コード例 #22
0
"""
Run multiple telegram clients and auth server.
"""
import asyncio
import atexit
import logging

from quart import Quart, render_template, request

from config import DEBUG, configure_logging, MASTER_KEY, PORT
from registry import Registry, registry

# `DEBUG and logging.DEBUG` yields logging.DEBUG when DEBUG is truthy and the
# (falsy) DEBUG value itself otherwise — presumably configure_logging treats a
# falsy level as "use the default"; confirm against config.py.
configure_logging(level=DEBUG and logging.DEBUG)
logger = logging.getLogger(__name__)
app = Quart(__name__, template_folder='templates')


@app.route('/', methods=['GET', 'POST'])
async def index_view():
    """Render the index page and drive the phone/master-key submission flow.

    A request without a 'phone' field just shows the phone form; a
    submission with a wrong master key re-renders the form with an error.
    NOTE(review): the success path (valid master key) is not visible in
    this excerpt — a valid submission appears to fall through; confirm
    against the full source.
    """
    messages = []
    form = await request.form
    logger.debug(f"received form {form}")

    # No phone number submitted yet: show the bare form.
    if 'phone' not in form:
        logger.debug(f"show phone form")
        return await render_template('index.html', messages=messages)

    # Reject submissions that lack the shared master key.
    if form.get('master') != MASTER_KEY:
        logger.debug(f"no master key, show index")
        messages.append({'tag': 'alert-danger', 'text': 'invalid master key'})
        return await render_template('index.html', messages=messages)
コード例 #23
0
def main(env):
    """Configure logging for *env* and run the event-store consumer to completion."""
    config.configure_logging(env)
    es_config = config.get_eventstore_config(env)
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(run(es_config))
コード例 #24
0
#!/usr/bin/env python

import os
import sys

# Make the project root (one level up from this script) importable before the
# local-package imports below.
nemesis_root = os.path.dirname(os.path.abspath(__file__)) + "/../"
sys.path.insert(0, nemesis_root)

import config
import helpers

if __name__ == "__main__":
    # Set up logging, then send any pending notification emails.
    config.configure_logging()
    helpers.send_emails()
コード例 #25
0
def main():
    """Read SPI archives and emit confirmed socks as one-line JSON records.

    Input comes from --archive, from every file in --archive-dir, or from
    stdin.  Each suspect found is checked against the enwiki database via
    is_sock(); confirmed, previously-unseen users are written to --out
    with is_sock set to True.
    """
    parser = argparse.ArgumentParser(
        epilog='''If neither --archive nor --archive_dir
                                               are given, reads from stdin.''',
        parents=[config.logging_cli()])

    input_group = parser.add_mutually_exclusive_group()
    input_group.add_argument('--archive',
                             help='SPI archive file to read',
                             default=sys.stdin,
                             type=open)
    input_group.add_argument(
        '--archive-dir',
        help='''Directory where SPI archives files can be found.  Each file in
                             that directory will be processed in turn.''',
        type=directory_path)
    parser.add_argument('--out',
                        help='Output file',
                        type=argparse.FileType('w'),
                        default=sys.stdout)

    args = parser.parse_args()
    config.configure_logging(args)

    logger = logging.getLogger('get_socks')
    db = toolforge.connect('enwiki')

    # Build the iterable of input streams (lazily opened for a directory).
    # NOTE(review): the opened files are never explicitly closed — acceptable
    # for a short-lived CLI job, but worth confirming.
    if args.archive_dir:
        paths = args.archive_dir.iterdir()
        input_streams = map(lambda p: p.open(), paths)
    else:
        input_streams = [args.archive]

    logger.info("Starting work, job-name = %s", args.job_name)
    start_time = datetime.datetime.now()

    archive_count = 0
    suspect_count = 0
    non_sock_count = 0
    duplicate_count = 0
    seen_users = set()  # usernames already emitted; dedupes across archives
    for stream in input_streams:
        archive_count += 1
        logger.info("Starting archive %d: %s", archive_count, stream.name)
        archive = Archive(stream)
        for suspect in archive.get_suspects():
            suspect_count += 1
            user = suspect['user']
            if user in seen_users:
                duplicate_count += 1
                logger.info("Duplicate supressed: %s", user)
                continue
            # Only confirmed socks are recorded (and only once each).
            if is_sock(db, user):
                seen_users.add(user)
                suspect['is_sock'] = True
                print(json.dumps(suspect), file=args.out)
            else:
                non_sock_count += 1
                logger.info("Skipping non-sock: %s", user)

    finish_time = datetime.datetime.now()
    elapsed_time = finish_time - start_time
    logger.info(
        "Done with %d archives, %d suspects, %d socks, %d non-socks, %d duplicates in %s",
        archive_count, suspect_count, len(seen_users), non_sock_count,
        duplicate_count, elapsed_time)
コード例 #26
0
ファイル: __main__.py プロジェクト: brizzbane/ipscan
import logging

from PyQt5.QtCore import QPoint, QSize, Qt, QSettings, QSignalMapper
from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QAction, QMdiArea
from PyQt5.QtGui import QKeySequence

from config import configure_logging
from gui.iprangescan import IpRangeScanGUI

configure_logging()
logger = logging.getLogger('mainwindow')
# Logger.warn is a long-deprecated alias of Logger.warning (removed in
# Python 3.13); bind the supported method instead.  The module-level name
# `warn` is unchanged, so existing call sites keep working.
debug, info, warn, error, critical = (logger.debug, logger.info,
                                      logger.warning, logger.error,
                                      logger.critical)


class NetMainWindow(QMainWindow):
    def __init__(self):
        """Build the main MDI window: central area, window mapping, menus."""
        super(NetMainWindow, self).__init__()
        # MDI container for scan sub-windows; scroll bars appear only as needed.
        self.mdiArea = QMdiArea()
        self.mdiArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.mdiArea.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.setCentralWidget(self.mdiArea)
        # Route window-menu actions back to their QWidget sub-windows.
        self.windowMapper = QSignalMapper(self)
        self.windowMapper.mapped[QWidget].connect(self.setActiveSubWindow)
        # UI assembly helpers defined elsewhere in this class (not shown in
        # this excerpt).
        self.createActions()
        self.createMenus()
        self.createStatusBar()
        self.readSettings()
        self.setWindowTitle('Net')

    def newScan(self):
        child = IpRangeScanGUI(self)
コード例 #27
0
from pprint import pformat

import aiohttp
from debian.deb822 import Deb822
from sanic import Sanic
from sanic.log import LOGGING_CONFIG_DEFAULTS, logger
from sanic.response import file, raw, redirect
from sanic_prometheus import monitor
from urlobject import URLObject

from config import Config, configure_logging
from cran.models import Package, Status
from cran.registry import Registry

# Start from sanic's default logging config; the project hook presumably
# mutates the copied dict in place — confirm against config.py.
log_config = LOGGING_CONFIG_DEFAULTS.copy()
configure_logging(log_config)

app = Sanic(log_config=log_config)
app.config.from_object(Config)
# Filled in during startup; annotated so the attributes are discoverable.
app.registry: Registry = None
# A 1-permit semaphore — looks like it serializes compile jobs; TODO confirm.
# NOTE(review): `asyncio` is not imported in this excerpt — presumably
# imported earlier in the file.
app.semaphore = asyncio.Semaphore(1)
app.compile_queue: asyncio.Queue = None


async def pass_through(to):
    resp = await app.http.get(to)
    return raw(
        await resp.read(),
        status=resp.status,
        headers=resp.headers,
        content_type=resp.content_type,