Example #1
def iveGotToMoveIt(source, destination, array):
    # Copy each listed subdirectory tree from source to destination and log the mapping
    for item in array:
        src = source + item
        dest = destination + item
        shutil.copytree(src, dest)
        collectionRes = src + " => " + dest
        log.logger(collectionRes)
Example #2
def get_hist_data(code, ktype, start=None, end=None):
    """
    股票历史数据
    :param code: 股票代码
    :param ktype: K线类型
    :param start: 开始时间
    :param end: 结束时间
    :return:
    """
    hist_data_list = []
    df = ts.get_hist_data(code=code, start=start, end=end, ktype=ktype)

    df.reset_index(inplace=True)
    if len(df.index) == 0:
        log.logger().info("no data found for: " + code + " start date: " +
                          start + "end date: " + end + "K type: " + ktype)
        return

    indicator(df)

    size = df.iloc[:, 0].size

    if size == 0:
        return
    df = df.replace([np.inf, -np.inf], np.nan)
    df = df.astype(object).where(pd.notnull(df), None)

    return hist_data_list
Example #3
    def view(self, avid, page=1):
        params = {"id": avid, "page": page}
        sign = self.get_sign(params)

        log.logger().info("get: " + self.__urls["view"] + "?" + sign)
        res = self.opener.open(self.__urls["view"] + "?" + sign).read()
        return Subject(json.loads(res.decode()))
Example #4
 def push_to_queue(self, movies):
     for movie_json in movies:
         movie = model.MovieQueue.select().where(model.MovieQueue.douban_id == movie_json["id"]).limit(1).first()
         if movie is None:
             model.MovieQueue.create(douban_id=movie_json["id"])
             log.logger().info("{0}, {1} push into queue".format(movie_json["id"], movie_json["title"]))
         else:
             log.logger().info("movie: {0}, {1} exist".format(movie_json["id"], movie_json["title"]))
Example #5
def download(file_name, ftp, output, download_path):
    TempFile = open(output + '/' + file_name, "wb")
    ftp.cwd(download_path)
    start = time.time()
    ftp.retrbinary('RETR ' + file_name, TempFile.write)
    end = time.time()
    TempFile.close()
    log.logger("*****Download operation*****", end - start, file_name, ftp.size(file_name), datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    print file_name + " is downloaded!"
Example #6
def upload(file_name, ftp, upload_path):
    ftp.cwd(upload_path)
    TempFile = open(file_name, "rb")
    start = time.time()
    ftp.storbinary("STOR " + file_name, TempFile)
    end = time.time()
    log.logger("*****Upload operation*****", end - start, file_name, os.stat(file_name).st_size, datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    TempFile.close()
    print file_name + " is uploaded!"
Example #7
    def on_message(self, client, userdata, msg):
        msg_topic = msg.topic
        try:
            msg_payload = msg.payload.decode('utf-8')
        except UnicodeDecodeError:
            msg_payload = f'binary:{dump_hex(msg.payload)}'

        # Using log to record.
        logger().info(f"Received topic: {msg_topic}")
        logger().info(f"Received Msg: {msg_payload}")
Example #8
    def register():
        name_entry = name.get()
        age_entry = age.get()
        sex_entry = sex.get()
        email_entry = email.get()
        pass1_entry = pass1.get()

        registerData = [
            name_entry, age_entry, sex_entry, email_entry, pass1_entry
        ]

        logger(registerData)

        login_open()
Example #9
def scanFolder(folder):
    badFiles = []
    #flag = 0
    for dirpath, dirnames, filenames in os.walk(folder):

        for filename in [f for f in filenames if f.endswith(find)]:
            #flag = 1

            toLog = (os.path.join(dirpath, filename))
            log.logger(toLog)
            #badFiles.append((dirpath, filename))
            badFiles.append(toLog)
    #print(badFiles)
    if badFiles:
        return badFiles
Example #10
    def start(self, settings, workflow=None):
        # Log
        identity = "starter_%s" % int(random.random() * 1000)
        logFile = "starter.log"
        #logFile = None
        logger = log.logger(logFile, settings.setLevel, identity)

        # Simple connect
        conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)

        if workflow is not None:
            (workflow_id, workflow_name, workflow_version, \
                child_policy, execution_start_to_close_timeout, \
                input) = self.get_workflow_params(workflow, settings)

            # Start a workflow execution
            try:
                response = conn.start_workflow_execution(settings.domain, workflow_id, \
                                        workflow_name, workflow_version, \
                                        settings.default_task_list, child_policy, \
                                        execution_start_to_close_timeout, input)

                logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))

            except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
                # There is already a running workflow with that ID, cannot start another
                message = ('SWFWorkflowExecutionAlreadyStartedError: There is already ' +
                           'a running workflow with ID %s' % workflow_id)
                print message
                logger.info(message)
Example #11
    def test(self, command):
        """Should the alias be triggered by the text?"""
        match = self.re_alias.search(command)
        if match:
            log = logger("client")
            log.debug("Executing the alias {}".format(
                    repr(self.alias)))

            engine = self.sharp_engine
            if "args" not in engine.locals:
                engine.locals["args"] = {}

            args = engine.locals["args"]

            # Copy the groups of this match
            i = 0
            for group in match.groups():
                i += 1
                args[str(i)] = group

            # Copy the named groups
            for name, group in match.groupdict().items():
                engine.locals[name] = group

            # Execute the alias
            self.execute()
            return True

        return False
Example #12
    def start(self,
              ENV="dev",
              all=None,
              last_updated_since=None,
              docs=None,
              doi_id=None):
        # Specify run environment settings
        settings = settingsLib.get_settings(ENV)

        # Log
        identity = "starter_%s" % int(random.random() * 1000)
        logFile = "starter.log"
        #logFile = None
        logger = log.logger(logFile, settings.setLevel, identity)

        # Simple connect
        conn = boto.swf.layer1.Layer1(settings.aws_access_key_id,
                                      settings.aws_secret_access_key)

        if (all == True):
            # Publish all articles, use SimpleDB as the source
            docs = self.get_docs_from_SimpleDB(ENV)

        elif (doi_id is not None):
            docs = self.get_docs_from_SimpleDB(ENV, doi_id=doi_id)

        elif (last_updated_since is not None):
            # Publish only articles since the last_modified date, use SimpleDB as the source
            docs = self.get_docs_from_SimpleDB(ENV, last_updated_since)

        if (docs):
            for doc in docs:

                document = doc["document"]
                elife_id = doc["elife_id"]

                id_string = elife_id

                # Start a workflow execution
                workflow_id = "PublishArticle_%s" % (id_string)
                workflow_name = "PublishArticle"
                workflow_version = "1"
                child_policy = None
                execution_start_to_close_timeout = None
                input = '{"data": ' + json.dumps(doc) + '}'

                try:
                    response = conn.start_workflow_execution(
                        settings.domain, workflow_id, workflow_name,
                        workflow_version, settings.default_task_list,
                        child_policy, execution_start_to_close_timeout, input)

                    logger.info('got response: \n%s' %
                                json.dumps(response, sort_keys=True, indent=4))

                except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
                    # There is already a running workflow with that ID, cannot start another
                    message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
                    print message
                    logger.info(message)
Example #13
 def start(self, ENV = "dev"):
   # Specify run environment settings
   settings = settingsLib.get_settings(ENV)
   
   ping_marker_id = "cron_NewS3FullArticle"
   
   # Log
   logFile = "starter.log"
   logger = log.logger(logFile, settings.setLevel, ping_marker_id)
   
   # Data provider
   db = dblib.SimpleDB(settings)
   db.connect()
   
   # SWF meta data provider
   swfmeta = swfmetalib.SWFMeta(settings)
   swfmeta.connect()
   
   last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(workflow_id = ping_marker_id)
 
   # Start a ping workflow as a marker
   self.start_ping_marker(ping_marker_id, ENV)
 
   # Check for S3 XML files that were updated since the last run
   date_format = "%Y-%m-%dT%H:%M:%S.000Z"
   
   # Quick hack - subtract 15 minutes, 
   #   the time between S3Monitor running and this cron starter
   if last_startTimestamp is not None:
     last_startTimestamp_minus_15 = last_startTimestamp - (60*15)
   else:
     # On the first run ever the last start timestamp will be unavailable
     last_startTimestamp_minus_15 = time.time() - (60*15)
     
   time_tuple = time.gmtime(last_startTimestamp_minus_15)
   
   last_startDate = time.strftime(date_format, time_tuple)
   
   logger.info('last run %s' % (last_startDate))
   
   # 
   file_list = db.elife_get_production_final_delivery_S3_file_items(last_updated_since = last_startDate)
   
   logger.info('Full Article files updated since %s: %s' % (last_startDate, str(len(file_list))))
 
   if(len(file_list) <= 0):
     # No new XML
     pass
   else:
     # Found new XML files
     
     # Start a PublishFullArticleZip starter
     try:
       starter_name = "starter_PublishFullArticleZip"
       self.import_starter_module(starter_name, logger)
       s = self.get_starter_module(starter_name, logger)
       s.start(ENV = ENV, last_updated_since = last_startDate)
     except:
       logger.info('Error: %s starting %s' % (ping_marker_id, starter_name))
       logger.exception('')
Example #14
File: etl.py Project: pjsier/psm
def main():
    os.chdir(os.path.dirname(__file__))
    logger = log.logger()
    info('Starting ETL of LEIE data.')

    # Figure out where we put data
    datadir = get_datadir()
    dbdir = get_dbdir()

    # Get a database connection, create db if needed
    conn = model.LEIE("development",
                      db_conf_file=os.path.join(dbdir, "dbconf.yml"))

    # Make sure the db schema is up to date, create tables, etc.
    conn.migrate()

    assert os.path.exists(datadir)

    # Do our ETL
    download(datadir, conn)
    excl = Exclusions(conn)
    excl.etl_from_dir(datadir)
    rein = Reinstatements(conn)
    rein.etl_from_dir(datadir)

    # Close the db connection
    conn.close()

    info('Finished ETL of LEIE data.')
Example #15
 def __init__(self, version):
  self.version = version
  self.stopped = False
  self.tc = 0
  self.th = {}
  self.sjid = u'%s@%s/%s' % (config.USER, config.SERVER, config.RESOURCE)
  self.jid = jid.JID(self.sjid)
  self.onauthd = None
  self.a = XMPPAuthenticator(self.jid, config.PASSWD)
  self.c = ClientFactory(self.a, self)
  self.c.maxRetries = 0
  self.c.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, self.authd)
  self.c.addBootstrap(xmlstream.INIT_FAILED_EVENT, self.initfailed)
  self.c.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT, self.onConnected)
  self.c.addBootstrap(xmlstream.STREAM_END_EVENT, self.onDisconnected)
  self.x = None
  self.log = log.logger()
  self.msghandlers = []
  port = config.PORT
  if config.CONNECT_SERVER: server = config.CONNECT_SERVER
  else: server = config.SERVER
  if config.USE_SSL:
   from twisted.internet import ssl
   reactor.connectSSL(server, port, self.c, ssl.ClientContextFactory())
  else: reactor.connectTCP(server, port, self.c)
Example #16
	def start(self, ENV = "dev", workflow = "Ping"):
		# Specify run environment settings
		settings = settingsLib.get_settings(ENV)
		
		# Log
		identity = "starter_%s" % int(random.random() * 1000)
		logFile = "starter.log"
		#logFile = None
		logger = log.logger(logFile, settings.setLevel, identity)
		
		# Simple connect
		conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
		if(workflow):
			(workflow_id, workflow_name, workflow_version, child_policy, execution_start_to_close_timeout, input) = self.get_workflow_params(workflow)
	
			logger.info('Starting workflow: %s' % workflow_id)
			try:
				response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
	
				logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
				
			except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
				# There is already a running workflow with that ID, cannot start another
				message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
				print message
				logger.info(message)
Example #17
def main():
    parser = OptionParser()
    parser.add_option("-e", "--env", default="dev", action="store", type="string", dest="env",
                      help="set the environment to run, either dev or live")
    (options, args) = parser.parse_args()
    if options.env:
        env = options.env

    global settings
    settings = settings_lib.get_settings(env)

    log_file = "process_dashboard_queue.log"
    global logger
    logger = log.logger(log_file, settings.log_level)

    # Simple connect
    queue = get_queue()

    pool = Pool(settings.event_queue_pool_size)

    while True:
        messages = queue.get_messages(num_messages=settings.event_queue_message_count, visibility_timeout=60,
                                      wait_time_seconds=20)
        if messages:
            logger.info(str(len(messages)) + " message received")
            pool.map(process_message, messages)
        else:
            logger.info("No messages received")
Example #18
    def start(self, settings, info=None):

        # TODO : much of this is common to many starters and could probably be streamlined
        # Log
        identity = "starter_%s" % int(random.random() * 1000)
        log_file = "starter.log"
        # logFile = None
        logger = log.logger(log_file, settings.setLevel, identity)

        if info.file_name is None:
            logger.error("Did not get a filename")
            return

        # Simple connect
        conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)

        # Start a workflow execution
        workflow_id = "NewS3File_%s" % info.file_name + str(int(random.random() * 1000))
        workflow_name = "NewS3File"
        workflow_version = "1"
        child_policy = None
        execution_start_to_close_timeout = None
        workflow_input = json.dumps(info, default=lambda ob: ob.__dict__)

        try:
            response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version,
                                                     settings.default_task_list, child_policy,
                                                     execution_start_to_close_timeout, workflow_input)

            logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))

        except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
            # There is already a running workflow with that ID, cannot start another
            message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
            logger.info(message)
Example #19
 def __init__(self, loginId=None, password=None):
     self._loginId = loginId
     self._password = password
     self._logined = False
     self.logger = log.logger(loginId)
     self.opener = buildOpener("resource/cookie_%s.log" % loginId)
     self.ignore_error = False
Example #20
 def __init__(self, name, config=None, options=None, args=None, logname=None, **kwargs):
     self.name = name
     self.__dict__.update(kwargs)
     if not options:
         options, args = _parser.parser('cskpUPufmvVFw').parse_args()
         args = [_decode(arg) for arg in args]
     self.options, self.args = options, args
     self.name = name
     self.logname = logname
     self.ql = None
     config2 = _config.CONFIG.copy()
     if config:
         config2.update(config)
     if getattr(options, 'config_file', None):
         options.config_file = os.path.abspath(options.config_file) # XXX useful during testing. could be generalized with optparse callback?
     if not getattr(options, 'service', True):
         options.foreground = True
     self.config = _config.Config(config2, service=name, options=options)
     self.config.data['server_socket'] = os.getenv("KOPANO_SOCKET") or self.config.data['server_socket']
     if getattr(options, 'worker_processes', None):
         self.config.data['worker_processes'] = options.worker_processes
     self.log = _log.logger(self.logname or self.name, options=self.options, config=self.config) # check that this works here or daemon may die silently XXX check run_as_user..?
     for msg in self.config.warnings:
         self.log.warn(msg)
     if self.config.errors:
         for msg in self.config.errors:
             self.log.error(msg)
         sys.exit(1)
     self.stats = collections.defaultdict(int, {'errors': 0})
     self._server = None
Example #21
def decide(ENV = "dev"):
	# Specify run environment settings
	settings = settingsLib.get_settings(ENV)
	
	# Decider event history length requested
	maximum_page_size = 100
	
	# Log
	identity = "decider_%s" % int(random.random() * 1000)
	logFile = "decider.log"
	#logFile = None
	logger = log.logger(logFile, settings.setLevel, identity)
	
	# Simple connect
	conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)

	token = None

	# Poll for a decision task
	while(True):
		if(token == None):
			logger.info('polling for decision...')
			
			decision = conn.poll_for_decision_task(settings.domain, settings.default_task_list, identity, maximum_page_size)
			
			# Check for a nextPageToken and keep polling until all events are pulled
			decision = get_all_paged_events(decision, conn, settings.domain, settings.default_task_list, identity, maximum_page_size)
			
			token = get_taskToken(decision)
			
			logger.info('got decision: [json omitted], token %s' % token)
			#logger.info('got decision: \n%s' % json.dumps(decision, sort_keys=True, indent=4))

			if(token != None):
				# Get the workflowType and attempt to do the work
				workflowType = get_workflowType(decision)
				if(workflowType != None):

					logger.info('workflowType: %s' % workflowType)

					# Instantiate an object for the workflow using eval
					# Build a string for the object name
					workflow_name = get_workflow_name(workflowType)
					
					# Attempt to import the module for the workflow
					if(import_workflow_class(workflow_name)):
						# Instantiate the workflow object
						workflow_object = get_workflow_object(workflow_name, settings, logger, conn, token, decision, maximum_page_size)
				
						# Process the workflow
						success = workflow_object.do_workflow()
						
						# Print the result to the log
						logger.info('%s success %s' % (workflow_name, success))
						
					else:
						logger.info('error: could not load object %s\n' % workflow_name)
						
		# Reset and loop
		token = None
Example #22
 def start(self, ENV = "dev"):
   # Specify run environment settings
   settings = settingsLib.get_settings(ENV)
   
   ping_marker_id = "cron_NewS3FiguresPDF"
   
   # Log
   logFile = "starter.log"
   logger = log.logger(logFile, settings.setLevel, ping_marker_id)
   
   # Data provider
   db = dblib.SimpleDB(settings)
   db.connect()
   
   # SWF meta data provider
   swfmeta = swfmetalib.SWFMeta(settings)
   swfmeta.connect()
   
   # Default, if cron never run before
   last_startTimestamp = 0
   
   # Get the last time this cron was run
   last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(workflow_id = ping_marker_id)
 
   # Start a ping workflow as a marker
   self.start_ping_marker(ping_marker_id, ENV)
 
   # Check for S3 PDF files that were updated since the last run
   date_format = "%Y-%m-%dT%H:%M:%S.000Z"
   
   # Quick hack - subtract 30 minutes to not ignore the top of the hour
   #   the time between S3Monitor running and this cron starter
   last_startTimestamp_minus_30 = last_startTimestamp - (60*30)
   if(last_startTimestamp_minus_30 < 0):
     last_startTimestamp_minus_30 = 0
   time_tuple = time.gmtime(last_startTimestamp_minus_30)
   
   last_startDate = time.strftime(date_format, time_tuple)
   
   logger.info('last run %s' % (last_startDate))
   
   S3_item_list = db.elife_get_article_S3_file_items(file_data_type = "figures", latest = True, last_updated_since = last_startDate)
   
   logger.info('Figures PDF files updated since %s: %s' % (last_startDate, str(len(S3_item_list))))
 
   if(len(S3_item_list) <= 0):
     # No new PDF
     pass
   else:
     # Found new PDF files
     
     # Start a PublishPDF starter
     try:
       starter_name = "starter_PublishFiguresPDF"
       self.import_starter_module(starter_name, logger)
       s = self.get_starter_module(starter_name, logger)
       s.start(ENV = ENV, last_updated_since = last_startDate)
     except:
       logger.info('Error: %s starting %s' % (ping_marker_id, starter_name))
       logger.exception('')
Example #23
 def start(self, ENV = "dev", workflow = "S3Monitor"):
   # Specify run environment settings
   settings = settingsLib.get_settings(ENV)
   
   # Log
   identity = "starter_%s" % int(random.random() * 1000)
   logFile = "starter.log"
   #logFile = None
   logger = log.logger(logFile, settings.setLevel, identity)
   
   # Simple connect
   conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
   if(workflow):
     (workflow_id, workflow_name, workflow_version, child_policy, execution_start_to_close_timeout, input) = self.get_workflow_params(workflow, settings)
 
     logger.info('Starting workflow: %s' % workflow_id)
     try:
       response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
 
       logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
       
     except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
       # There is already a running workflow with that ID, cannot start another
       message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
       print message
       logger.info(message)
Example #24
def detach(database):

    print("Detaching ", database + ".dbo")
    try:
        cursor2.execute(sqlNoResultsExpected)
        cursor.execute(sqlDETACH + database + sqlDETACHend)
        #print(sqlDETACH+database+sqlDETACHend)
        #print ("Great sucess?")
        collectionLog = (database, "successfully detached")
        log.logger(collectionLog)
        print(database, "successfully detached")
    except pyodbc.ProgrammingError as err:
        print(err)
        print("The Database is already detached")
        collErr = (database, "Already appears to be detached see err \n", err)
        log.logger(collErr)
Example #25
	def start(self, ENV = "dev"):
		# Specify run environment settings
		settings = settingsLib.get_settings(ENV)
		
		# Log
		identity = "starter_%s" % int(random.random() * 1000)
		logFile = "starter.log"
		#logFile = None
		logger = log.logger(logFile, settings.setLevel, identity)
		
		# Simple connect
		conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
	
		start = True
	
		# Start a workflow execution
		workflow_id = "LensIndexPublish"
		workflow_name = "LensIndexPublish"
		workflow_version = "1"
		child_policy = None
		execution_start_to_close_timeout = str(60*45)
		input = None
	
		if(start):
			response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
	
			logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
Example #26
 def __init__(self, version):
  #self.lastsent = {}
  #self.queues = {}
  self.version = version
  self.tc = 0
  self.th = {}
  self.sjid = u'%s@%s/%s' % (config.USER, config.SERVER, config.RESOURCE)
  self.jid = jid.JID(self.sjid)
  self.onauthd = None
  self.tryingSRV = True
  self.tryingNonSASL = False
  self.tryingSASL = True
  #self.c = client.basicClientFactory(self.jid, config.PASSWD)
  a = XMPPAndGoogleAuthenticator(self.jid, config.PASSWD, self)
  self.c = ClientFactory(a,self)
  self.c.maxRetries = 0
  self.c.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, self.authd)
  self.c.addBootstrap(xmlstream.INIT_FAILED_EVENT, self.initfailed)
  self.c.addBootstrap(xmlstream.STREAM_CONNECTED_EVENT, self.onConnected)
  self.c.addBootstrap(xmlstream.STREAM_END_EVENT, self.onDisconnected)
  self.x = None
  self.log = log.logger()
  self.handlers = []
  self.msghandlers = []
  #self.send_from_queue(True)
  #self.clean_queue()
  port = config.PORT
  server = config.CONNECT_SERVER
  if not server: server = config.SERVER
  self.connector = XMPPClientConnector(reactor, server, self.c, port)
  self.connector.connect()
Example #27
	def start(self, ENV = "dev"):
		# Specify run environment settings
		settings = settingsLib.get_settings(ENV)
		
		# Log
		identity = "starter_%s" % int(random.random() * 1000)
		logFile = "starter.log"
		#logFile = None
		logger = log.logger(logFile, settings.setLevel, identity)
		
		# Simple connect
		conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
	
		start = True
	
		# Start a workflow execution
		workflow_id = "LensIndexPublish"
		workflow_name = "LensIndexPublish"
		workflow_version = "1"
		child_policy = None
		execution_start_to_close_timeout = str(60*45)
		input = None
	
		if(start):
			response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
	
			logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
Example #28
    def start(self, settings, limit=None):
        # Log
        identity = "starter_%s" % int(random.random() * 1000)
        logFile = "starter.log"
        logger = log.logger(logFile, settings.setLevel, identity)

        # Simple connect
        conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)

        # Start a workflow execution
        workflow_id = "SendQueuedEmail"
        workflow_name = "SendQueuedEmail"
        workflow_version = "1"
        child_policy = None
        execution_start_to_close_timeout = None

        if limit:
            input = '{"data": {"limit": "' + limit + '"}}'
        else:
            input = None

        try:
            response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name,
                                                     workflow_version, settings.default_task_list,
                                                     child_policy, execution_start_to_close_timeout,
                                                     input)

            logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))

        except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
            # There is already a running workflow with that ID, cannot start another
            message = ('SWFWorkflowExecutionAlreadyStartedError: There is already ' +
                       'a running workflow with ID %s' % workflow_id)
            print message
            logger.info(message)
Example #29
    def test(self, command):
        """Should the alias be triggered by the text?"""
        match = self.re_alias.search(command)
        if match:
            log = logger("client")
            log.debug("Executing the alias {}".format(repr(self.alias)))

            engine = self.sharp_engine
            if "args" not in engine.locals:
                engine.locals["args"] = {}

            args = engine.locals["args"]

            # Copy the groups of this match
            i = 0
            for group in match.groups():
                i += 1
                args[str(i)] = group

            # Copy the named groups
            for name, group in match.groupdict().items():
                engine.locals[name] = group

            # Execute the alias
            self.execute()
            return True

        return False
Example #30
 def start(self, ENV = "dev"):
   # Specify run environment settings
   settings = settingsLib.get_settings(ENV)
   
   ping_marker_id = "cron_NewS3Suppl"
   
   # Log
   logFile = "starter.log"
   logger = log.logger(logFile, settings.setLevel, ping_marker_id)
   
   # Data provider
   db = dblib.SimpleDB(settings)
   db.connect()
   
   # SWF meta data provider
   swfmeta = swfmetalib.SWFMeta(settings)
   swfmeta.connect()
   
   # Default, if cron never run before
   last_startTimestamp = 0
   
   # Get the last time this cron was run
   last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(workflow_id = ping_marker_id)
 
   # Start a ping workflow as a marker
   self.start_ping_marker(ping_marker_id, ENV)
 
   # Check for S3 Suppl files that were updated since the last run
   date_format = "%Y-%m-%dT%H:%M:%S.000Z"
   
   # Quick hack - subtract 30 minutes to not ignore the top of the hour
   #   the time between S3Monitor running and this cron starter
   last_startTimestamp_minus_30 = last_startTimestamp - (60*30)
   if(last_startTimestamp_minus_30 < 0):
     last_startTimestamp_minus_30 = 0
   time_tuple = time.gmtime(last_startTimestamp_minus_30)
   
   last_startDate = time.strftime(date_format, time_tuple)
   
   logger.info('last run %s' % (last_startDate))
   
   S3_item_list = db.elife_get_article_S3_file_items(file_data_type = "suppl", latest = True, last_updated_since = last_startDate)
   
   logger.info('Suppl files updated since %s: %s' % (last_startDate, str(len(S3_item_list))))
 
   if(len(S3_item_list) <= 0):
     # No new suppl files
     pass
   else:
     # Found new suppl files

     # Start a PublishSuppl starter
     try:
       starter_name = "starter_PublishSuppl"
       self.import_starter_module(starter_name, logger)
       s = self.get_starter_module(starter_name, logger)
       s.start(ENV = ENV, last_updated_since = last_startDate)
     except:
       logger.info('Error: %s starting %s' % (ping_marker_id, starter_name))
       logger.exception('')
Example #31
    def handle(self, msg):

        self.chat_id = msg['chat']['id']  #WTF?
        self.command = msg['text']

        print(self.command, self.chat_id)

        data_logger = log.logger()
        data_logger.do_log(self.chat_id, self.command)

        #Check if this ID is registered
        if (self.password.check_id(self.chat_id) == True):
            print("Id rilevato in lista")
            self.id_check = 1
        else:
            self.id_check = 0

        if (self.id_check == 0):
            if (self.command == local_opt.PSWD):
                self.bot.sendMessage(self.chat_id, 'Chat registrata!')
                self.password.insert_db_file(self.chat_id)
                self.password.load_db_file()
            else:
                self.bot.sendMessage(self.chat_id,
                                     'Chat non registrata. Inserire password')

        elif self.id_check == 1:
            if self.command == '/get':
                self.img_take(self.chat_id)

            if self.command == '/start':
                self.send_periodic = True

            if self.command == '/stop':
                self.send_periodic = False
Example #32
    def start(self, settings):
        # Log
        identity = "starter_%s" % int(random.random() * 1000)
        logFile = "starter.log"
        #logFile = None
        logger = log.logger(logFile, settings.setLevel, identity)

        # Simple connect
        conn = boto.swf.layer1.Layer1(settings.aws_access_key_id,
                                      settings.aws_secret_access_key)

        # Start a workflow execution
        workflow_id = "DepositCrossref"
        workflow_name = "DepositCrossref"
        workflow_version = "1"
        child_policy = None
        execution_start_to_close_timeout = None
        input = None

        try:
            response = conn.start_workflow_execution(
                settings.domain, workflow_id, workflow_name, workflow_version,
                settings.default_task_list, child_policy,
                execution_start_to_close_timeout, input)

            logger.info('got response: \n%s' %
                        json.dumps(response, sort_keys=True, indent=4))

        except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
            # There is already a running workflow with that ID, cannot start another
            message = (
                'SWFWorkflowExecutionAlreadyStartedError: There is already ' +
                'a running workflow with ID %s' % workflow_id)
            print message
            logger.info(message)
Example #33
def main(flag):
    global settings
    global env
    parser = OptionParser()
    parser.add_option("-e", "--env", default="dev", action="store", type="string", dest="env",
                      help="set the environment to run, either dev or live")
    (options, args) = parser.parse_args()
    if options.env:
        env = options.env

    settings = settings_lib.get_settings(env)
    env = env

    log_file = "queue_workflow_starter.log"
    global logger
    logger = log.logger(log_file, settings.setLevel)

    # Simple connect
    queue = get_queue()

    while flag.green():
        messages = queue.get_messages(1, visibility_timeout=60,
                                      wait_time_seconds=20)
        if messages:
            logger.info(str(len(messages)) + " message received")
            logger.info('message contents: %s', messages[0])
            process_message(messages[0])
        else:
            logger.debug("No messages received")

    logger.info("graceful shutdown")
Example #34
    def start(self, settings, document=None, last_updated_since=None):
        # Log
        identity = "starter_%s" % int(random.random() * 1000)
        logFile = "starter.log"
        #logFile = None
        logger = log.logger(logFile, settings.setLevel, identity)

        # Simple connect
        conn = boto.swf.layer1.Layer1(settings.aws_access_key_id,
                                      settings.aws_secret_access_key)

        docs = None

        if document is not None:
            docs = []
            doc = {}
            doc["document"] = document
            docs.append(doc)

        elif last_updated_since is not None:
            # Publish only articles since the last_modified date, use SimpleDB as the source
            docs = self.get_docs_from_SimpleDB(
                settings, last_updated_since=last_updated_since)

        if docs:
            for doc in docs:

                document = doc["document"]

                # Get a unique id from the document name for the workflow_id
                id_string = None
                try:
                    id_string = document.split("_")[0]
                except:
                    id_string = "000"

                # Start a workflow execution
                workflow_id = "PackagePOA_%s" % (id_string)
                workflow_name = "PackagePOA"
                workflow_version = "1"
                child_policy = None
                execution_start_to_close_timeout = None
                input = '{"data": ' + json.dumps(doc) + '}'

                try:
                    response = conn.start_workflow_execution(
                        settings.domain, workflow_id, workflow_name,
                        workflow_version, settings.default_task_list,
                        child_policy, execution_start_to_close_timeout, input)

                    logger.info('got response: \n%s' %
                                json.dumps(response, sort_keys=True, indent=4))

                except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
                    # There is already a running workflow with that ID, cannot start another
                    message = (
                        'SWFWorkflowExecutionAlreadyStartedError: There is already '
                        + 'a running workflow with ID %s' % workflow_id)
                    print message
                    logger.info(message)
Example #35
    def start(self, settings, document=None, last_updated_since=None):
        # Log
        identity = "starter_%s" % int(random.random() * 1000)
        logFile = "starter.log"
        #logFile = None
        logger = log.logger(logFile, settings.setLevel, identity)

        # Simple connect
        conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)

        docs = None

        if document is not None:
            docs = []
            doc = {}
            doc["document"] = document
            docs.append(doc)

        elif last_updated_since is not None:
            # Publish only articles since the last_modified date, use SimpleDB as the source
            docs = self.get_docs_from_SimpleDB(settings, last_updated_since=last_updated_since)

        if docs:
            for doc in docs:

                document = doc["document"]

                # Get a unique id from the document name for the workflow_id
                id_string = None
                try:
                    id_string = document.split("_")[0]
                except:
                    id_string = "000"

                # Start a workflow execution
                workflow_id = "PackagePOA_%s" % (id_string)
                workflow_name = "PackagePOA"
                workflow_version = "1"
                child_policy = None
                execution_start_to_close_timeout = None
                input = '{"data": ' + json.dumps(doc) + '}'

                try:
                    response = conn.start_workflow_execution(settings.domain, workflow_id,
                                                             workflow_name, workflow_version,
                                                             settings.default_task_list,
                                                             child_policy,
                                                             execution_start_to_close_timeout,
                                                             input)

                    logger.info('got response: \n%s' %
                                json.dumps(response, sort_keys=True, indent=4))

                except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
                    # There is already a running workflow with that ID, cannot start another
                    message = ('SWFWorkflowExecutionAlreadyStartedError: There is already ' +
                               'a running workflow with ID %s' % workflow_id)
                    print message
                    logger.info(message)
Example #36
    def get_movies(self, start_num, tag, data=None):
        # Avoid a shared mutable default argument; build the default dict per call
        if data is None:
            data = {
                'type': 'movie',
                'sort': 'recommend',
                'page_limit': options.page_limit,
            }

        data['tag'] = tag
        for x in range(start_num, options.max_page):
            self.x = x
            data['page_start'] = x * options.page_limit
            params = urllib.parse.urlencode(data)
            log.logger().info("get: " + self.__urls["search"] + "?" + params)
            res = self.opener.open(self.__urls["search"] + "?" + params, timeout = self.timeout)
            result = res.read()
            subjects_json = json.loads(result.decode())
            self.push_to_queue(subjects_json["subjects"])
            sleep(1)
Example #37
 def execute(self):
     """Execute the alias."""
     try:
         self.sharp_engine.execute(self.action, variables=True)
     except Exception:
         log = logger("client")
         log.exception("An error occurred while executing the alias " \
                 "{}".format(repr(self.alias)))
Example #38
 def execute(self):
     """Execute the alias."""
     try:
         self.sharp_engine.execute(self.action, variables=True)
     except Exception:
         log = logger("client")
         log.exception("An error occurred while executing the alias " \
                 "{}".format(repr(self.alias)))
Example #39
 def __init__(self):
     self.logger = logger("")
     begin()
     self.settings = Settings(self)
     self.worlds = {}
     self.default_world = None
     self.level = Level.engine
     self.logger.info("CocoMUD engine started")
Example #40
def scanFolderIgnore(folder):
    #flag = 0
    badFiles = []
    for dirpath, dirs, filenames in os.walk(folder, topdown=True):
        dirs[:] = [d for d in dirs if d not in settings.exclude]
        for filename in [f for f in filenames if f.endswith(find)]:
            #flag =1

            toLog = (os.path.join(dirpath, filename))
            log.logger(toLog)
            #badFolder = (os.path.join(dirpath))

            #badFiles.append((dirpath,filename))
            badFiles.append(toLog)
    #print(badFiles)
    if badFiles:
        return badFiles
Example #41
def main(dirname=None):
    logger = log.logger()
    logger.info('Running model.py directly to produce schema/goose output.')
    conn = SQL(connect=False)
    fnames = conn.goose_write(dirname)
    logger.info(
        'Finished running model.py directly to produce schema/goose output.')
    return fnames
Example #42
def main():
    global logger
    cl_opts = parse_command_line()
    log.setup(cl_opts.job_dir)
    logger = log.logger()
    logger.info('=== New session ===')
    j = job.Job(cl_opts.job_dir, cl_opts.restart)
    j.send_all()
    def start(self, ENV = "dev", bucket = None, document = None):
        # Specify run environment settings
        settings = settingsLib.get_settings(ENV)
        
        # Log
        identity = "starter_%s" % int(random.random() * 1000)
        logFile = "starter.log"
        #logFile = None
        logger = log.logger(logFile, settings.setLevel, identity)
        
        # Simple connect
        conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
    
        docs = None
    
        if(document is not None):
            docs = []
            doc = {}
            doc["document"] = document
            if bucket is not None:
                doc["bucket"] = bucket
            docs.append(doc)
        
        if(docs):
            for doc in docs:
                
                document = doc["document"]

                # Get a unique id from the document name for the workflow_id
                id_string = None
                try:
                    id_string = ''
                    document_file = document.split("/")[-1]
                    if "bucket" in doc:
                        id_string += doc['bucket'] + '_'
                    id_string += document_file.split("_")[0]
                except:
                    id_string = "000"
        
                # Start a workflow execution
                workflow_id = "PMCDeposit_%s" % (id_string)
                workflow_name = "PMCDeposit"
                workflow_version = "1"
                child_policy = None
                execution_start_to_close_timeout = None
                input = '{"data": ' + json.dumps(doc) + '}'
                
                try:
                    response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
        
                    logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
                    
                except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
                    # There is already a running workflow with that ID, cannot start another
                    message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
                    print message
                    logger.info(message)
Example #44
    def start(self, settings):

        ping_marker_id = "cron_NewS3POA"

        # Log
        logFile = "starter.log"
        logger = log.logger(logFile, settings.setLevel, ping_marker_id)

        # Data provider
        db = dblib.SimpleDB(settings)
        db.connect()

        # SWF meta data provider
        swfmeta = swfmetalib.SWFMeta(settings)
        swfmeta.connect()

        last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(
            workflow_id=ping_marker_id)

        # Start a ping workflow as a marker
        self.start_ping_marker(ping_marker_id, settings)

        # Check for S3 XML files that were updated since the last run
        date_format = "%Y-%m-%dT%H:%M:%S.000Z"

        # Quick hack - subtract 15 minutes,
        #   the time between S3Monitor running and this cron starter
        last_startTimestamp_minus_15 = last_startTimestamp - (60 * 15)
        time_tuple = time.gmtime(last_startTimestamp_minus_15)

        last_startDate = time.strftime(date_format, time_tuple)

        logger.info('last run %s' % (last_startDate))

        xml_item_list = db.elife_get_POA_delivery_S3_file_items(
            last_updated_since=last_startDate)

        logger.info('POA files updated since %s: %s' %
                    (last_startDate, str(len(xml_item_list))))

        if len(xml_item_list) <= 0:
            # No new XML
            pass
        else:
            # Found new XML files

            # Start a PackagePOA starter
            try:
                starter_name = "starter_PackagePOA"
                self.import_starter_module(starter_name, logger)
                s = self.get_starter_module(starter_name, logger)
                s.start(settings=settings, last_updated_since=last_startDate)
            except:
                logger.info('Error: %s starting %s' %
                            (ping_marker_id, starter_name))
                logger.exception('')
Example #45
 def create_student(self):  # create a student account
     with open(settings.USER_FILE, 'rb') as f:
         user_dict = pickle.loads(f.read())
     username = input('输入学员账号:').strip()
     if user_dict.get(username):
         print('用户名已存在')
         return
     password = input('为学员账号创建密码:').strip()
     username_obj = student(username)
     user_dict[username] = {
         'pwd': password,
         'status': 0,
         'role': 'student',
         'username_obj': username_obj
     }
     with open(settings.USER_FILE, 'wb') as f:
         f.write(pickle.dumps(user_dict))
     print('创建学员{}账号成功'.format(username))
     logger('{} create an account {}'.format(self.name, username))
Example #46
    def __init__(self, sharp, reaction, action):
        self.sharp_engine = sharp
        self.reaction = reaction
        self.re_reaction = self.find_regex(reaction)
        self.action = dedent(action.strip("\n"))
        self.mute = False
        self.logger = logger("sharp")

        # Set the trigger's level
        self.level = sharp.engine.level
Example #47
    def OnInput(self, message):
        """Some text has been sent from the input."""
        if self.world:
            self.world.reset_autocompletion()

        try:
            self.client.write(message)
        except Exception:
            log = logger("client")
            log.exception("An error occurred when sending a message")
Example #48
def main(dirname=None):
    """Main entrypoint that builds and stores our models."""

    logger = log.logger()
    logger.info('Running model.py directly to produce schema/goose output.')
    conn = FBO(connect=False)
    fnames = conn.goose_write(dirname)
    logger.info(
        'Finished running model.py directly to produce schema/goose output.')
    return fnames
Example #49
def get_movie_url_from_bilibili():
    betelgeuse = Betelgeuse()
    bellatrix = Bellatrix(options.appkey, options.appsecret)

    while True:
        available = model.MovieQueue.select().where(model.MovieQueue.state == 1).count()
        if available > 0:
            prepare_movies =  model.MovieQueue.select().where(model.MovieQueue.state == 1).limit(100)
            for m in prepare_movies:
                movie = Alnilam.get_first_movie(m.douban_id)
                try:
                    search_reault = bellatrix.search(movie.title)
                    if search_reault.is_done:
                        match_movie = False
                        for sr in search_reault.result:
                            if allow_type(sr["typename"]):
                                match_movie = True
                                detail = bellatrix.view(sr["aid"])
                                pages = detail.pages
                                for page in range(1, pages+1):
                                    if hasattr(detail, 'cid'):
                                        detail.download, detail.expires = bellatrix.build_download_url(detail.cid)
                                    with model.database.atomic() as txn:
                                        try:
                                            betelgeuse.build_bilibili(sr, m.douban_id)
                                            betelgeuse.replenish_bilibili(sr["aid"], detail)
                                            m.state = 2
                                            m.save()
                                        except Exception as e:
                                            log.logger().error(e)
                                            txn.rollback()
                                    detail = bellatrix.view(sr["aid"], page + 1)
                                    sleep(2)
                                sleep(2)
                        if not match_movie:
                            m.state = 3
                            m.save()
                    sleep(2)
                except Exception as e:
                    log.logger().error(e)
        else:
            sleep(2)
Example #50
    def start(self, ENV="dev"):
        # Specify run environment settings
        settings = settingsLib.get_settings(ENV)

        ping_marker_id = "cron_FiveMinute"

        # Log
        logFile = "starter.log"
        logger = log.logger(logFile, settings.setLevel, ping_marker_id)

        # Data provider
        db = dblib.SimpleDB(settings)
        db.connect()

        # SWF meta data provider
        swfmeta = swfmetalib.SWFMeta(settings)
        swfmeta.connect()

        last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(
            workflow_id=ping_marker_id)

        # Start a ping workflow as a marker
        self.start_ping_marker(ping_marker_id, ENV)

        # Check for S3 XML files that were updated since the last run
        date_format = "%Y-%m-%dT%H:%M:%S.000Z"

        # Date conversion
        time_tuple = time.gmtime(last_startTimestamp)
        last_startDate = time.strftime(date_format, time_tuple)

        logger.info('last run %s %s' % (ping_marker_id, last_startDate))

        # A conditional start for SendQueuedEmail
        #  Only start a workflow if there are emails in the queue ready to send
        item_list = db.elife_get_email_queue_items(
            query_type="count", date_scheduled_before=last_startDate)

        try:
            if (int(item_list[0]["Count"]) > 0):
                # At least one email in the queue, start a workflow
                try:
                    starter_name = "starter_SendQueuedEmail"
                    self.import_starter_module(starter_name, logger)
                    s = self.get_starter_module(starter_name, logger)
                    s.start(ENV=ENV)
                except:
                    logger.info('Error: %s starting %s' %
                                (ping_marker_id, starter_name))
                    logger.exception('')
        except:
            # Some error
            logger.info('Exception encountered starting %s: %s' %
                        (ping_marker_id, last_startDate))
Example #51
 def run(self):
     """Run the thread."""
     # Try to connect to the specified host and port
     host = self.factory.world.hostname
     port = self.factory.world.port
     protocol = self.factory.world.protocol.lower()
     protocol = "SSL" if protocol == "ssl" else "telnet"
     log = logger("client")
     log.info("Connecting {protocol} client for {host}:{port}".format(
         protocol=protocol, host=host, port=port))
     self.running = True
Example #52
 def connectionLost(self, reason):
     """The connection was lost."""
     self.send_queue()
     host = self.transport.getPeer().host
     port = self.transport.getPeer().port
     log = logger("client")
     log.info("Lost Connection on {host}:{port}: {reason}".format(
         host=host, port=port, reason=reason.type))
     wx.CallAfter(pub.sendMessage, "disconnect", client=self, reason=reason)
     if reason.type is ConnectionDone:
         self.factory.stopTrying()
Example #53
    def start(self, settings):

        ping_marker_id = "cron_NewS3POA"

        # Log
        logFile = "starter.log"
        logger = log.logger(logFile, settings.setLevel, ping_marker_id)

        # Data provider
        db = dblib.SimpleDB(settings)
        db.connect()

        # SWF meta data provider
        swfmeta = swfmetalib.SWFMeta(settings)
        swfmeta.connect()

        last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(
            workflow_id=ping_marker_id)

        # Start a ping workflow as a marker
        self.start_ping_marker(ping_marker_id, settings)

        # Check for S3 XML files that were updated since the last run
        date_format = "%Y-%m-%dT%H:%M:%S.000Z"

        # Quick hack - subtract 15 minutes,
        #   the time between S3Monitor running and this cron starter
        last_startTimestamp_minus_15 = last_startTimestamp - (60 * 15)
        time_tuple = time.gmtime(last_startTimestamp_minus_15)

        last_startDate = time.strftime(date_format, time_tuple)

        logger.info('last run %s' % (last_startDate))

        xml_item_list = db.elife_get_POA_delivery_S3_file_items(
            last_updated_since=last_startDate)

        logger.info('POA files updated since %s: %s' % (last_startDate, str(len(xml_item_list))))

        if len(xml_item_list) <= 0:
            # No new XML
            pass
        else:
            # Found new XML files

            # Start a PackagePOA starter
            try:
                starter_name = "starter_PackagePOA"
                self.import_starter_module(starter_name, logger)
                s = self.get_starter_module(starter_name, logger)
                s.start(settings=settings, last_updated_since=last_startDate)
            except:
                logger.info('Error: %s starting %s' % (ping_marker_id, starter_name))
                logger.exception('')
Example #54
 def __init__(self, use_cache: bool):
     """
     :param use_cache: will use cache or not
     :type <bool>
     """
     self.use_cache = use_cache
     self.logger = logger('state', 'states.log', 'INFO')
     if use_cache:
         self.cache = Cache(use_cache)
     self.logger.info(
         f"Now param use_cache in Cache class instance is {self.use_cache}")
Example #55
 def __init__(self):
  self.tc = 0
  self.th = {}
  self.jid = jid.JID("%s@%s/%s" % (config.USER, config.SERVER, config.RESOURCE))
  self.onauthd = None
  self.c = client.basicClientFactory(self.jid, config.PASSWD)
  self.c.addBootstrap(xmlstream.STREAM_AUTHD_EVENT, self.authd);
  self.x = None
  self.log = log.logger()
  self.handlers = []
  self.msghandlers = []
  reactor.connectTCP(config.SERVER, 5222, self.c) 
Example #56
  def start(self, ENV = "dev"):
    # Specify run environment settings
    settings = settingsLib.get_settings(ENV)
    
    ping_marker_id = "cron_FiveMinute"
    
    # Log
    logFile = "starter.log"
    logger = log.logger(logFile, settings.setLevel, ping_marker_id)
    
    # Data provider
    db = dblib.SimpleDB(settings)
    db.connect()
    
    # SWF meta data provider
    swfmeta = swfmetalib.SWFMeta(settings)
    swfmeta.connect()
    
    last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(workflow_id = ping_marker_id)
  
    # Start a ping workflow as a marker
    self.start_ping_marker(ping_marker_id, ENV)
  
    # Check for S3 XML files that were updated since the last run
    date_format = "%Y-%m-%dT%H:%M:%S.000Z"
    
    # Date conversion
    time_tuple = time.gmtime(last_startTimestamp)
    last_startDate = time.strftime(date_format, time_tuple)
    
    logger.info('last run %s %s' % (ping_marker_id, last_startDate))
    
    # A conditional start for SendQueuedEmail
    #  Only start a workflow if there are emails in the queue ready to send
    item_list = db.elife_get_email_queue_items(
      query_type = "count",
      date_scheduled_before = last_startDate)

    try:
      if(int(item_list[0]["Count"]) > 0):
        # At least one email in the queue, start a workflow
        try:
          starter_name = "starter_SendQueuedEmail"
          self.import_starter_module(starter_name, logger)
          s = self.get_starter_module(starter_name, logger)
          s.start(ENV = ENV)
        except:
          logger.info('Error: %s starting %s' % (ping_marker_id, starter_name))
          logger.exception('')
    except:
      # Some error
      logger.info('Exception encountered starting %s: %s' % (ping_marker_id, last_startDate))
Example #57
    def run(self, filename):
        """Play the audio file."""
        log = logger("sharp")
        log.debug("#play {}".format(filename))
        filename = self.find_abs_filename(filename)
        if os.path.exists(filename):
            log.debug("#play playing {}".format(filename))
        else:
            log.warning("#play cannot find the file at {}".format(
                    filename))

        sound = mixer.Sound(filename)
        sound.play()
Example #58
def get_movie_detail():
    meissa = Meissa()
    alnilam = Alnilam()

    while True:
        available = model.MovieQueue.select().where(model.MovieQueue.state == 0).count()
        if available > 0:
            prepare_movies =  model.MovieQueue.select().where(model.MovieQueue.state == 0).limit(100)
            for m in prepare_movies:
                detail = meissa.get_detail(m.douban_id)
                with model.database.atomic() as txn:
                    try:
                        alnilam.generate_movie(detail)
                        m.state = 1
                        m.save()
                        log.logger().info(detail.id + " " + detail.title)
                    except Exception as e:
                        log.logger().error(e)
                        txn.rollback()
                sleep(2)
        else:
            sleep(2)
Example #59
	def start(self, ENV = "dev", all = None, last_updated_since = None, docs = None, doi_id = None):
		# Specify run environment settings
		settings = settingsLib.get_settings(ENV)
		
		# Log
		identity = "starter_%s" % int(random.random() * 1000)
		logFile = "starter.log"
		#logFile = None
		logger = log.logger(logFile, settings.setLevel, identity)
		
		# Simple connect
		conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
	
		if(all == True):
			# Publish all articles, use SimpleDB as the source
			docs = self.get_docs_from_SimpleDB(ENV)
	
		elif(doi_id is not None):
			docs = self.get_docs_from_SimpleDB(ENV, doi_id = doi_id)
			
		elif(last_updated_since is not None):
			# Publish only articles since the last_modified date, use SimpleDB as the source
			docs = self.get_docs_from_SimpleDB(ENV, last_updated_since = last_updated_since)

		if(docs):
			for doc in docs:
				
				document = doc["document"]
				elife_id = doc["elife_id"]
		
				id_string = elife_id
		
				# Start a workflow execution
				workflow_id = "PublishSuppl_%s" % (id_string)
				workflow_name = "PublishSuppl"
				workflow_version = "1"
				child_policy = None
				execution_start_to_close_timeout = str(60*30)
				input = '{"data": ' + json.dumps(doc) + '}'
		
				try:
					response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
		
					logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
					
				except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
					# There is already a running workflow with that ID, cannot start another
					message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
					print message
					logger.info(message)