Example #1
 def handle_event(self, event):
     global run
     global scene
     if event == pygame.QUIT:
         scene = scenes['LoginMenu']
     if 'click' in self.close_button.handleEvent(
             event):  # Exit to the login menu
         scene = scenes['LoginMenu']
     if not self.is_new_game_pressed:
         if 'click' in self.new_game_button.handleEvent(
                 event):  # Start a new game
             self.is_new_game_pressed = True
     else:
         if 'click' in self.difficulty_easy_button.handleEvent(event):
             settings.update_setting('difficulty', '0')
             player_name = settings.get_settings().get('player_name')
             savegame.save(player_name
                          )  # Create an empty save file for the player
             scene = scenes['GameProcess']
         if 'click' in self.difficulty_medium_button.handleEvent(event):
             settings.update_setting('difficulty', '1')
             player_name = settings.get_settings().get('player_name')
             savegame.save(player_name
                          )  # Create an empty save file for the player
             scene = scenes['GameProcess']
         if 'click' in self.difficulty_hard_button.handleEvent(event):
             settings.update_setting('difficulty', '2')
             player_name = settings.get_settings().get('player_name')
             savegame.save(player_name
                          )  # Create an empty save file for the player
             scene = scenes['GameProcess']
     if 'click' in self.load_game_button.handleEvent(
             event):  # Load a saved game
         scene = scenes['GameProcess']
Example #2
def get_api_key_and_url(gfw_env):

    key = util.get_token(settings.get_settings(gfw_env)['cartodb']['token'])
    api_url = settings.get_settings(gfw_env)["cartodb"]["sql_api"]
    sync_api = settings.get_settings(gfw_env)["cartodb"]["synchronization_api"]

    return key, api_url, sync_api
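
Several of the gfw-sync examples on this page index a nested dict returned by settings.get_settings(gfw_env). A minimal stub with that shape can be useful for exercising get_api_key_and_url in isolation; the JSON file name and loading mechanism below are assumptions, and only the keys come from the calls above.

import json

def get_settings(gfw_env):
    # Hypothetical stand-in: load a per-environment config whose nested keys
    # mirror what the examples read, e.g. settings['cartodb']['sql_api'].
    with open('config_{0}.json'.format(gfw_env)) as f:  # file name is assumed
        return json.load(f)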
Example #3
def render_kanban(issues):
    # generator can only be traversed once
    issues = list(issues)
    issues.sort(key=lambda issue: -get_sort_value(issue))
    box_rows = get_settings()[BOX_ROWS]
    box_width = get_settings()[BOX_WIDTH]
    statuses = get_board_settings()[STATUS_COLUMNS]
    rows = defaultdict(list)
    status_counter = Counter()
    for issue in issues:
        if STATUS in issue.keys() and issue[STATUS] in statuses:
            rows[issue[STATUS]].append(render_issue(issue, box_width,
                                                    box_rows))
            status_counter[issue[STATUS]] += 1
    hline = (1 + len(statuses) * (3 + box_width)) * '-'
    lines = [hline]
    status_line = '| ' + ' | '.join(
        [render_text(status, box_width) for status in statuses]) + ' |'
    lines.append(status_line)
    lines.append(hline)
    if len(status_counter) == 0:
        return lines
    board_height = max(status_counter.values())
    for height in range(min(board_height, get_settings()[MAX_BOARD_ROWS])):
        for sub_row in range(box_rows):
            line = '| ' + ' | '.join([
                fill_missing(rows[status], height, box_width, sub_row)
                for status in statuses
            ]) + ' |'
            lines.append(line)
        lines.append(hline)
    if board_height > get_settings()[MAX_BOARD_ROWS]:
        lines.append(' ...')
        lines.append(' ...')
    return lines
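
The kanban examples on this page index the settings dict through module-level key constants. A plausible minimal set, inferred from how they are used; the string values are assumptions.

BOX_ROWS = 'box_rows'              # text lines per issue card
BOX_WIDTH = 'box_width'            # characters per issue card
MAX_BOARD_ROWS = 'max_board_rows'
STATUS_COLUMNS = 'status_columns'
STATUS = 'status'
TITLE = 'title'
EDITOR = 'editor'
DATADIR = 'datadir'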
Example #4
def edit_and_validate_settings(dotfile):
    subprocess.run([get_settings()[EDITOR], dotfile])
    try:
        get_settings()
        return True
    except Exception as e:
        print(e)
        return False
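
A plausible call site for the helper above, assuming get_settings() re-parses the dotfile and raises when its contents are invalid; the loop and prompt are illustrative, not the project's code.

def edit_settings_until_valid(dotfile):
    # Re-open the editor until the settings file parses cleanly.
    while not edit_and_validate_settings(dotfile):
        input("Settings invalid, press ENTER to edit again.")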
Example #5
File: gui.py Project: tinavas/FSERP
 def __init__(self):
     self.mainwindow = QMainWindow()
     settings.get_settings()
     self.access = tuple(settings.access.items())
     self.progress = QProgressDialog("Setting up modules...", "cancel", 0,
                                     7, self.mainwindow)
     self.progress.setWindowTitle(
         QApplication.translate("MainWindow", str(settings.company), None,
                                QApplication.UnicodeUTF8))
Example #6
def get_userheader():
    options = settings.get_settings()
    templates = []
    for fn in settings.get_settings().user_headers:
        filename = utils.which(fn, options.user_data_dirs)
        if filename:
            templates.append(USERHEADER_INFO % fn)
            templates.append(open(filename).read())
    return string.join(templates, '\n')
Example #7
def get_userheader():
    options = settings.get_settings()
    templates = []
    for fn in settings.get_settings().user_headers:
        filename = utils.which(fn, options.user_data_dirs)
        if filename:
            templates.append(USERHEADER_INFO % fn)
            templates.append(open(filename).read())
    return string.join(templates, '\n')
Example #8
def my_bot():
    GAME = Trade()
    while True:
        try:
            line = input()
        except (KeyboardInterrupt, EOFError):
            break
        av = line.split(' ')
        if GAME.settings.full is False:
            get_settings(GAME, av)
        else:
            get_commands(GAME, av)
Example #9
def check_token_get_auth(token: str) -> Auth:
    if not token:
        raise HTTPException(status_code=401, detail="Auth invalid")
    logger.info("check_token_get_auth")
    utils = UtilsForService()
    jwt_settings = get_settings()._jwt_settings
    data = utils.decode_token(token, jwt_settings)
    if "username" in data and "password" in data:
        auth = Auth(username=data['username'],
                    password=data['password'],
                    base_url_ws=get_settings().base_url_ws)
        return auth
    else:
        logger.error("jwt string invalid")
        raise HTTPException(status_code=401, detail="Auth invalid")
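
A sketch of wiring the checker above into a FastAPI route; the route path and the X-Token header mapping are assumptions.

from fastapi import FastAPI, Header

app = FastAPI()

@app.get("/me")
def me(x_token: str = Header(None)):
    # check_token_get_auth (above) raises HTTPException(401) on bad tokens.
    auth = check_token_get_auth(x_token)
    return {"username": auth.username}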
Example #10
def decide(ENV = "dev"):
	# Specify run environment settings
	settings = settingsLib.get_settings(ENV)
	
	# Decider event history length requested
	maximum_page_size = 100
	
	# Log
	identity = "decider_%s" % int(random.random() * 1000)
	logFile = "decider.log"
	#logFile = None
	logger = log.logger(logFile, settings.setLevel, identity)
	
	# Simple connect
	conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)

	token = None

	# Poll for a decision task
	while(True):
		if(token == None):
			logger.info('polling for decision...')
			
			decision = conn.poll_for_decision_task(settings.domain, settings.default_task_list, identity, maximum_page_size)
			
			# Check for a nextPageToken and keep polling until all events are pulled
			decision = get_all_paged_events(decision, conn, settings.domain, settings.default_task_list, identity, maximum_page_size)
			
			token = get_taskToken(decision)
			
			logger.info('got decision: [json omitted], token %s' % token)
			#logger.info('got decision: \n%s' % json.dumps(decision, sort_keys=True, indent=4))

			if(token != None):
				# Get the workflowType and attempt to do the work
				workflowType = get_workflowType(decision)
				if(workflowType != None):

					logger.info('workflowType: %s' % workflowType)

					# Instantiate and object for the workflow using eval
					# Build a string for the object name
					workflow_name = get_workflow_name(workflowType)
					
					# Attempt to import the module for the workflow
					if(import_workflow_class(workflow_name)):
						# Instantiate the workflow object
						workflow_object = get_workflow_object(workflow_name, settings, logger, conn, token, decision, maximum_page_size)
				
						# Process the workflow
						success = workflow_object.do_workflow()
						
						# Print the result to the log
						logger.info('%s success %s' % (workflow_name, success))
						
					else:
						logger.info('error: could not load object %s\n' % workflow_name)
						
		# Reset and loop
		token = None
Example #11
def pull_cmd():
    cmd = ['git', 'pull']
    full_cmd = ' '.join(cmd)
    try:
        ret = subprocess.run(cmd, cwd=get_settings()[DATADIR])
    except Exception as exc:
        raise Exception("Something went wrong with {}".format(full_cmd)) from exc
Example #12
def main():
    parser = OptionParser()
    parser.add_option("-e", "--env", default="dev", action="store", type="string", dest="env",
                      help="set the environment to run, either dev or live")
    (options, args) = parser.parse_args()
    if options.env:
        env = options.env

    global settings
    settings = settings_lib.get_settings(env)

    log_file = "process_dashboard_queue.log"
    global logger
    logger = log.logger(log_file, settings.log_level)

    # Simple connect
    queue = get_queue()

    pool = Pool(settings.event_queue_pool_size)

    while True:
        messages = queue.get_messages(num_messages=settings.event_queue_message_count, visibility_timeout=60,
                                      wait_time_seconds=20)
        if messages is not None:
            logger.info(str(len(messages)) + " message received")
            pool.map(process_message, messages)
        else:
            logger.info("No messages received")
Example #13
 def start(self, ENV = "dev"):
   # Specify run environment settings
   settings = settingsLib.get_settings(ENV)
   
   ping_marker_id = "cron_NewS3Suppl"
   
   # Log
   logFile = "starter.log"
   logger = log.logger(logFile, settings.setLevel, ping_marker_id)
   
   # Data provider
   db = dblib.SimpleDB(settings)
   db.connect()
   
   # SWF meta data provider
   swfmeta = swfmetalib.SWFMeta(settings)
   swfmeta.connect()
   
   # Get the last time this cron was run
   last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(workflow_id = ping_marker_id)
   
   # Default, if the cron has never run before
   if last_startTimestamp is None:
     last_startTimestamp = 0
 
   # Start a ping workflow as a marker
   self.start_ping_marker(ping_marker_id, ENV)
 
   # Check for S3 Suppl files that were updated since the last run
   date_format = "%Y-%m-%dT%H:%M:%S.000Z"
   
   # Quick hack - subtract 30 minutes to not ignore the top of the hour
   #   the time between S3Monitor running and this cron starter
   last_startTimestamp_minus_30 = last_startTimestamp - (60*30)
   if(last_startTimestamp_minus_30 < 0):
     last_startTimestamp_minus_30 = 0
   time_tuple = time.gmtime(last_startTimestamp_minus_30)
   
   last_startDate = time.strftime(date_format, time_tuple)
   
   logger.info('last run %s' % (last_startDate))
   
   S3_item_list = db.elife_get_article_S3_file_items(file_data_type = "suppl", latest = True, last_updated_since = last_startDate)
   
   logger.info('Suppl files updated since %s: %s' % (last_startDate, str(len(S3_item_list))))
 
   if(len(S3_item_list) <= 0):
     # No new suppl files
     pass
   else:
     # Found new suppl files
     
     # Start a PublishSuppl starter
     try:
       starter_name = "starter_PublishSuppl"
       self.import_starter_module(starter_name, logger)
       s = self.get_starter_module(starter_name, logger)
       s.start(ENV = ENV, last_updated_since = last_startDate)
     except:
       logger.info('Error: %s starting %s' % (ping_marker_id, starter_name))
       logger.exception('')
Example #14
	def post( self, bot_name ):
		
		creds = twitter.get_twitter_creds( bot_name )

		if not self.authenticate_user( creds ):
			self.render_notloggedin()
		else:
			bot_settings = settings.get_settings( creds )
			bot_settings.learning_style = self.request.get( 'learnfrom' )
			bot_settings.learning_guru = self.request.get( 'guru_name' )
			bot_settings.locquacity_onschedule = self.request.get( 'locquacity_onschedule' ) == "true"
			bot_settings.locquacity_reply = self.request.get( 'locquacity_reply' ) == "true"
			bot_settings.locquacity_speakonnew = self.request.get( 'locquacity_speakonnew' ) == "true"
			bot_settings.learn_retrospectively = self.request.get( 'learn_retrospectively' ) == "true"

			gn = self.request.get( 'locquacity_greetnew' ) == "true"
			logging.debug( 'SettingsHandler.post(): locquacity_greetnew=%s, bot_settings.locquacity_greetnew=%s' % (gn, bot_settings.locquacity_greetnew) )
			if gn and not bot_settings.locquacity_greetnew:
				logging.debug( '-> fetch follower ids' )
				api = twitter.get_api( creds )
				follower_ids = api.followers_ids()
				creds.follower_ids = follower_ids
				creds.put()
			bot_settings.locquacity_greetnew = gn
			
			tweet_frequency = self.request.get( 'tweet_frequency' )
			if tweet_frequency is not None and len(tweet_frequency) > 0:
				bot_settings.tweet_frequency = float( tweet_frequency )
			tweet_chance = self.request.get( 'tweet_chance' )
			if tweet_chance is not None and len(tweet_chance) > 0:
				bot_settings.tweet_chance = float( tweet_chance )
			self.render_template( creds, bot_settings, { "saved" : True } )
			bot_settings.creds = creds
			bot_settings.put()
Example #15
def add_test_users():
    network = get_network()
    settings = get_settings(quiet=True)

    command = get_docker_run_cmd(network)

    password_group = settings['password_group']
    if password_group is not None:
        command += add_secure_config(password_group)

    name = get_image_name(montagu_cli, versions.api)

    run_cmd(command, name, [
        "add", "Test Admin", "test.admin", "*****@*****.**",
        "password", "--if-not-exists"
    ])
    run_cmd(command, name, ["addRole", "test.admin", "user"])
    run_cmd(command, name, ["addRole", "test.admin", "touchstone-reviewer"])
    run_cmd(command, name, ["addRole", "test.admin", "admin"])

    run_cmd(command, name, [
        "add", "Test Modeller", "test.modeller",
        "*****@*****.**", "password", "--if-not-exists"
    ])
    run_cmd(command, name, ["addRole", "test.modeller", "user"])
    run_cmd(command, name, ["addUserToGroup", "test.modeller", "IC-Garske"])
    run_cmd(command, name,
            ["addUserToGroup", "test.modeller", "Harvard-Sweet"])
Example #16
 def get_docs_from_SimpleDB(self, ENV = "dev", last_updated_since = None):
     """
     Get the array of docs from the SimpleDB provider
     """
     docs = []
 
     # Specify run environment settings
     settings = settingsLib.get_settings(ENV)
     
     db = dblib.SimpleDB(settings)
     db.connect()
     
     if(last_updated_since is not None):
         xml_item_list = db.elife_get_POA_delivery_S3_file_items(last_updated_since = last_updated_since)
     else:
         # Get all - not implemented for now, to avoid mistakenly running too many workflows
         xml_item_list = []
         
     for x in xml_item_list:
         tmp = {}
         name = x['name']
         tmp['document'] = name
         docs.append(tmp)
     
     return docs
Example #17
def is_disallowed_reaction(r: str) -> bool:
    if r[0] in "-+":
        as_int = try_int(r[1:])
        if as_int is not None and as_int != 1:
            return True

    return r in get_settings().disallowed_reactions
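
Illustrative behaviour of the reaction check above, using stubs for the settings provider and the try_int helper it relies on; the configured set is an assumption.

from types import SimpleNamespace

def get_settings():
    # Stub standing in for the real settings provider.
    return SimpleNamespace(disallowed_reactions={"spam"})

def try_int(s):
    # Stub matching the helper's apparent contract: int or None.
    try:
        return int(s)
    except ValueError:
        return None

assert is_disallowed_reaction("+2")      # vote weights other than 1 are rejected
assert not is_disallowed_reaction("+1")  # a plain +1 passes the weight check
assert is_disallowed_reaction("spam")    # anything in the configured set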
Example #18
 def start(self, ENV = "dev"):
   # Specify run environment settings
   settings = settingsLib.get_settings(ENV)
   
   ping_marker_id = "cron_NewS3FullArticle"
   
   # Log
   logFile = "starter.log"
   logger = log.logger(logFile, settings.setLevel, ping_marker_id)
   
   # Data provider
   db = dblib.SimpleDB(settings)
   db.connect()
   
   # SWF meta data provider
   swfmeta = swfmetalib.SWFMeta(settings)
   swfmeta.connect()
   
   last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(workflow_id = ping_marker_id)
 
   # Start a ping workflow as a marker
   self.start_ping_marker(ping_marker_id, ENV)
 
   # Check for S3 XML files that were updated since the last run
   date_format = "%Y-%m-%dT%H:%M:%S.000Z"
   
   # Quick hack - subtract 15 minutes, 
   #   the time between S3Monitor running and this cron starter
   if last_startTimestamp is not None:
     last_startTimestamp_minus_15 = last_startTimestamp - (60*15)
   else:
     # On the first run ever the last start timestamp will be unavailable
     last_startTimestamp_minus_15 = time.time() - (60*15)
     
   time_tuple = time.gmtime(last_startTimestamp_minus_15)
   
   last_startDate = time.strftime(date_format, time_tuple)
   
   logger.info('last run %s' % (last_startDate))
   
   # 
   file_list = db.elife_get_production_final_delivery_S3_file_items(last_updated_since = last_startDate)
   
   logger.info('Full Article files updated since %s: %s' % (last_startDate, str(len(file_list))))
 
   if(len(file_list) <= 0):
     # No new XML
     pass
   else:
     # Found new XML files
     
     # Start a PublishFullArticleZip starter
     try:
       starter_name = "starter_PublishFullArticleZip"
       self.import_starter_module(starter_name, logger)
       s = self.get_starter_module(starter_name, logger)
       s.start(ENV = ENV, last_updated_since = last_startDate)
     except:
       logger.info('Error: %s starting %s' % (ping_marker_id, starter_name))
       logger.exception('')
Example #19
	def start(self, ENV = "dev"):
		# Specify run environment settings
		settings = settingsLib.get_settings(ENV)
		
		# Log
		identity = "starter_%s" % int(random.random() * 1000)
		logFile = "starter.log"
		#logFile = None
		logger = log.logger(logFile, settings.setLevel, identity)
		
		# Simple connect
		conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
	
		start = True
	
		# Start a workflow execution
		workflow_id = "LensIndexPublish"
		workflow_name = "LensIndexPublish"
		workflow_version = "1"
		child_policy = None
		execution_start_to_close_timeout = str(60*45)
		input = None
	
		if(start):
			response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
	
			logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
Example #20
def _input_user_issue(path):
    subprocess.run([get_settings()[EDITOR], path])
    with open(path, 'r') as fin:
        edited_contents = fin.read()
    if not edited_contents.isspace():
        try:
            user_json = json.loads(edited_contents)
        except Exception as e:
            print("Not valid json, please try again.")
            input("Press ENTER to continue.")
            return _input_user_issue(path)
        if TITLE not in user_json or not isinstance(
                user_json[TITLE], str) or user_json[TITLE] == '':
            print("Must use non-empty string value for attribute title.")
            input("Press ENTER to continue.")
            return _input_user_issue(path)
        if STATUS not in user_json or user_json[
                STATUS] not in get_board_settings()[STATUS_COLUMNS]:
            print(
                f"Must have status with a value in {get_board_settings()[STATUS_COLUMNS]}."
            )
            input("Press ENTER to continue.")
            return _input_user_issue(path)
        return user_json
    else:
        return None
Example #21
def main():

    # Add options
    parser = OptionParser()
    parser.add_option("-e",
                      "--env",
                      default="dev",
                      action="store",
                      type="string",
                      dest="env",
                      help="set the environment to run, either dev or live")
    parser.add_option("-i",
                      "--article-version-id",
                      default=None,
                      action="store",
                      type="string",
                      dest="article_version_id",
                      help="specify the DOI id the article to process")

    (options, args) = parser.parse_args()
    ENV = None
    if options.env:
        ENV = options.env
    article_version_id = None
    if options.article_version_id:
        article_version_id = options.article_version_id

    import settings as settingsLib
    settings = settingsLib.get_settings(ENV)

    o = starter_ApproveArticlePublication()

    o.start(settings=settings, article_version_id=article_version_id)
Example #22
	def get_docs_from_SimpleDB(self, ENV = "dev", last_updated_since = None, doi_id = None):
		"""
		Get the array of docs from the SimpleDB provider
		"""
		docs = []
		
		# Specify run environment settings
		settings = settingsLib.get_settings(ENV)
		
		db = dblib.SimpleDB(settings)
		db.connect()
		
		if(last_updated_since is not None):
			xml_item_list = db.elife_get_article_S3_file_items(file_data_type = "xml", latest = True, last_updated_since = last_updated_since)
		elif(doi_id is not None):
			xml_item_list = db.elife_get_article_S3_file_items(file_data_type = "xml", latest = True, doi_id = doi_id)
		else:
			# Get all
			xml_item_list = db.elife_get_article_S3_file_items(file_data_type = "xml", latest = True)
			
		for x in xml_item_list:
			tmp = {}
			elife_id = str(x['name']).split("/")[0]
			document = 'https://s3.amazonaws.com/' + x['item_name']
			tmp['elife_id'] = elife_id
			tmp['document'] = document
			docs.append(tmp)
		
		return docs
Example #23
    def __init__(self, root):
        version = subprocess.check_output(
            ['git', 'rev-list', '--count', 'HEAD']).decode('utf-8')
        root.title(f'Auto Disenchanter v{version}')
        root.protocol("WM_DELETE_WINDOW", self.on_closing)
        self.builder = builder = pygubu.Builder()
        builder.add_from_file('main_frame.ui')
        builder.get_object('main_frame', root)
        builder.connect_callbacks(self)

        self.builder_wrapper = Builder(builder)
        self.root = root

        self.logger = TkinterLogger(self.builder, '%H:%M:%S')
        self.settings = get_settings(self.logger, debug=True)
        self.logger.log_format = self.settings.log_time_format
        self.macro = Macro(self.logger, self.settings)

        self.incidents = Incidents(self.logger, self.settings)
        self.incidents.start_thread()

        root.resizable(False, False)
        root.wm_attributes("-topmost", 1)

        state = load_state()
        if state is not None and 'options' in state:
            self.builder_wrapper.init_checkboxes(state['options'])
        else:
            self.builder_wrapper.init_checkboxes(dict.fromkeys(OPTIONS, False))
        if state is not None and 'accounts' in state:
            self.accounts = state['accounts']
            self.builder_wrapper.set_treeview('accounts', self.accounts)
        else:
            self.accounts = []
Example #24
def cartodb_append(sqlite_db_path,
                   out_cartodb_name,
                   gfw_env,
                   where_clause=None):
    """
    Append a local FC to a cartoDB dataset
    :param sqlite_db_path: path to local sqlite db
    :param out_cartodb_name: cartoDB table
    :param gfw_env: gfw_env
    :param where_clause: where_clause to apply to the dataset
    :return:
    """
    key = util.get_token(settings.get_settings(gfw_env)['cartodb']['token'])
    account_name = get_account_name(gfw_env)

    # Help: http://www.gdal.org/ogr2ogr.html
    # The -dim 2 option ensures that only two dimensional data is created; no Z or M values
    cmd = [
        'ogr2ogr', '--config', 'CARTODB_API_KEY', key, '-append',
        '-skipfailures', '-t_srs', 'EPSG:4326', '-f', 'CartoDB', '-nln',
        out_cartodb_name, '-dim', '2', 'CartoDB:{0}'.format(account_name)
    ]

    cmd = add_fc_to_ogr2ogr_cmd(sqlite_db_path, cmd)
    cmd = add_where_clause_to_ogr2ogr_cmd(where_clause, cmd)

    util.run_subprocess(cmd)
Example #25
def get_systemheader():
    options = settings.get_settings()
    fn = utils.which("header.ps",
                     list(options.user_data_dirs) + [SYSTEM_DATA_DIR])
    if fn:
        return open(fn).read()
    return "%%\%%  System header %s not found!\n%%" % fn
Example #26
	def get_docs_from_SimpleDB(self, ENV = "dev", last_updated_since = None, doi_id = None):
		"""
		Get the array of docs from the SimpleDB provider
		"""
		docs = []
		
		# Specify run environment settings
		settings = settingsLib.get_settings(ENV)
		
		db = dblib.SimpleDB(settings)
		db.connect()
		
		if(last_updated_since is not None):
			xml_item_list = db.elife_get_article_S3_file_items(file_data_type = "suppl", latest = True, last_updated_since = last_updated_since)
		elif(doi_id is not None):
			xml_item_list = db.elife_get_article_S3_file_items(file_data_type = "suppl", latest = True, doi_id = doi_id)
		else:
			# Get all
			xml_item_list = db.elife_get_article_S3_file_items(file_data_type = "suppl", latest = True)
			
		for x in xml_item_list:
			tmp = {}
			elife_id = str(x['name']).split("/")[0]
			document = 'https://s3.amazonaws.com/' + x['item_name']
			tmp['elife_id'] = elife_id
			tmp['document'] = document
			docs.append(tmp)
		
		return docs
Example #27
    def __init__(self, *args, **kwargs):
        logging.info('instantiating request handler: %s' %
                     self.__class__.__name__)
        self.include_host = True
        self.user = None
        self.is_admin = None  # requires @is_admin decorator to check
        self.admin_user = None
        #from main import settings
        self._settings = settings.get_settings()

        self.context = {
            'current_user': None,
            'error': {},
            'base_url': gae_utils.base_url(),
            'user_agent': gae_utils.GetUserAgent(),
            "handler_name": self.__class__.__name__,
            "handler": self,
            'token': None,
            "debug": gae_utils.Debug(),
            "ip_address": str(os.environ.get('REMOTE_ADDR', '')),
            "settings": self._settings,
            "getattr": getattr
        }

        super(BaseHandler, self).__init__(*args, **kwargs)
Example #28
	def start(self, ENV = "dev", workflow = "Ping"):
		# Specify run environment settings
		settings = settingsLib.get_settings(ENV)
		
		# Log
		identity = "starter_%s" % int(random.random() * 1000)
		logFile = "starter.log"
		#logFile = None
		logger = log.logger(logFile, settings.setLevel, identity)
		
		# Simple connect
		conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
		if(workflow):
			(workflow_id, workflow_name, workflow_version, child_policy, execution_start_to_close_timeout, input) = self.get_workflow_params(workflow)
	
			logger.info('Starting workflow: %s' % workflow_id)
			try:
				response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
	
				logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
				
			except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
				# There is already a running workflow with that ID, cannot start another
				message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
				print message
				logger.info(message)
Example #29
 def create_note(self,
                 filn,
                 title,
                 origin_id=None,
                 origin_title=None,
                 body=None):
     note_id = os.path.basename(filn).split()[0]
     params = {
         'title': title,
         'file': os.path.basename(filn),
         'path': os.path.dirname(filn),
         'id': note_id,
         'origin_id': origin_id,
         'origin_title': origin_title,
         # don't break legacy
         'origin': origin_id,
     }
     settings = get_settings()
     format_str = settings.get('new_note_template', '')
     if not format_str:
         format_str = u'# {title}\ntags = \n\n'
     else:
         format_str = self.note_template_handle_date_spec(
             format_str, note_id)
     with open(filn, mode='w', encoding='utf-8') as f:
         f.write(format_str.format(**params))
         if body is not None:
             f.write('\n' + body)
     return
Example #30
  def __init__(self, settings):
    gtk.ScrolledWindow.__init__(self)
    self.set_border_width(4)
    self.settings = settings.get_settings()

    self.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)

    self.vbox = gtk.VBox(False, 5)
    self.hbox = gtk.HBox(False, 8)

    self.vbox.pack_start(self.hbox, True)

    self.left = gtk.VBox()
    self.right = gtk.VBox()

    self.hbox.pack_start(self.left, True)
    self.hbox.pack_start(self.right, True)

    self.create_form()

    viewport = gtk.Viewport()
    viewport.set_shadow_type(gtk.SHADOW_NONE)
    viewport.add(self.vbox)
    self.add(viewport)

    self.show_all()
Example #31
    def start(self,
              ENV="dev",
              all=None,
              last_updated_since=None,
              docs=None,
              doi_id=None):
        # Specify run environment settings
        settings = settingsLib.get_settings(ENV)

        # Log
        identity = "starter_%s" % int(random.random() * 1000)
        logFile = "starter.log"
        #logFile = None
        logger = log.logger(logFile, settings.setLevel, identity)

        # Simple connect
        conn = boto.swf.layer1.Layer1(settings.aws_access_key_id,
                                      settings.aws_secret_access_key)

        if (all == True):
            # Publish all articles, use SimpleDB as the source
            docs = self.get_docs_from_SimpleDB(ENV)

        elif (doi_id is not None):
            docs = self.get_docs_from_SimpleDB(ENV, doi_id=doi_id)

        elif (last_updated_since is not None):
            # Publish only articles since the last_modified date, use SimpleDB as the source
            docs = self.get_docs_from_SimpleDB(ENV, last_updated_since)

        if (docs):
            for doc in docs:

                document = doc["document"]
                elife_id = doc["elife_id"]

                id_string = elife_id

                # Start a workflow execution
                workflow_id = "PublishArticle_%s" % (id_string)
                workflow_name = "PublishArticle"
                workflow_version = "1"
                child_policy = None
                execution_start_to_close_timeout = None
                input = '{"data": ' + json.dumps(doc) + '}'

                try:
                    response = conn.start_workflow_execution(
                        settings.domain, workflow_id, workflow_name,
                        workflow_version, settings.default_task_list,
                        child_policy, execution_start_to_close_timeout, input)

                    logger.info('got response: \n%s' %
                                json.dumps(response, sort_keys=True, indent=4))

                except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
                    # There is already a running workflow with that ID, cannot start another
                    message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
                    print message
                    logger.info(message)
Example #32
  def start(self, ENV = "dev", limit = None):
    # Specify run environment settings
    settings = settingsLib.get_settings(ENV)
    
    # Log
    identity = "starter_%s" % int(random.random() * 1000)
    logFile = "starter.log"
    #logFile = None
    logger = log.logger(logFile, settings.setLevel, identity)
    
    # Simple connect
    conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
    
    # Start a workflow execution
    workflow_id = "SendQueuedEmail"
    workflow_name = "SendQueuedEmail"
    workflow_version = "1"
    child_policy = None
    execution_start_to_close_timeout = None

    if(limit):
      input = '{"data": {"limit": "' + limit + '"}}'
    else:
      input = None
    
    try:
      response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)

      logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
      
    except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
      # There is already a running workflow with that ID, cannot start another
      message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
      print message
      logger.info(message)
Example #33
 def start(self, ENV = "dev"):
   # Specify run environment settings
   settings = settingsLib.get_settings(ENV)
   
   ping_marker_id = "cron_NewS3FiguresPDF"
   
   # Log
   logFile = "starter.log"
   logger = log.logger(logFile, settings.setLevel, ping_marker_id)
   
   # Data provider
   db = dblib.SimpleDB(settings)
   db.connect()
   
   # SWF meta data provider
   swfmeta = swfmetalib.SWFMeta(settings)
   swfmeta.connect()
   
   # Get the last time this cron was run
   last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(workflow_id = ping_marker_id)
   
   # Default, if the cron has never run before
   if last_startTimestamp is None:
     last_startTimestamp = 0
 
   # Start a ping workflow as a marker
   self.start_ping_marker(ping_marker_id, ENV)
 
   # Check for S3 PDF files that were updated since the last run
   date_format = "%Y-%m-%dT%H:%M:%S.000Z"
   
   # Quick hack - subtract 30 minutes to not ignore the top of the hour
   #   the time between S3Monitor running and this cron starter
   last_startTimestamp_minus_30 = last_startTimestamp - (60*30)
   if(last_startTimestamp_minus_30 < 0):
     last_startTimestamp_minus_30 = 0
   time_tuple = time.gmtime(last_startTimestamp_minus_30)
   
   last_startDate = time.strftime(date_format, time_tuple)
   
   logger.info('last run %s' % (last_startDate))
   
   S3_item_list = db.elife_get_article_S3_file_items(file_data_type = "figures", latest = True, last_updated_since = last_startDate)
   
   logger.info('Figures PDF files updated since %s: %s' % (last_startDate, str(len(S3_item_list))))
 
   if(len(S3_item_list) <= 0):
     # No new PDF
     pass
   else:
     # Found new PDF files
     
     # Start a PublishFiguresPDF starter
     try:
       starter_name = "starter_PublishFiguresPDF"
       self.import_starter_module(starter_name, logger)
       s = self.get_starter_module(starter_name, logger)
       s.start(ENV = ENV, last_updated_since = last_startDate)
     except:
       logger.info('Error: %s starting %s' % (ping_marker_id, starter_name))
       logger.exception('')
Example #34
    def start_ping_marker(self, workflow_id, ENV="dev"):
        """
        Start a ping workflow with a unique name to serve as a time marker
        for determining the last time this was run
        """

        # Specify run environment settings
        settings = settingsLib.get_settings(ENV)

        workflow_id = workflow_id
        workflow_name = "Ping"
        workflow_version = "1"
        child_policy = None
        execution_start_to_close_timeout = None
        input = None

        conn = boto.swf.layer1.Layer1(settings.aws_access_key_id,
                                      settings.aws_secret_access_key)
        try:
            response = conn.start_workflow_execution(
                settings.domain, workflow_id, workflow_name, workflow_version,
                settings.default_task_list, child_policy,
                execution_start_to_close_timeout, input)

        except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
            # There is already a running workflow with that ID, cannot start another
            message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
            print message
Example #35
def update_metadata(in_fc, tech_title, gfw_env):

    api_url = settings.get_settings(gfw_env)['metadata']['api_url']
    layer_url = api_url + r'/' + tech_title

    response = requests.get(layer_url)
    api_data = json.loads(response.text)

    md = arcpy_metadata.MetadataEditor(in_fc)

    md.title = escape_html(api_data['title'])
    md.purpose = escape_html(api_data['function'])
    md.abstract = escape_html(api_data['overview'])
    md.tags = api_data['tags'].split(",")
    md.extent_description = escape_html(api_data['geographic_coverage'])
    md.last_update = escape_html(api_data['date_of_content'])
    md.update_frequency = escape_html(api_data['frequency_of_updates'])
    md.citation = escape_html(api_data['citation'])
    md.limitation = escape_html(api_data['cautions'])
    md.source = escape_html(api_data['source'])
    md.scale_resolution = escape_html(api_data['resolution'])
    md.supplemental_information = escape_html(api_data['other'])

    md.finish()
Example #36
 def start(self, ENV = "dev", workflow = "S3Monitor"):
   # Specify run environment settings
   settings = settingsLib.get_settings(ENV)
   
   # Log
   identity = "starter_%s" % int(random.random() * 1000)
   logFile = "starter.log"
   #logFile = None
   logger = log.logger(logFile, settings.setLevel, identity)
   
   # Simple connect
   conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
   if(workflow):
     (workflow_id, workflow_name, workflow_version, child_policy, execution_start_to_close_timeout, input) = self.get_workflow_params(workflow, settings)
 
     logger.info('Starting workflow: %s' % workflow_id)
     try:
       response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
 
       logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
       
     except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
       # There is already a running workflow with that ID, cannot start another
       message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
       print message
       logger.info(message)
Example #37
    def process(self, message: Message) -> None:
        command = message.command
        assert command

        if not command.params_clean:
            self.client.reply(
                message,
                "Please choose an expiration time for forwarded messages",
                reply_markup=self._get_keyboard(),
            )
            return

        ttl = command.params_clean[0]

        settings = get_settings(message.chat.id)
        settings.forwards_ttl = ttl
        session.add(settings)
        session.commit()

        logger.debug("Forwards removal enabled")

        if ttl > 0:
            self.client.reply(
                message,
                f"Automatic removal of forwarded messages enabled. Removing forwards "
                f"after {ttl} seconds.",
                reply_markup=get_remove_keyboard(),
            )
        else:
            self.client.reply(
                message,
                f"Automatic removal of forwarded messages disabled.",
                reply_markup=get_remove_keyboard(),
            )
Example #38
def get_systemheader():
    options = settings.get_settings()
    fn = utils.which(
        "header.ps", list(options.user_data_dirs) + [SYSTEM_DATA_DIR])
    if fn:
        return open(fn).read()
    return "%%\%%  System header %s not found!\n%%" % fn
Example #39
def main():

    # example on how to run:
    # From elife-bot folder run
    # python starter/starter_CopyGlencoeStillImages.py --env=dev --article-id=15224 --no-poa

    parser = ArgumentParser()
    parser.add_argument("-e", "--env", action="store", type=str, dest="env",
                        help="set the environment to run, e.g. dev, live, prod, end2end")
    parser.add_argument("-a", "--article-id", action="store", type=str, dest="article_id",
                        help="specify the article id to process")
    parser.add_argument("-p", "--poa", action="store_true", dest="poa",
                        help="Article is POA. If omitted it defaults to False.")
    parser.add_argument("-np", "--no-poa", action="store_false", dest="poa",
                        help="Article is NOT POA. If omitted it defaults to False.")
    parser.set_defaults(env="dev", article_id=None, poa=False)

    args = parser.parse_args()
    ENV = None
    if args.env:
        ENV = args.env
    article_id = None
    is_poa = False
    if args.article_id:
        article_id = args.article_id
    if args.poa:
        is_poa = args.poa

    import settings as settingsLib
    settings = settingsLib.get_settings(ENV)

    o = starter_CopyGlencoeStillImages()

    o.start(settings=settings, article_id=article_id, standalone=True, standalone_is_poa=is_poa)
Example #40
def validate_settings(l=True, v=True):

    log_info("", l, v)
    log_info("Validate GFW-sync settings", l, v)
    #print ""
    #print "Validate GFW-sync settings"
    
    sets = settings.get_settings()
    errors = 0
    warnings = 0
    bucket = sets['folders']['default_bucket']
    bucket_drives = sets['bucket_drives']
    bucket = validate_bucket(bucket, bucket_drives, l, v)
    if not bucket:
        errors += 1

    default_srs = sets['spatial_references']['default_srs']
    default_srs = validate_srs(default_srs, l, v)
    if not default_srs:
        errors += 1

    gdb_srs = sets['spatial_references']['gdb_srs']
    gdb_srs = validate_srs(gdb_srs, l, v)
    if not gdb_srs:
        errors += 1

    return errors, warnings
Example #41
 def __init__(self):
     initialize()
     settings = get_settings()
     self.dht = DHT(settings.DHT_PIN)
     self.windsensor = WindSensor()
     if settings.BAROMETER_MODEL == "BMP085":
         self.barom = BMP085()
Example #42
	def start(self, ENV = "dev"):
		# Specify run environment settings
		settings = settingsLib.get_settings(ENV)
		
		# Log
		identity = "starter_%s" % int(random.random() * 1000)
		logFile = "starter.log"
		#logFile = None
		logger = log.logger(logFile, settings.setLevel, identity)
		
		# Simple connect
		conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
	
		start = True
	
		# Start a workflow execution
		workflow_id = "LensIndexPublish"
		workflow_name = "LensIndexPublish"
		workflow_version = "1"
		child_policy = None
		execution_start_to_close_timeout = str(60*45)
		input = None
	
		if(start):
			response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
	
			logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
Example #43
def main(flag):
    global settings
    global env
    parser = OptionParser()
    parser.add_option("-e", "--env", default="dev", action="store", type="string", dest="env",
                      help="set the environment to run, either dev or live")
    (options, args) = parser.parse_args()
    if options.env:
        env = options.env

    settings = settings_lib.get_settings(env)
    env = env

    log_file = "queue_workflow_starter.log"
    global logger
    logger = log.logger(log_file, settings.setLevel)

    # Simple connect
    queue = get_queue()

    while flag.green():
        messages = queue.get_messages(1, visibility_timeout=60,
                                      wait_time_seconds=20)
        if messages:
            logger.info(str(len(messages)) + " message received")
            logger.info('message contents: %s', messages[0])
            process_message(messages[0])
        else:
            logger.debug("No messages received")

    logger.info("graceful shutdown")
Example #44
 def _get_data(writer, request):
     global last_request_time
     last_request_time = time.time()
     sens_data = sensor.sensor_data()
     settings_data = settings.get_settings()
     sens_data.update(settings_data.get('added_infos_to_sensor_data', {}))
     return userv.json(writer, sens_data)
Example #45
def full_sync(addressbook=None):
    my_settings = settings.get_settings()
    if my_settings['resource'] == "":
        return

    # sqlite3 database connection
    conn = sqlite3.connect(os.path.join(glib.get_user_config_dir(), 'pyaddressbook', 'addressbook.db'))

    # local_status 0=nothing, 1=locally new, 2=locally deleted, 3=locally modified
    conn.execute('CREATE TABLE if not exists vcards (etag text primary key unique  not null, href text unique not null, vcard text not null, local_status smallint default 0)')
    conn.commit()

    available_href2etag = {}
    for href,etag in conn.execute('select href,etag from vcards where local_status<>1').fetchall():
        available_href2etag[href] = etag

    cdav = carddav2.PyCardDAV(verify=my_settings['verify'], resource=my_settings['resource'], user=my_settings['user'], passwd=my_settings['passwd'], write_support=True)
    abook = cdav.get_abook()

    deleted_vcards = available_href2etag.copy()
    server_modified_vcards = {}

    for href,etag in abook.items():
        if href in deleted_vcards:
            del deleted_vcards[href]
        if not href in available_href2etag or available_href2etag[href] != etag:
            server_modified_vcards[href] = etag

    # delete local vcards if they have been removed from the server side
    for href,etag in deleted_vcards.items():
        print("Removing contact for href: %s" % href)
        conn.execute('delete from vcards where href=?', (href,))
        if addressbook: addressbook.vcard_removed(href)


    # update local vcards that have been modified on the server side (regardless of the local status, i.e. locally modified vcards will be updated to the server side version)
    href_list = [ href for href,etag in server_modified_vcards.items() ]
    if len(href_list) > 0:
        print('Requesting modified/new vcards from server')
        dav_list = cdav._get_vcards(href_list)
        for dav in dav_list:
            href = dav['href']
            status = dav['status']
            etag = dav['etag']
            vcard = dav['vcard']
            print("Updating vcard for href %s since it was updated on the server-side" % href)
            if href in available_href2etag:
                conn.execute('update vcards set etag=?,href=?,vcard=?,local_status=0 where href=?', (etag,href,vcard,href))
                if addressbook: addressbook.vcard_updated(href, vcard)
            else:
                conn.execute('INSERT INTO vcards VALUES (?,?,?,0)', (etag,href, vcard))
                if addressbook: addressbook.vcard_added(href, etag, vcard)

    sync_local_changes(cdav, conn)
    if my_settings['write_vcf']:
        write_to_vcf(conn, my_settings['vcf_path'])

    conn.commit()
    conn.close()
Example #46
File: main.py Project: perlman/vizrelay
def main():
    mod = settings.get_settings(request.args)
    result = "<html><head></head><body>"
    result += "<h2>Current settings</h2>"
    result += "<pre>" + pprint.PrettyPrinter(indent=4).pformat(
        mod.args) + "</pre>"
    result += "</body>"
    return result
Example #47
async def delete_order(order_id: int,
                       x_token: str = Header(None),
                       conn: Connection = Depends(get_conn)):
    if x_token != get_settings().system_token:
        raise HTTPException(status_code=403, detail="Only system")
    oks_repo = OrderKeySetRepo(conn)
    deleted = await oks_repo.delete(order_id)
    return deleted
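
In FastAPI services like this one, get_settings() is often a cached settings factory; a minimal pydantic v1-style sketch of that pattern, with the field mirroring the attribute read above (the class name and default are assumptions).

from functools import lru_cache
from pydantic import BaseSettings

class Settings(BaseSettings):
    system_token: str = "change-me"  # placeholder default

@lru_cache()
def get_settings() -> Settings:
    # Cache so every request shares one parsed Settings instance.
    return Settings()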
Example #48
File: egrit.py Project: kearnh/gtui
def _settings():
    old_settings = get_settings()

    if request.method == 'POST':
        new_settings = request.get_json()
        for s in new_settings['settings']:
            name = s.get('name')
            value = s.get('value')
            if not name or value is None:
                flask.abort(400)
            setattr(settings, name, value)
        settings.commit()

    return jsonify({
        'settings': sorted([{'name': k, 'value': v}
                            for k, v in get_settings().items()], key=lambda d: d.get('name'))
    })
Example #49
def reload_settings(config_path=None):
    global config, AUDIO_DIR, ANALYSIS_DIR
    visualizer_settings = settings.get_settings(config_path)
    config.ECHO_NEST_API_KEY = visualizer_settings['echo_nest_api_key']
    AUDIO_DIR = visualizer_settings['upload_dir']
    ANALYSIS_DIR = visualizer_settings['analysis_dir']
    check_dir(AUDIO_DIR, writable=True)
    check_dir(ANALYSIS_DIR, writable=True)
Example #50
    def start(self, ENV = "dev", bucket = None, document = None):
        # Specify run environment settings
        settings = settingsLib.get_settings(ENV)
        
        # Log
        identity = "starter_%s" % int(random.random() * 1000)
        logFile = "starter.log"
        #logFile = None
        logger = log.logger(logFile, settings.setLevel, identity)
        
        # Simple connect
        conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
    
        docs = None
    
        if(document is not None):
            docs = []
            doc = {}
            doc["document"] = document
            if bucket is not None:
                doc["bucket"] = bucket
            docs.append(doc)
        
        if(docs):
            for doc in docs:
                
                document = doc["document"]

                # Get a unique id from the document name for the workflow_id
                id_string = None
                try:
                    id_string = ''
                    document_file = document.split("/")[-1]
                    if "bucket" in doc:
                        id_string += doc['bucket'] + '_'
                    id_string += document_file.split("_")[0]
                except:
                    id_string = "000"
        
                # Start a workflow execution
                workflow_id = "PMCDeposit_%s" % (id_string)
                workflow_name = "PMCDeposit"
                workflow_version = "1"
                child_policy = None
                execution_start_to_close_timeout = None
                input = '{"data": ' + json.dumps(doc) + '}'
                
                try:
                    response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
        
                    logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
                    
                except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
                    # There is already a running workflow with that ID, cannot start another
                    message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
                    print message
                    logger.info(message)
Example #51
File: cartodb.py Project: wri/gfw-sync2
def cartodb_sql(sql, gfw_env):
    """
    Execute a SQL statement using the API
    :param sql: a SQL statment
    :param gfw_env: the gfw_env-- used to grab the correct API token
    :return:
    """

    logging.debug(sql)
    key = util.get_token(settings.get_settings(gfw_env)['cartodb']['token'])
    api_url = settings.get_settings(gfw_env)["cartodb"]["sql_api"]

    result = urllib.urlopen("{0!s}?api_key={1!s}&q={2!s}".format(api_url, key, sql))
    json_result = json.loads(result.readlines()[0], object_pairs_hook=OrderedDict)

    if "error" in json_result.keys():
        raise SyntaxError(json_result['error'])

    return json_result
Example #52
 def post(self):
     user = users.get_current_user()
     if not user:
         # Login is required, redirect to login page
         self.redirect(users.create_login_url(self.request.uri))
     else:
         # Get settings for current user
         user_settings = settings.get_settings(user)
         
         # Get key for current list
         list_name = user_settings.active_list
         list_key = ndb.Key('User', user.user_id(), 'TaskList', list_name)
         
         # Validate task name
         input_name = self.request.get('task_name', '')
         if input_name == '' or input_name.isspace():
             # Invalid task name
             self.redirect('/')
             return
         
         # Validate task estimate
         input_estimate = self.request.get('estimate', '')
         if input_estimate == '' or input_estimate[0].isdigit() == False:
             # Invalid estimate
             self.redirect('/')
             return
         
         # Helper function for parsing parts of estimate string
         def parse(string, regex):
             r = regex.findall(string)
             if r:
                 try:
                     value = float(r[0])
                 except ValueError:
                     value = 0
             else:
                 value = 0
             
             return value
         
         # Get estimate in days, hours and minutes
         d = parse(input_estimate, RE_DAY)
         h = parse(input_estimate, RE_HOUR)
         m = parse(input_estimate, RE_MIN)
         hpd = int(user_settings.hours_per_day)
         estimate = datetime.timedelta(hours=d*hpd + h, minutes=m)
         
         # Create and add new task
         task = Task(parent=list_key)
         task.name = input_name
         task.estimate = int(estimate.total_seconds())
         task.put()
         
         # Redirect to main page
         self.redirect('/')
Example #53
    def start(self, ENV = "dev", document = None, last_updated_since = None):
        # Specify run environment settings
        settings = settingsLib.get_settings(ENV)
        
        # Log
        identity = "starter_%s" % int(random.random() * 1000)
        logFile = "starter.log"
        #logFile = None
        logger = log.logger(logFile, settings.setLevel, identity)
        
        # Simple connect
        conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)
    
        docs = None
    
        if(document is not None):
            docs = []
            doc = {}
            doc["document"] = document
            docs.append(doc)
            
        elif(last_updated_since is not None):
            # Publish only articles since the last_modified date, use SimpleDB as the source
            docs = self.get_docs_from_SimpleDB(ENV, last_updated_since = last_updated_since)
    
        if(docs):
            for doc in docs:
                
                document = doc["document"]

                # Get a unique id from the document name for the workflow_id
                id_string = None
                try:
                    id_string = document.split("_")[0]
                except:
                    id_string = "000"
        
                # Start a workflow execution
                workflow_id = "PackagePOA_%s" % (id_string)
                workflow_name = "PackagePOA"
                workflow_version = "1"
                child_policy = None
                execution_start_to_close_timeout = None
                input = '{"data": ' + json.dumps(doc) + '}'
                
                try:
                    response = conn.start_workflow_execution(settings.domain, workflow_id, workflow_name, workflow_version, settings.default_task_list, child_policy, execution_start_to_close_timeout, input)
        
                    logger.info('got response: \n%s' % json.dumps(response, sort_keys=True, indent=4))
                    
                except boto.swf.exceptions.SWFWorkflowExecutionAlreadyStartedError:
                    # There is already a running workflow with that ID, cannot start another
                    message = 'SWFWorkflowExecutionAlreadyStartedError: There is already a running workflow with ID %s' % workflow_id
                    print message
                    logger.info(message)
Example #54
def run(app):
    global logfile
    import getopt
    import paper
    import settings
    settings = settings.get_settings(app.prefs)
    # do this after loading the settings so the user can just call
    # get_settings() w/out an arg to get a usable object.
    load_rcscript()
    context = None
    help = None
    error = 0
    logfile = None
    title = ''
    url = ''
    tabstop = None
    multi = 0
    verbose = 0
    printer = None
    copies = 1
    levels = None
    outfile = None
    #
    try:
        options, args = getopt.getopt(sys.argv[1:],
                                      'mvhdcaUl:u:t:sp:o:f:C:P:T:',
                                      ['color',
                                       'copies=',
                                       'debug',
                                       'fontsize=',
                                       'footnote-anchors',
                                       'help',
                                       'images',
                                       'logfile=',
                                       'multi',
                                       'orientation=',
                                       'output=',
                                       'papersize=',
                                       'paragraph-indent=',
                                       'paragraph-skip=',
                                       'printer=',
                                       'strict-parsing',
                                       'tab-width=',
                                       'tags=',
                                       'title=',
                                       'underline-anchors',
                                       'url=',
                                       'verbose',
                                       ])
    except getopt.error as err:
        error = 1
        help = 1
        options = ()
        sys.stderr.write("option failure: %s\n" % err)
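The option-processing loop that normally follows this getopt call is not part of the excerpt; a minimal sketch of how a few of the declared flags might be consumed (the letter-to-long-option mapping is an assumption):

    for opt, arg in options:
        if opt in ('-h', '--help'):
            help = 1
        elif opt in ('-v', '--verbose'):
            verbose = verbose + 1
        elif opt in ('-l', '--logfile'):
            logfile = arg
        elif opt in ('-o', '--output'):
            outfile = arg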
Example #55
0
def try_reconnecting():
    try:
        global connection
        db = get_settings('database')
        connection = pyhdb.connect(
            host=db['host'],
            port=db['port'],
            user=db['username'],
            password=db['password']
        )
    except Exception as e:
        print(e)
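A sketch of how a reconnect helper like this is typically wrapped around query execution, assuming pyhdb's DB-API cursor interface; execute_with_retry is a hypothetical helper, not part of the original:

def execute_with_retry(sql, retries=3):
    # Try the statement, reconnecting once after each failed attempt
    for attempt in range(retries):
        try:
            cursor = connection.cursor()
            cursor.execute(sql)
            return cursor.fetchall()
        except Exception as e:
            print(e)
            try_reconnecting()
    raise RuntimeError('query failed after %d attempts' % retries)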
Example #56
0
File: api.py Project: marteinn/Skeppa
def _upload_env_files(env_files):
    settings = skeppa_settings.get_settings()

    current_dir = os.getcwd()
    local_conf_dir = os.path.join(current_dir, settings.env_files_dir)
    remote_conf_dir = os.path.join(env.path, settings.env_files_dir)

    for env_file in env_files:
        env_path = os.path.join(local_conf_dir, env_file)
        remote_path = os.path.join(remote_conf_dir, env_file)
        put(env_path, remote_path)
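A usage sketch, assuming Fabric-style put/env and a settings.env_files_dir such as conf/env (file names hypothetical):

_upload_env_files(['web.env', 'worker.env'])
# copies ./conf/env/web.env to <env.path>/conf/env/web.env, and so on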
Example #57
0
	def render_template( self, creds, bot_settings=None, values=None ):
		
		template_values = {}
		if values is not None:
			template_values.update( values )
		template_values[ "form_action" ] = self.request.path

		if creds.screen_name is not None:
			template_values[ 'twitter_username' ] = creds.screen_name

		if bot_settings is None:
			bot_settings = settings.get_settings( creds )
		
		template_path = None
		if bot_settings is None:
			template_path = path_for_template( "nobot.html" )
		else:
			template_values[ 'guru_name' ] = bot_settings.learning_guru
			template_values[ "tweet_frequency" ] = bot_settings.tweet_frequency
			template_values[ "tweet_chance" ] = bot_settings.tweet_chance

			try:
				lists_in = creds.lists
				if lists_in:
					lists_out = []
					for list_in in lists_in:
						lists_out.append( { 'name' : list_in.name, 'id' : list_in.id_str } )  
					template_values[ 'lists' ] = lists_out
			except Exception as err:
				pass

			if bot_settings.learning_style == constants.learning_style_oneuser:
				template_values[ 'learnfrom_oneuser_checked' ] = "checked"
			elif bot_settings.learning_style == constants.learning_style_followers:
				template_values[ 'learnfrom_followers_checked' ] = "checked"
			elif bot_settings.learning_style == constants.learning_style_following:
				template_values[ 'learnfrom_following_checked' ] = "checked"
			elif bot_settings.learning_style == constants.learning_style_list:
				template_values[ 'learnfrom_list_checked' ] = "checked"

			if bot_settings.learn_retrospectively: 
				template_values[ 'learn_retrospectively_checked' ] = "checked"
			if bot_settings.locquacity_onschedule: 
				template_values[ 'locquacity_onschedule_checked' ] = "checked"
			if bot_settings.locquacity_reply:
				template_values[ 'locquacity_reply_checked' ] = "checked"
			if bot_settings.locquacity_speakonnew:
				template_values[ 'locquacity_speakonnew_checked' ] = "checked"
			if bot_settings.locquacity_greetnew:
				template_values[ 'locquacity_greetnew_checked' ] = "checked"

			template_path = path_for_template( "settings.html" )
Example #58
0
File: api.py Project: marteinn/Skeppa
def _upload_files(files):
    settings = skeppa_settings.get_settings()

    mount_dir = os.path.join(env.path, settings.mount_dir)
    local_files_dir = os.path.join(os.getcwd(), settings.files_dir)
    formatted_list = []

    # Construct a formatted list of files to be uploaded/created
    for target_file in files:
        remote_path = None
        local_path = None

        if isinstance(target_file, six.string_types):
            remote_path = target_file

            if ":" in target_file:
                struct = target_file.split(":")
                remote_path = struct[0]
                local_path = struct[1]
        else:
            remote_path, local_path = target_file.popitem()

        formatted_list.append((remote_path, local_path))

    for remote_path, local_path in formatted_list:
        if not local_path:
            remote_path = os.path.join(mount_dir, remote_path)
            env.run('mkdir -p {0}'.format(remote_path))
            continue

        remote_path = os.path.join(mount_dir, remote_path)
        local_path = os.path.join(local_files_dir, local_path)
        remote_dir = os.path.dirname(remote_path)

        # Upload file, otherwise sync directory
        if not os.path.isdir(local_path):
            env.run('mkdir -p {0}'.format(remote_dir))
            put(local_path, remote_path)
        else:
            local_path = local_path.rstrip('/') + '/'
            remote_path = remote_path.rstrip('/') + '/'
            parent_dir = os.path.abspath(remote_path + "/../")

            env.run('mkdir -p {0}'.format(parent_dir))
            put(local_path, parent_dir)

            local_dir = os.path.basename(os.path.dirname(local_path))
            remote_dir = os.path.basename(os.path.dirname(remote_path))

            if local_dir != remote_dir:
                env.run('mv {0}/{1} {2}/{3}'.format(parent_dir, local_dir,
                                                    parent_dir, remote_dir))
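For reference, the three file-spec shapes this function accepts, matching the branches above (paths hypothetical):

_upload_files([
    'media',                    # bare string: only creates the remote directory
    'conf/app.conf:app.conf',   # "remote:local": uploads a single file
    {'static': 'build/static'}, # dict: syncs a local directory to the remote path
])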
Example #59
0
  def start(self, ENV = "dev"):
    # Specify run environment settings
    settings = settingsLib.get_settings(ENV)
    
    ping_marker_id = "cron_FiveMinute"
    
    # Log
    logFile = "starter.log"
    logger = log.logger(logFile, settings.setLevel, ping_marker_id)
    
    # Data provider
    db = dblib.SimpleDB(settings)
    db.connect()
    
    # SWF meta data provider
    swfmeta = swfmetalib.SWFMeta(settings)
    swfmeta.connect()
    
    last_startTimestamp = swfmeta.get_last_completed_workflow_execution_startTimestamp(workflow_id = ping_marker_id)
  
    # Start a ping workflow as a marker
    self.start_ping_marker(ping_marker_id, ENV)
  
    # Check for S3 XML files that were updated since the last run
    date_format = "%Y-%m-%dT%H:%M:%S.000Z"
    
    # Date conversion
    time_tuple = time.gmtime(last_startTimestamp)
    last_startDate = time.strftime(date_format, time_tuple)
    
    logger.info('last run %s %s' % (ping_marker_id, last_startDate))
    
    # A conditional start for SendQueuedEmail
    #  Only start a workflow if there are emails in the queue ready to send
    item_list = db.elife_get_email_queue_items(
      query_type = "count",
      date_scheduled_before = last_startDate)

    try:
      if int(item_list[0]["Count"]) > 0:
        # At least one email in the queue, start a workflow
        try:
          starter_name = "starter_SendQueuedEmail"
          self.import_starter_module(starter_name, logger)
          s = self.get_starter_module(starter_name, logger)
          s.start(ENV = ENV)
        except Exception:
          logger.info('Error: %s starting %s' % (ping_marker_id, starter_name))
          logger.exception('')
    except Exception:
      # The queue count could not be determined
      logger.info('Exception encountered starting %s: %s' % (ping_marker_id, last_startDate))