Example #1
def prune(config, before, since, offset, limit):
    readConfig(config)
    apiKey = getApiKey()
    query = alert_makequery('open', before, since)
    try:
        res = alerts_list(apiKey, query, offset, limit)
    except Exception as exc:
        click.echo('Error: opsgenie api returned error ' + str(exc.args))
        return None
    click.echo('Alert Count ' + str(len(res)))
    for adef in res:
        al_id = adef['id']
        adata = alerts_get(apiKey, al_id)
        al_alias = adata['alias']
        al_status = adata['status']
        al_desc = adata['description']
        al_createdAt = adata['createdAt']
        
        if 'You are making too many requests!' in al_desc:
            click.echo('Alert ' + al_alias + ' caused by OpsGenie API limits (id:' + al_id + ')')
            try:
                closeres = alerts_close(apiKey, al_id, 'devops', None, 'Close Api Limit Alert')
            except Exception as exc:
                click.echo('Error: opsgenie api returned error ' + str(exc.args))
                return None
            click.echo('Closed Alert ' + al_id)
        else:
            click.echo('Alert ' + al_createdAt + ' alias:' + al_alias + ' status:' + al_status + ' Ignored')
    return res
Example #2
def status(prefix, config, showall):
    readConfig(config)
    apiKey = getApiKey()
    try:
        hbjson = hb_getlist(apiKey)
    except Exception as exc:
        click.echo('Error: opsgenie api returned error ' + str(exc.args))
        return None
    for hbdef in hbjson:
        hbname = hbdef['name']
        if not hbname.startswith(prefix):
            continue
        while True:
            try:
                hbdata = hb_get(apiKey, hbname)
                hbexpired = hbdata['expired']
                if hbexpired:
                    click.echo('Expired: ' + hbdef['name'] + ' interval:' + str(hbdef['interval']))
                elif showall == 'true':
                    click.echo('Healthy: ' + hbdef['name'] + ' interval:' + str(hbdef['interval']))
                break
            except Exception as exc:
                if exc.args[0] == 429:
                    # hit api limit, pause and try again
                    time.sleep(2)
                    continue
                else:
                    click.echo('Error: opsgenie api returned error ' + str(exc.args))
                    return None
    return hbjson
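Note: Example #2 above and the bulkset command in Example #26 repeat the same back-off loop whenever the OpsGenie API answers with HTTP 429. A minimal sketch of how that retry could be factored into a shared helper is shown below; retry_on_rate_limit, its max_retries/pause parameters and the 2-second default are illustrative assumptions, not part of the original CLI.

import time

def retry_on_rate_limit(func, *args, max_retries=10, pause=2, **kwargs):
    # Hypothetical helper (not in the original code): call func and retry
    # while the wrapped OpsGenie call keeps failing with HTTP 429.  The
    # wrappers in these examples raise with the status code as args[0].
    attempts = 0
    while True:
        try:
            return func(*args, **kwargs)
        except Exception as exc:
            if exc.args and exc.args[0] == 429 and attempts < max_retries:
                attempts += 1
                time.sleep(pause)  # hit the api limit, pause and try again
                continue
            raise

# usage sketch:  hbdata = retry_on_rate_limit(hb_get, apiKey, hbname)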
Example #3
def count(config, tenantid):
    readConfig(config)
    try:
        res = deployment_count(tenantid)
    except Exception as exc:
        click.echo('Error: rest api returned error ' + str(exc.args))
        return None
    click.echo('Count ' + str(res))
    return res
Example #4
def delete(deploymentid, config, cascade, skiplisteners, skipio):
    readConfig(config)
    try:
        res = deployment_delete(deploymentid, cascade, skiplisteners, skipio)
    except Exception as exc:
        click.echo('Error: rest api returned error ' + str(exc.args))
        return None
    click.echo('Deleted ' + deploymentid)
    return res
Example #5
def getconfig(config):
    readConfig(config)
    try:
        res = hcleanup_getconfig()
    except Exception as exc:
        click.echo('Error: rest api returned error ' + str(exc.args))
        return None

    click.echo('Got Config: ' + json.dumps(res))
    return res
Example #6
def count(config, status, before, since):
    readConfig(config)
    apiKey = getApiKey()
    query = alert_makequery(status, before, since)
    try:
        res = alerts_getcount(apiKey, query)
    except Exception as exc:
        click.echo('Error: opsgenie api returned error ' + str(exc.args))
        return None
    click.echo('Alert Count ' + str(res))
    return res
Example #7
def cleanup(config):
    readConfig(config)
    try:
        # True == run job now
        res = hcleanup_schedulejob(True)
    except Exception as exc:
        click.echo('Error: rest api returned error ' + str(exc.args))
        return None

    click.echo('Got Result: ' + json.dumps(res))
    return res
Example #8
def getjobs(config):
    readConfig(config)
    try:
        res = hcleanup_getjobs()
    except Exception as exc:
        click.echo('Error: rest api returned error ' + str(exc.args))
        return None

    click.echo('Found ' + str(res) + ' jobs')
    for cjob in res:
        click.echo(json.dumps(cjob))
    return res
Example #9
def list(config):
    readConfig(config)
    apiKey = getApiKey()
    try:
        hbjson = hb_getlist(apiKey)
    except Exception as exc:
        click.echo('Error: opsgenie api returned error ' + str(exc.args))
        return None
    click.echo('Heartbeat Count ' + str(len(hbjson)))
    for hbdef in hbjson:
        click.echo('Name: ' + hbdef['name'] + ' interval:' + str(hbdef['interval']))
    return hbjson
Example #10
File: digit.py Project: jtonk/Digit
def do_main_program():
	
	do_scheduler()

	# read config and init sensors
	
	global sensors
	sensors = config.readConfig()
	
	logger.debug(sensors.keys())

	
	
	
	threadHTTP = Thread(target=inetServer.threadHTTP)
	threadHTTP.setDaemon(True)
	threadHTTP.start()

	
	while 1:
		try:
			time.sleep(0.1)
		except KeyboardInterrupt:
			print >> sys.stderr, '\nExiting by user request.\n'
			sys.exit(0)
Example #11
    def initialize(self, argv):
        
        parser = optparse.OptionParser(usage="%prog [options] <dgen-config>", version=self.VERSION)
        parser.add_option("-s", dest="sf", type="float", default=1.0,
                          help="scaling factor (sf=1 generates 1GB data)")
        parser.add_option("-m", dest="dataset_id", type="str", default="default-dataset",
                          help="ID of the generated Myriad dataset")
        parser.add_option("-x", dest="execute_stages", action="append", type="str", default=[],
                          help="Specify a specific stage to execute")
        parser.add_option("-n", dest="node_config", type="str", default="%s-node.xml" % (self.dgenName),
                          help="name of the node config file (should reside in the config dir)")
        parser.add_option("--config-dir", dest="config_path", type="str", default="%s/config" % (self.basePath),
                          help="path to the myriad config folder (TODO)")
        parser.add_option("--log-dir", dest="log_dir", type="str", default=None, 
                          help="base directory for output logging")
        parser.add_option("--cleanup", dest="cleanup", action="store_true",
                          help="remove output from previously generated job")
            
        self.parser = parser

        args, remainder = parser.parse_args(argv)
        if (len(remainder) != 1):
            self.error(None, True)
            raise
        
        self.log = sysutil.createLogger("myriad.dgen")

        try:
            self.cleanup = args.cleanup
            self.sf = args.sf
            self.datasetID = args.dataset_id
            self.configPath = args.config_path
            self.nodeConfig = args.node_config
            self.logBase = args.log_dir
            self.executeStages = args.execute_stages
            self.configName = remainder.pop()
            
            # load myriad config 
            self.config = config.readConfig(self.dgenName, self.nodeConfig, "%s/%s-frontend.xml" % (self.configPath, self.dgenName))
            # load sizing config
            self.dgenConfig = config.readDGenConfig("%s/%s-node.properties" % (self.configPath, self.dgenName))
            
            DGenNode.MAX_ATTEMPTS = int(self.dgenConfig.getProperty("coordinator.node.max.attempts", DGenNode.MAX_ATTEMPTS))
            DGenNode.DEAD_TIMEOUT = datetime.timedelta(0, 0, 0, int(self.dgenConfig.getProperty("coordinator.node.dead.timeout", DGenNode.DEAD_TIMEOUT.seconds*1000)))
            NodeMonitor.POLL_INTERVAL = int(self.dgenConfig.getProperty("coordinator.node.monitor.interval", NodeMonitor.POLL_INTERVAL*1000))/1000.0
            
            if (self.logBase == None):
                # create log dir
                self.logBase = tempfile.mkdtemp("", "%s-frontend-%s_" % (self.dgenName, self.datasetID))
            
            # make sure that logBase directories exist
            sysutil.checkDir(self.logBase)
            
            # register file handler to the logger
            sysutil.registerFileHandler(self.log, "%s/%s-frontend.log" % (self.logBase, self.dgenName))

        except:
            e = sys.exc_info()[1]
            self.error("unexpected error: %s" % (str(e)), True)
            raise
Example #12
def main():
    options, args = parseCmdLineOpt()
    config_file = options.configFile
    config = readConfig(config_file)
    channel = create_connection(config)
    #produce_msg(channel, "FRED")
    consume_msg(channel)
Example #13
def sethistoryttl(ttl, config, tenantid, deploymentid):
    readConfig(config)
    try:
        res = procdef_list(tenantid, deploymentid)
    except Exception as exc:
        click.echo('Error: rest api returned error ' + str(exc.args))
        return None
    click.echo('Count ' + str(len(res)))
    for adef in res:
        def_id = adef['id']
        def_key = adef['key']
        def_vers = adef['version']
        procdef_set_hittl(def_id, ttl)
        click.echo('Updated:' + def_key + ':' + str(def_vers) + ' ttl:' +
                   str(ttl) + ' id:' + def_id)
    return res
Example #14
def list(config, tenantid):
    readConfig(config)
    try:
        res = deployment_list(tenantid)
    except Exception as exc:
        click.echo('Error: rest api returned error ' + str(exc.args))
        return None
    click.echo('Count ' + str(len(res)))
    for adef in res:
        def_id = adef['id']
        def_name = adef['name']
        def_tid = adef['tenantId']
        def_date = adef['deploymentTime']

        click.echo(def_name + ':' + def_tid + ' date:' + def_date + ' id:' +
                   def_id)
    return res
Example #15
  def exposed_catalog(self, password, log, c1, c2, sv, startTime, winner_int):
    global logNum
    validNames = config.readConfig("login.cfg")
    if validNames['admin']['password'] != password:
      return None
    
    print "logging a game into the database....:"
    print "client1=",c1[0]
    print "client1 version=",c1[1]
    print "client2=",c2[0]
    print "client2 version=",c2[1]
    print "server version=",sv
    print "start time=",startTime
    print "winner=",winner_int

    logNum += 1
    
    c=db.cursor()
    c.execute("SELECT max(id) FROM fwog_web_game")
    max_id = c.fetchone()[0]

    if max_id == None:
      max_id = 0

    filename = 'logs/%s.gamelog.bz2' % str(max_id+1)

    c.execute("SELECT a.id FROM auth_user a, mstusername m WHERE m.username = a.username AND m.mstname = '%s'" % (c1[0].lower(),))
    try:
      c1id = c.fetchone()[0]
    except:
      print "OH NO! Is",c1[0],"registered on the web server??"
      c1id = 0
    c.execute("SELECT a.id FROM auth_user a, mstusername m WHERE m.username = a.username AND m.mstname = '%s'" % (c2[0].lower(),))
    try:
      c2id = c.fetchone()[0]
    except:
      print "OH NO! Is",c2[0],"registered on the web server??"
      c2id = 0

    query = "INSERT INTO fwog_web_game " + \
        "VALUES (%d, %d, %d, %d, %d, %d, %d, '%s','%s')" % \
        (max_id+1, #game log id
         c1id, #client 1
         c2id, #client 2
         c1[1], #client 1 version
         c2[1], #client 2 version
         winner_int%2, #client 1 score
         winner_int/2, #client 2 score
         filename, #log filename
         str(datetime.datetime.now()) ) #current time
    c.execute(query)
    db.commit()
            
    f=open(logdir+filename,'wb')
    f.write(log)
    f.close()
    print "log saved at: ", logdir+filename
    self.update(c1,c2,logNum-1)
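The INSERT in Example #15 is assembled with %-string formatting, so a client name or filename containing a quote character would corrupt the statement. A hedged drop-in sketch for that query-building block is shown below, reusing the same variables from Example #15 (c, max_id, c1id, c2id, winner_int, filename) and assuming the db driver accepts '%s' DB-API placeholders (e.g. MySQLdb; sqlite3 would use '?' instead):

# Sketch only: let the driver handle quoting by passing values as parameters.
query = ("INSERT INTO fwog_web_game "
         "VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)")
params = (max_id + 1,                    # game log id
          c1id,                          # client 1
          c2id,                          # client 2
          c1[1],                         # client 1 version
          c2[1],                         # client 2 version
          winner_int % 2,                # client 1 score
          winner_int // 2,               # client 2 score (integer division, as in the Python 2 original)
          filename,                      # log filename
          str(datetime.datetime.now()))  # current time
c.execute(query, params)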
Example #16
 def exposed_getVersions(self, password):
   validNames = config.readConfig('login.cfg')
   if not ('admin' in validNames and validNames['admin']['password'] == password):
     return False
   
   versions = {}
   for name in listdir('files'):
     versions[name] = len(listdir(join('files', name)))-1
   
   return versions
Example #17
def close(config, before, since, offset, limit):
    readConfig(config)
    apiKey = getApiKey()
    query = alert_makequery('open', before, since)
    try:
        res = alerts_list(apiKey, query, offset, limit)
    except Exception as exc:
        click.echo('Error: opsgenie api returned error ' + str(exc.args))
        return None
    click.echo('Alert Count ' + str(len(res)))
    for adef in res:
        al_id = adef['id']
        try:
            delres = alerts_close(apiKey, al_id, None, None, None)
            click.echo('Closed Alert ' + al_id)
        except Exception as exc:
            click.echo('Error: opsgenie api returned error ' + str(exc.args))
            return None
    return res
Example #18
def main():
    global status, skipped_backups, inconsistencies_found
    #Processing configuration
    rex_config = None
    try:
        config_file_path = os.path.join(fileutils.get_working_dir(), "resources", "config.xml")
        logging.info("Reading config file: " + config_file_path)
        rex_config = config.readConfig(config_file_path)
    except Exception as ex:
        logging.fatal("Failed to parse configuration file. Reason: " + ex.__str__())

    if rex_config:
        #step 1:performing backups
        if len(rex_config.backups) > 0:
            for backup in rex_config.backups:
                if not is_downtime_period(backup):
                    try:
                        perform_backup(backup)
                        add_message(Status.Success, Tasks.Backup, backup.source)
                    except Exception as ex:
                        add_message(Status.Failed, Tasks.Backup,backup.source,ex.__str__())
                        logging.error("Failed to perform backup: " + ex.__str__())
                        break
                    try:
                        if rex_config.performChecks:
                            perform_backup_check(backup)
                            add_message(Status.Success, Tasks.Check, backup.source)
                    except ArchiveIntegrityError as ex:
                        add_message(Status.Failed, Tasks.Check,backup.source,ex.__str__())
                        logging.error("Backup check found some archive inconsistencies: " + ex.__str__())
                        inconsistencies_found+=len(ex.inconsistencies)
                    except Exception as ex:
                        add_message(Status.Failed, Tasks.Check,backup.source,ex.__str__())
                        logging.error("Failed to perform backup check: " + ex.__str__())
                else:
                    skipped_backups += 1
                    add_message(Status.Skipped, Tasks.Backup, backup.source)
                fileutils.clean_tmp()
            if skipped_backups == len(rex_config.backups):
                status = Status.Skipped

        #step 2:performing cleanup
        try:
            perform_backup_cleanup(rex_config)
            add_message(Status.Success, Tasks.Cleanup, fileutils.get_tmp_dir())
        except Exception as ex:
            add_message(Status.Failed, Tasks.Cleanup, fileutils.get_tmp_dir(), ex.__str__())
            logging.error("Failed to perform backup cleanup: " + ex.__str__())

        #step 3: performing reporting
        try:
            if rex_config.performReporting:
                perform_reporting(messages, rex_config.reporterConfig)
        except Exception as ex:
            logging.error("Failed to perform reporting: " + ex.__str__())
Example #19
def findAndReadConfigFile():
    # check the user's ~/.unidata/TDSter directory first, then the TDSTER_CONF environment variable
    conf = None
    found = False
    for loc in [os.path.expanduser("~/.unidata/TDSter"), os.environ.get("TDSTER_CONF")]:
        if loc is not None and not found:
            testConfFile = os.path.join(loc,confFileName)
            if os.path.isfile(testConfFile):
                conf = config.readConfig(testConfFile)
                found = True
    return conf
Example #20
 def updateClients(self):
   validNames = config.readConfig('login.cfg')
   password = validNames['admin']['password']
   
   updateServer=rpyc.connect(updateManagerName,18862)
   versions = updateServer.root.getVersions(password)
   self.serverVersion = versions['server']
   
   for i in versions:
     if i in validNames and validNames[i]['player']:
       if i not in self.scheduler.teamlist or self.scheduler.teamlist[i] != versions[i]:
         self.scheduler.updateQueue(i, versions[i])
Example #21
def list(config, tenantid, deploymentid, showttl):
    readConfig(config)
    try:
        res = procdef_list(tenantid, deploymentid)
    except Exception as exc:
        click.echo('Error: rest api returned error ' + str(exc.args))
        return None
    click.echo('Count ' + str(len(res)))
    for adef in res:
        def_id = adef['id']
        def_key = adef['key']
        def_vers = adef['version']

        if showttl != None:
            my_procdef = procdef_get(def_id)
            def_hittl = my_procdef['historyTimeToLive']
            click.echo(def_key + ':' + str(def_vers) + ' id:' + def_id +
                       ' ttl:' + str(def_hittl))
        else:
            click.echo(def_key + ':' + str(def_vers) + ' id:' + def_id)
    return res
Example #22
 def updateClients(self):
   validNames = config.readConfig('login.cfg')
   password = validNames['admin']['password']
   
   updateServer=rpyc.connect(updateManagerName,18862)
   versions = updateServer.root.getVersions(password)
   self.serverVersion = versions['server']
   
   for i in versions:
     if i in validNames and validNames[i]['player']:
       if i not in self.scheduler.teamlist or self.scheduler.teamlist[i] != versions[i]:
         self.scheduler.updateQueue(i, versions[i])
Example #23
def list(config, status, before, since, offset, limit):
    readConfig(config)
    apiKey = getApiKey()
    query = alert_makequery(status, before, since)
    try:
        res = alerts_list(apiKey, query, offset, limit)
    except Exception as exc:
        click.echo('Error: opsgenie api returned error ' + str(exc.args))
        return None
    click.echo('Alert Count ' + str(len(res)))
    for adef in res:
        al_id = adef['id']
        adata = alerts_get(apiKey, al_id)
        al_alias = adata['alias']
        al_status = adata['status']
        al_desc = adata['description']
        al_createdAt = adata['createdAt']
        
        click.echo('Alert ' + al_alias + ' createdAt:' + al_createdAt + ' status:' + al_status + ' desc:' + al_desc)
        click.echo()
    return res
Example #24
  def exposed_get(self, password, name, version):
    validNames = config.readConfig('login.cfg')
    if validNames['admin']['password'] != password:
      return None
    
    print 'sending out',name,'version',version

    if exists( join('files', name,str(version)+'.tar.bz2') ):
      return open( join('files', name,str(version)+'.tar.bz2'),'rb').read()

    else:
      return None
Example #25
def listinstances(config, tenantid, deploymentid):
    readConfig(config)
    try:
        res = procdef_list(tenantid, deploymentid)
    except Exception as exc:
        click.echo('Error: rest api returned error ' + str(exc.args))
        return None
    click.echo('Count ' + str(len(res)))
    for adef in res:
        def_id = adef['id']
        def_key = adef['key']
        def_vers = adef['version']

        my_history = procinst_for_procdefid(def_id, deploymentid)

        click.echo(def_key + ':' + str(def_vers) + ' id:' + def_id +
                   ' instances:' + str(len(my_history)))

        for phist in my_history:
            click.echo(json.dumps(phist))

    return res
Example #26
def bulkset(prefix, config, timeout):
    if timeout == None:
        click.echo('Must specify --timeout option in minutes')
        return None
    timeout = int(timeout)
    readConfig(config)
    apiKey = getApiKey()
    click.echo('[ bulkset prefix=' + prefix + ' timeout=' + str(timeout) + ' ]')
    try:
        hbjson = hb_getlist(apiKey)
    except Exception as exc:
        click.echo('Error: opsgenie api returned error ' + str(exc.args))
        return None
    click.echo('Heartbeat Count ' + str(len(hbjson)))
    for hbdef in hbjson:
        hbname = hbdef['name']
        if not hbname.startswith(prefix):
            # skip non-matching heartbeat record
            continue
        hbtimeout = hbdef['interval']
        if hbtimeout != timeout:
            # retry loop to patch heartbeat record
            while True:
                try:
                    hbres = hb_patch(apiKey, hbname, timeout)
                    click.echo('HB ' + hbname + ' timeout old:' + str(hbtimeout) + ' new:' + str(timeout))
                    break
                except Exception as exc:
                    if exc.args[0] == 429:
                        # hit api limit, pause and try again
                        time.sleep(2)
                        continue
                    else:
                        click.echo('Error: opsgenie api returned error ' + str(exc.args))
                        return None
    return hbjson
Example #27
 def setup(self, args, remainder):
     self.sf = args.sf
     self.configDir = args.config_dir
     self.datasetID = args.dataset
     self.configName = remainder.pop()
     self.logBase = args.log_dir
     self.execute = args.execute
     
     # load myriad config 
     self.config = config.readConfig("%s/myriad-config.xml" % (self.configDir))
     # load cloud config
     self.cloud = self.config.cloudForDgenConfig(self.configName)
     
     # extend common parameters dictionary
     self.commonParams.update({ 'dataset.id': self.datasetID })  
Example #28
    def setup(self, args, remainder):
        self.sf = args.sf
        self.configDir = args.config_dir
        self.datasetID = args.dataset
        self.configName = remainder.pop()
        self.logBase = args.log_dir
        self.execute = args.execute

        # load myriad config
        self.config = config.readConfig("%s/myriad-config.xml" %
                                        (self.configDir))
        # load cloud config
        self.cloud = self.config.cloudForDgenConfig(self.configName)

        # extend common parameters dictionary
        self.commonParams.update({'dataset.id': self.datasetID})
Example #29
 def run(self):
     # try to read config files
     [authConfig, alarmsConfig, httpdConfig] = \
         config.readConfig(self.data_dir, self.source_dir)
     # create the page handler
     self.authHandler = AuthHandler(authConfig)
     # create action handler
     self.actionHandler = ActionHandler(alarmsConfig, self.authHandler)
     # create the alarms
     self.alarms = {'now': Clock()}
     for (name, alarm) in self.actionHandler:
         self.alarms[name] = AlarmClock(**alarm)
     # create the server
     Server._alarms = self.alarms
     self.server = Server.startServer(**httpdConfig)
     # run the server
     self.server.serve_forever()
Example #30
 def run(self):
     # try to read config files
     [authConfig, alarmsConfig, httpdConfig] = \
         config.readConfig(self.data_dir, self.source_dir)
     # create the page handler
     self.authHandler = AuthHandler(authConfig)
     # create action handler
     self.actionHandler = ActionHandler(alarmsConfig, self.authHandler)
     # create the alarms
     self.alarms = {'now': Clock()}
     for (name, alarm) in self.actionHandler:
         self.alarms[name] = AlarmClock(**alarm)
     # create the server
     Server._alarms = self.alarms
     self.server = Server.startServer(**httpdConfig)
     # run the server
     self.server.serve_forever()
Example #31
 def update(self, name, version, target):
   shutil.rmtree(target)
   os.mkdir(target)
   try:
     updateServer=rpyc.connect(updateServerName,18862)
     validNames = config.readConfig("login.cfg")
     password = validNames['admin']['password']
     tar = updateServer.root.get(password, name, version)
     
     s = StringIO(decompress(tar))
     f = TarFile(mode='r', fileobj=s)
     
     f.extractall(target)
     print "updated",name
     return True
   except:
     traceback.print_exc()
     print "Failed to connet to the update server :("
     return False
Example #32
def main():
    # Creating/loading the chatbot
    my_bot = load_bot('chatterbot.pkl')

    # Setting up twitter functionality
    api = create_twitter_api()
    since_id = readConfig()

    while True:
        new_messages, since_id = checkForDMs(api, since_id)
        if new_messages:
            for message in reversed(new_messages):
                senderID = message[0]
                text = message[1]
                sender = message[2]
                response = my_bot.get_response(text)
                print('Sender: ', sender)
                print('Text: ', text)
                print('Response: ', response)
                api.send_direct_message(senderID, response.text)
        time.sleep(60)
Example #33
 def exposed_update(self, name, password, binary):
   #updates a program (a client, unless name=server)
   validNames = config.readConfig('login.cfg')
   if not (name in validNames and validNames[name]['password'] == password):
     return False
   
   if isdir(join('files', name)):
     #already have a version, now increment by 1
     #this is a little sensitive to the number of files in the directory
     #if we expect it to get polluted we should use a better method than this
     v = len(listdir(join('files', name)))
   elif exists(join('files', name)):
     print 'Crap! Trying to make %s but it\'s not a directory!' % name
     return False
   else:
     mkdir(join('files', name))
     v = 0
   b = open(join('files', name,str(v)+'.tar.bz2'),'wb')
   b.write(binary)
   b.close()
   
   return True
Example #34
# Django settings for babaji project.
import os
import config as cfg
CONFIG = cfg.readConfig()

HOME = CONFIG["HOME"] 

DEBUG = CONFIG["DEBUG"] 
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('Yogesh Tomar', '*****@*****.**'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'data/sqlite.db',                      # Or path to database file if using sqlite3.
        'USER': '',                      # Not used with sqlite3.
        'PASSWORD': '',                  # Not used with sqlite3.
        'HOST': '',                      # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '',                      # Set to empty string for default. Not used with sqlite3.
    }
}

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
Example #35
import zmq
import casadi as C
from casadi import pi

from config import readConfig
import models
from newton.nmhe import Nmhe

if __name__=='__main__':
    context   = zmq.Context(1)
    publisher = context.socket(zmq.PUB)
    publisher.bind("tcp://*:5563")

    print "reading config..."
#    conf = readConfig('stingray.ini','configspec.ini')
    conf = readConfig('config.ini','configspec.ini')
    
    print "creating model..."
    dae = models.crosswind(conf)#,extraParams=['endTime'])
    
    nk = 30

    # setup MHE
    mhe = Nmhe(dae,nk)

    # bounds
    mhe.bound('aileron',(-0.04,0.04))
    mhe.bound('elevator',(-0.1,0.3))

    mhe.bound('x',(-200,200))
    mhe.bound('y',(-200,200))
Example #36
  def exposed_runGame(self, c1, c2, sv):
    #check versioning:
    self.update(c1[0], c1[1], 'client1')
    self.update(c2[0], c2[1], 'client2')
    self.update('server',sv, 'server')
    
    startTime = time.time()
    
    server = self.startServer()
    time.sleep(1)
    
    player1 = self.startClient1()
    time.sleep(1)
    print 'Waiting for game created'
    if not exists(join('server', 'created')):
      player1.kill()
      server.kill()
      return -1
    
    
    player2 = self.startClient2()
    time.sleep(1)
    
    print 'Waiting for game started'
    if not exists(join('server', 'started')):
      print 'aww'
      player1.kill()
      player2.kill()
      server.kill()
      return -2
    
    print 'playing game'
    #wait for gamelog
    while time.time() < startTime + 600:
      server.poll()
      if server.returncode is not None:
        break
      time.sleep(5)
    
    print 'the game is finished!'
    
    if server.returncode is None:
      player1.kill()
      player2.kill()
      server.kill()
      return 0
    
    print 'I wonder who won!'
    
    time.sleep(2)
    player1.kill()
    player2.kill()
    
    
    logfile=open('server/logs/1.gamelog.bz2','rb')
    log=logfile.read()

    if not exists(join('server', 'winner')):
      win_val = 0
    elif 'Player 0 wins' in file(join('server', 'winner'), 'r').read():
      win_val = 1
    else:
      win_val = 2

    try:
      validNames = config.readConfig("login.cfg")
      password = validNames['admin']['password']
      
      dbServer=rpyc.connect(dbManagerName,18863)
      dbServer.root.catalog(password, log, c1, c2, sv, startTime, win_val)
    except:
      traceback.print_exc()
    
    return win_val
Example #37
        return

    ranges = []
    rangeStrings = args.ranges
    rangePattern = re.compile(r"(?P<chr>.*?):(?P<begin>.*?)-(?P<end>.*?)$")
    if rangeStrings != None:
        for string in rangeStrings:
            rangeProps = rangePattern.match(string)
            try:
                newRange = chromRange(
                    rangeProps.group("chr"),
                    int(rangeProps.group("begin")),
                    int(rangeProps.group("end"))
                )
                ranges.append(newRange)
            except AttributeError, e:
                print >> stderr, "ignoring range \"" + string + "\": improperly formatted"


    if args.configPath != None:
        updateConfigPath(args.configPath)
    readConfig()

    try:
        vcfStream = fetchVcfPipe(args.sampleNames, ranges=ranges)
        print vcfStream.read()[:-1]
    except urllib2.HTTPError, e:
        print >> stderr, "HTTP error: %d" % e.code
    except urllib2.URLError, e:
        print >> stderr, "Network error: %s" % e.reason.args[1]
Example #38
  modulesList = []
  modules = []

  def __init__(self, config):
    self.path = [os.path.expanduser(path) for path in config["path"]]
    self.modulesList = config["modules"]
    self.modules = self.loadModules()
    
  def loadModules(self, path=None):
    if not path:
      path = self.path
    oldPath = sys.path
    sys.path = path
    modules = {}
    failedModules = []
    for item in self.modulesList:
      try:
        modules[item.lower()] = getattr(__import__(item),item)()
      except Exception:
        failedModules.append(item)
    sys.path = oldPath
    if failedModules:
      raise Exception("Failed to load modules", failedModules)
    return modules
          
if __name__ == "__main__":
  import config
  test = ModuleManager(config.readConfig()["modules"])
  print test.modules
  print locals()
  print test.modules["introspection"].populate(dir())
Example #39
import config
import os
from os import path
import requests
import json
import cryptoLogic

HOME_PATH = os.path.expanduser('~')
URL_SECTION_NAME = "registration"

registrationUrl = config.readConfig(URL_SECTION_NAME, 'register_User')

TOKEN = config.readConfig("token")


def registerUser(userData):

    response = requests.post(registrationUrl,
                             data=json.dumps(userData),
                             headers={"cloud9_token": TOKEN})

    if response.status_code == 200:
        store_seed(userData, json.loads(response.text)["salt"])
        return True
    else:
        return False


def store_seed(userData, response):
    is_accessible = os.access(
        HOME_PATH, os.F_OK)  # Check if you have access, this should be a path
Example #40
def main():
    api = create_twitter_api()
    loop = True
    since_id = readConfig()
    parseTweets(api, '@elonmusk')
Example #41
import os
from os import path
import requests
import json
import cryptoLogic
import config

URL_SECTION_NAME = "login"

VALIDATE_USER_URL = config.readConfig(URL_SECTION_NAME, 'validate_user')
VALIDATE_OTP_URL = config.readConfig(URL_SECTION_NAME, 'validate_otp')
SEND_OTP_URL = config.readConfig(URL_SECTION_NAME, 'send_otp')

TOKEN = config.readConfig("token")
HOME_PATH = os.path.expanduser('~')


def getLocalSeed(userId):
    seedName = userId + "_seed.enc"
    SEED_LOCATION = os.path.join(HOME_PATH, seedName)
    print(SEED_LOCATION)
    if (path.exists(SEED_LOCATION)):
        temp = open(SEED_LOCATION, "r")
        data = temp.read()
        temp.close()
        return data
    return False


def isUserValid(userId, password, otp=""):
    userData = json.dumps({"emailid": userId, "password": password})
Example #42
import config
import os
from os import path
import requests
import json
from werkzeug.utils import secure_filename

URL_SECTION_NAME = "loan"
LOAN_URL = config.readConfig(URL_SECTION_NAME, 'loan_update')
LOAN_FILE_UPLOAD_URL = config.readConfig(URL_SECTION_NAME, 'uploads')

TOKEN = config.readConfig("token")


def appLoan(userData):
    dataToSend = json.loads(
        json.dumps({
            "emailid": userData["emailid"],
            "send_mail": "true",
            "update_user_loan": "true",
            "username": userData["name"],
            "application_status": "submitted",
            "loan_amount": userData["amount"],
            "loan_tenure_in_days": userData["time"],
            "dob": userData["dob"],
            "annual_income": userData["income"]
        }))

    file = userData["loanFile"]
    file_data = file.read()
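Examples #39, #41 and #42 read individual settings with config.readConfig(section, key) and config.readConfig("token"); the project's own config module is not shown on this page. The sketch below is only a guess at a minimal configparser-backed implementation with a compatible call shape; the config.ini filename and the single-argument [token] lookup are assumptions.

import configparser
import os

# Hypothetical readConfig compatible with the calls above; the real module
# may differ.  Assumes an INI file named config.ini next to this module.
_CONFIG_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'config.ini')

def readConfig(section, key=None):
    parser = configparser.ConfigParser()
    parser.read(_CONFIG_PATH)
    if key is None:
        # single-argument form, e.g. readConfig("token") -> [token] token = ...
        return parser.get(section, section)
    return parser.get(section, key)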
Example #43
def main():
    document = None
    page_num = 0
    options, args = parseCmdLineOpt()
    config_file = options.configFile
    config = readConfig(config_file)
    channel = create_connection(config)
    documents = []
    scanner = init_scanner(config) 
    for output, image_bar in start_scanning(scanner):
        for symbol in image_bar:
            logger.debug("symbol %s" % symbol.data)
            for clef, valeur in config.items("type"):
                if re.match(valeur, symbol.data):
                    page_num = 0
                    logger.debug("new document detected")
                    document = Document()
                    logger.debug(document)
                    document.name = symbol.data
                    document.genre = clef
                    documents.append(document)
                    break
            if re.match(("^%s.*" % config.get("workflow", "key")), symbol.data):
                document.workflows.append(symbol.data)
        page_num += 1
        if document is not None:
            filename = "%s_%s.tiff" % (document.name, str(page_num))
        else:
            document = Document()
            filename = "undefined_%s_%s.tiff" % (datetime.today().strftime("%Y%m%d-%H%M%S"), str(page_num))
            document.name = "undefined_%s" % datetime.today().strftime("%Y%m%d-%H%M%S")
            documents.append(document)
        filepath = os.path.join(config.get("output", "tmp_dir"), filename)
        output.save(filepath, 'TIFF')
        document.files.append(filepath)
        for symbol in image_bar:
            document.barcodes.append(symbol.data)
        print str(document)

        ocr = Ocr()
        ocr.bin_path = config.get("ocr","ocr_bin")
        ocr.input_image = filepath
        ocr.parse()
        document.ocr.append(ocr.content)
        if config.get("scanner", "interactive") == "True":
            input_cmd = raw_input("press enter to scan the next document or 'quit' to leave ")
            if input_cmd == "quit":
                break
    for document in documents:
        logger.debug("documents : %s" % documents)
        logger.debug("document : %s" % document)
        old_files = document.files
        logger.debug("  old_files :")
        logger.debug(old_files)
        document.files = []
        file_format = config.get("output", "file_format")
        for image in old_files:
            logger.debug("     image : %s" % image)
            document.files.append(convert_file(image,file_format))
        if file_format == "PDF":
            new_file = merge_files(document.files,file_format)
            document.files = [new_file]
        logger.debug("we gonna produce a new message in the queue for %s" % document)
        produce_msg_document(channel, document, config.get("queues", "documents"))
Example #44
import zmq
import casadi as C
from casadi import pi

from config import readConfig
import models
from newton.nmhe import Nmhe

if __name__ == '__main__':
    context = zmq.Context(1)
    publisher = context.socket(zmq.PUB)
    publisher.bind("tcp://*:5563")

    print "reading config..."
    #    conf = readConfig('stingray.ini','configspec.ini')
    conf = readConfig('config.ini', 'configspec.ini')

    print "creating model..."
    dae = models.crosswind(conf)  #,extraParams=['endTime'])

    nk = 30

    # setup MHE
    mhe = Nmhe(dae, nk)

    # bounds
    mhe.bound('aileron', (-0.04, 0.04))
    mhe.bound('elevator', (-0.1, 0.3))

    mhe.bound('x', (-200, 200))
    mhe.bound('y', (-200, 200))
Example #45
    def cleanup(self):
        # close all connections
        db.closeAll()
        exit()


'''
Startup
'''
if __name__ == '__main__':
    app = tornado.web.Application([
        ('/process', Process)
    ])
    # handle configuration parameters
    options.parse_command_line()
    config.readConfig(options.cfg)
    # initialize logging
    _port = config.d['db']['base_port']
    logFile = '%s_db.log' % (config.d['serv_id'])
    _log.init_log(config.d.get('log') + logFile, _port)
    # initialize db
    # mongoConfig = config.d.get('mongodb')
    # uri = 'mongodb://'
    # user = mongoConfig.get('user', None)
    # pwd = mongoConfig.get('pwd', None)
    # if user and pwd:
    #     uri += '%s:%s@'%(user, pwd)
    # uri += '%s:%s/admin'%(mongoConfig.get('host'), mongoConfig.get('port'))
    # db.connect_mongo(uri, mongoConfig.get('db'))
    # db.initRedis(config.d.get('redis'))
    # initialize mysqlDb
Example #46
    timestamp = t.strftime('%d.%m.%Y %H:%M:%S')
    print("%s %s" % (timestamp, msg))


def getTimeStamp():
    return datetime.datetime.now().strftime('%d.%m.%Y %H:%M:%S')


GPIO.setmode(GPIO.BCM)

GPIO.setup(23, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(GPIO_PFO_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)

GPIO.add_event_detect(23, GPIO.BOTH, callback=gpio_callback_23, bouncetime=10)

config.readConfig()

try:
    while True:
        # 		try:
        # 			resp = gsmModule.sendATCommand("AT+CNUM\r")
        # 			print(resp)
        # 		except:
        # 			pass
        # 		try:
        # 			resp = gsmModule.sendATCommand("AT+CNMI?\r")
        # 			print(resp)
        # 		except:
        # 			pass
        #
        # 		print("Read SMS")
Example #47
import evaluate
import payout
#import db
import config

import logging
import time
import threading
#import requests
import json

# Evaluate node data
# After the end of every 10 minute period, after 1 minute run evaluation of the last 2+ slots
# After the end of every 4-hours, reevaluate the last 1+ days

config = config.readConfig()
logging.basicConfig(level=logging.INFO)

evaluate_period_last_started = 100000
evaluate_period_last_finished = 100000
evaluate_daily_last_started = 100000
evaluate_daily_last_finished = 100000

def get_logger():
    # Get named logger
    return logging.getLogger(__name__)

def start_job():
    global config
    time.sleep(30)
    get_logger().info('Started')
Example #48
        # Create snapshot
        if (not recursed): 
            # Only done once at top level, recursed is the path for 
            # 'try again with a full backup'
            cmd = ['lvcreate', '-s', '-L', self.snapsize, '-n', 
                self.snapshot_name, self.lvmpath]
            #print cmd
            subprocess.check_call(cmd)
            cmd = ['mount', '-t', 'auto', self.snapshot_path, self.root]
            #print cmd
            subprocess.check_call(cmd)
        try:
            BackupTarget.Run(self, recursed=recursed)
        finally:
            if (not recursed): 
                # Only done once at top level, recursed is the path for 
                # 'try again with a full backup'
                # release snapshot - making sure to do that in a cleanup 
                # handler so we release the snap no matter what
                cmd = ['umount', self.root]
                #print cmd
                subprocess.check_call(cmd)
                cmd = ['lvremove', '-f', self.snapshot_path]
                #print cmd
                subprocess.check_call(cmd)

if __name__ == '__main__':
    import config
    backup = config.readConfig(sys.argv)
    backup['backup'].Run()
Example #49
import config

config.printConfig()
config.readConfig()

print "\n"

config.printConfig()
config.speed = 0
config.camera = "Cannon EOS5"
config.writeConfig()

print "\n"
config.printConfig()
Example #50

if __name__ == '__main__':
    """Направляет следующие напоминания:
    1) Если у пользователя стоит Рассматривается = 1, то находит обращения, по которым уже идет работа,
    а они висят в статусе новое или принято. Меняет им статус на Рассматривается
    2) Ищет все обращения пользователя со статусом задания выданы, у которых все задания закрыты выполенны или
    отменены и предалгает их закрыть.
    3) Находит все обращения по которым пользователи не отвечают больше 5 дней
    4) Находит все обращения в статусе новые.
    5) Находит все обращения висящие в статусе принято более 5 дней.


    """
    # читаю конфигурационный файл
    DB, users, work, err = config.readConfig()
    if err:
        exit(1)
    # проверяю конфигурационный файл
    err = config.configValidator(DB, users, work)
    d = work['noActiveObr']
    if err:
        print('В конфигурационном файле найдена ошибка(и):', err)
        exit(1)
    # пробуем соединится с БД
    try:
        con = pypyodbc.connect('DRIVER=FreeTDS; SERVER=%s; PORT=%s; DATABASE=%s; UID=%s; PWD=%s; TDS_Version=8.0; ClientCharset=UTF8;'
                               % (DB['adr'], DB['port'], DB['name'], DB['user'], DB['pwd']))
        cur = con.cursor()
    except :
        print("Возникла ошибка при соединении с БД")
Example #51
# -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
from selenium.common.exceptions import NoAlertPresentException
import unittest, time, datetime, re
import pypyodbc
import os, sys
import test_1005
import config
import smev


TI, ASP, LK, err = config.readConfig()
if err > 0:
    print('Error reading the configuration file')
    exit(1)
addr = 'http://%s:%s/%s/' % (ASP['adr'], ASP['port'], ASP['url'])


def delTI():
        """Удаляет загруженные ранее записи для 1005 сервиса по району 159"""
        DB = TI
        conS = "DRIVER=FreeTDS; SERVER=%s; PORT=%s; DATABASE=%s; UID=sa; PWD=%s; TDS_Version=8.0; ClientCharset=UTF8" \
               % (DB['DB_address'], DB['DB_port'], DB['DB_name'], DB['DB_password'])
        try:
            conTI = pypyodbc.connect(conS)
        except:
            print("Возникла ошибка при соединении с БД АСП")
Example #52
                not recursed
        ):  # only done once at top level, recursed is the path for 'try again with a full backup'
            cmd = [
                'lvcreate', '-s', '-L', self.snapsize, '-n',
                self.snapshot_name, self.lvmpath
            ]
            print cmd
            subprocess.check_call(cmd)
            cmd = ['mount', '-t', 'auto', self.snapshot_path, self.root]
            print cmd
            subprocess.check_call(cmd)
        try:
            BackupTarget.Run(self, recursed=recursed)
        finally:
            if (
                    not recursed
            ):  # only done once at top level, recursed is the path for 'try again with a full backup'
                # release snapshot - making sure to do that in a cleanup handler so we release the snap no matter what
                cmd = ['umount', self.root]
                print cmd
                subprocess.check_call(cmd)
                cmd = ['lvremove', '-f', self.snapshot_path]
                print cmd
                subprocess.check_call(cmd)


if (__name__ == '__main__'):
    import config
    backup = config.readConfig(sys.argv)
    backup['backup'].Run()