def __init__(self, ip, port, pid_file):
    self.register_options()
    common_config.init(sys.argv[1:])
    logger.init_logger()
    LOG.debug('Full set of CONF:')
    cfg.CONF.log_opt_values(LOG, syslog.DEBUG)
    self._ip = ip
    self._port = port
    super(DeamonMain, self).__init__(pid_file)
def main():
    config.init(sys.argv[1:])
    config.setup_logging()
    n_utils.log_opt_values(LOG)
    if not CONF.config_file:
        sys.exit(
            _("ERROR: Unable to find configuration file via the default"
              " search paths (~/.neutron/, ~/, /etc/neutron/, /etc/) and"
              " the '--config-file' option!"))
    run()
def main():
    register_options()
    common_config.init(sys.argv[1:])
    config.setup_logging()
    server = neutron_service.Service.create(
        binary='neutron-dhcp-agent',
        topic=topics.DHCP_AGENT,
        report_interval=cfg.CONF.AGENT.report_interval,
        manager='nspagent.dhcp.agent.DhcpAgentWithStateReport')
    service.launch(server).wait()
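# A minimal sketch of the oslo.config pattern that register_options() and
# common_config.init() wrap in the entry points above. The option name and
# group mirror cfg.CONF.AGENT.report_interval; the default and help text are
# assumptions for illustration, not the agent's actual option set.
from oslo_config import cfg

agent_opts = [
    cfg.IntOpt('report_interval', default=30,
               help='Seconds between state reports to the server.'),
]
cfg.CONF.register_opts(agent_opts, group='AGENT')

def init(argv):
    # Parse CLI args (including any --config-file) into the global cfg.CONF.
    cfg.CONF(argv, project='neutron')

init([])  # a real run would pass sys.argv[1:]
print(cfg.CONF.AGENT.report_interval)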
def run(func, args):
    config.init('../xmpp.conf')
    client_jid = config.get('users', 'client_jid')
    client_password = config.get('users', 'client_password')
    server_jid = config.get('users', 'server_jid')
    session = Remote.new_session(client_jid, client_password)
    endpoint = session.new_proxy(server_jid + '/rpc', TestRunner)
    job = Future()
    heartbeat = Heartbeat(job, endpoint.ping, session)
    heartbeat.start()
    # 'async' became a reserved keyword in Python 3.7, so fetch the legacy
    # RPC-proxy method via getattr instead of writing endpoint.async(job).
    getattr(getattr(endpoint, 'async')(job), func)(*args)
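# Hedged usage sketch for run() above: 'echo' and its argument are
# hypothetical RPC method names, not part of the TestRunner interface shown
# here. The point is only that func names a method on the remote proxy and
# args is the positional-argument list forwarded to it.
if __name__ == '__main__':
    run('echo', ['hello'])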
def main():
    config.init(sys.argv[1:])
    config.setup_logging()
    try:
        daemon = MlxEswitchDaemon()
        daemon.start()
    except Exception as e:
        LOG.exception("Failed to start EswitchDaemon - "
                      "Daemon terminated! %s", e)
        sys.exit(1)
    daemon.daemon_loop()
def __init__(self):
    print('geek mode: ' + str(self.jike))
    self.src.link(self.ch1)
    self.ch1.link(self.kws)
    self.src.link(self.doa)

    def on_detected(keyword):
        position = self.doa.get_direction()
        pixels.wakeup(position)
        voice = os.path.join(
            constants.DATA_PATH, 'sysvoices',
            'sysvoice' + str(random.randint(1, 8)) + '.mp3')
        direction = pixels.positionToDirection()
        print('detected {} at direction {} is {}'.format(
            keyword, position, direction))
        # Play a greeting voice clip after wake-up.
        pixels.speak(voice)
        print(str(keyword) + voice)
        record.speech('你在我' + direction)  # speaks "You are to my <direction>"
        if self.jike:
            print('entering geek mode')
            # Create worker thread 01 for geek-mode instructions.
            thread_01 = Thread(target=instructions.jikeThread,
                               args=("jikeThread01", record, pixels))
            # Start thread 01.
            thread_01.start()
        else:
            # Start recording; speech recognition returns text, which is
            # passed to instructionsFunc for handling and playback.
            outputtext = record.record()
            instructions.instructionsFunc(outputtext, record, pixels)

    self.kws.set_callback(on_detected)
    self.src.recursive_start()
    config.init()
def login_handler(event, context):
    client_request_data = config.init(event)
    try:
        body = json.loads(event["body"])
        headers = event["headers"]
        ttnOauthAccessToken = body["ttn_oauth_access_token"]
        client_id = headers["client_id"]
    except Exception as e:
        traceback.print_exc()
        # e.message was removed in Python 3; str(e) works on both.
        return Builder.bad_request_error_message(str(e))
    builder = Builder(target=authenticator.login,
                      args=(ttnOauthAccessToken, client_id,
                            client_request_data))
    return builder.build()
def main(argv):
    # The configuration will be read into the cfg.CONF global data structure.
    config.init(sys.argv[1:])
    cfgFile = 'None'
    testScript = 'None'
    # Parse command line arguments.
    try:
        opts, args = getopt.getopt(argv, "hc:s:",
                                   ["config-file=", "test-script="])
    except getopt.GetoptError:
        print(utils.Utils.helpText())
        raise
    for opt, arg in opts:
        if opt == '-h':
            print(utils.Utils.helpText())
            sys.exit()
        elif opt in ("-c", "--config-file"):
            cfgFile = arg
        elif opt in ("-s", "--test-script"):
            testScript = arg
        else:
            print(utils.Utils.helpText())
            sys.exit(1)
    try:
        # String equality, not identity: 'is' against a literal is a bug.
        if cfgFile == 'None':
            print(utils.Utils.helpText())
            sys.exit(_("FATAL : Unable to proceed without configuration file"))
        elif testScript == 'None':
            print(utils.Utils.helpText())
            sys.exit(_("FATAL : Unable to proceed without test script"))
        elif not cfg.CONF.config_file:
            sys.exit(_("FATAL : Unable to load the configuration."
                       " Please rerun with --help option"))
        # Validate the input arguments.
        # logger = Log
        # TO-DO: Park for now.. will work on this later
        try:
            host = 'localhost'  # cfg.CONF.host
        except Exception:
            host = 'localhost'
        # Get a free port number to start the flask server.
        try:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.bind((host, 0))
            port = sock.getsockname()[1]
            sock.close()
        except OSError:
            # NOTE: if binding fails, port is left unset below.
            pass
        threadExeCore = executionCore.ExecutionCore(cfg, cfgFile, testScript,
                                                    host, port,
                                                    utils.comm_queue)
        threadExeCore.start()
        objEvent = eventListner.EventListner(host, port, utils.comm_queue,
                                             threadExeCore.sessionDir)
        objEvent.run()
    except KeyboardInterrupt:
        pass
    except RuntimeError as e:
        sys.exit(_("ERROR: %s") % e)
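# The bind-to-port-0 trick above asks the kernel for an unused ephemeral
# port. A self-contained sketch of the same idea; the helper name is
# illustrative, not part of the code above:
import socket

def find_free_port(host='localhost'):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind((host, 0))      # port 0: let the OS pick a free port
        return sock.getsockname()[1]
    finally:
        sock.close()

print(find_free_port())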
class ApiClient:
    config.init()

    @staticmethod
    def get_all_rhics():
        c = config.get_rhic_serve_config_info()
        status, data = request(c['host'], c['port'], '/api/rhic/',
                               c['user'], c['passwd'], False)
        if status == 200:
            return data
        raise Exception(status, data)

    @staticmethod
    def get_rhic(rhic_id):
        c = config.get_rhic_serve_config_info()
        # NOTE: rhic_id is currently ignored; a hard-coded id is queried.
        api = '/api/rhic/503e31fdd9c1416fd0000003/'
        #status, data = request(c['host'], c['port'], api, c['user'], c['passwd'], False)
        data = request(c['host'], c['port'], api, c['user'], c['passwd'], False)
        return data
        #if status == 200:
        #    return data
        #raise Exception(status, data)

    @staticmethod
    def get_account():
        c = config.get_rhic_serve_config_info()
        api = '/api/account/'
        #status, data = request(c['host'], c['port'], api, c['user'], c['passwd'], False)
        data = request(c['host'], c['port'], api, c['user'], c['passwd'], False)
        return data

    @staticmethod
    def get_contract(api):
        c = config.get_rhic_serve_config_info()
        #status, data = request(c['host'], c['port'], api, c['user'], c['passwd'], False)
        data = request(c['host'], c['port'], api, c['user'], c['passwd'], False)
        return data

    @staticmethod
    def get_rhic_details(RHIC):
        id = getRHICdata(RHIC)
        c = config.get_rhic_serve_config_info()
        if id:
            api = '/api/rhic/' + id + '/'
            #status, data = request(c['host'], c['port'], api, c['user'], c['passwd'], False)
            data = request(c['host'], c['port'], api, c['user'], c['passwd'], False)
            return data

    @staticmethod
    def getRHIC_in_account():
        c = config.get_rhic_serve_config_info()
        api = '/api/account/'
        #status, data = request(c['host'], c['port'], api, c['user'], c['passwd'], False)
        data = request(c['host'], c['port'], api, c['user'], c['passwd'], False)
        account_doc = data[0]
        print(account_doc)
        account_id = account_doc[0]['account_id']
        api = '/api/rhic/'
        data = request(c['host'], c['port'], api, c['user'], c['passwd'], False)
        #all_rhics = json.loads(data[0])
        all_rhics = data
        my_rhics = []
        for rhic in all_rhics:
            if rhic['account_id'] == account_id:
                my_rhics.append(rhic['uuid'])
        return my_rhics
def checkin_data():
    # config fail/pass on missing rhic
    config.init()
    c = config.get_import_info()
    results = []
    # debug
    format = "%a %b %d %H:%M:%S %Y"
    start = datetime.utcnow()
    time = {}
    time['start'] = start.strftime(format)
    # debug
    hr_fmt = "%m%d%Y:%H"
    # Committing every 100 records instead of every 1 record saves about 5
    # seconds.
    commit_count = 100
    cached_rhics = {}
    cached_contracts = {}
    rds = []
    for pu in ProductUsage.objects.all():
        uuid = pu.consumer
        if uuid in cached_rhics:
            rhic = cached_rhics[uuid]
        else:
            try:
                _LOG.info('using RHIC: ' + uuid)
                rhic = RHIC.objects.filter(uuid=uuid)[0]
                cached_rhics[uuid] = rhic
            except IndexError:
                _LOG.critical('rhic not found @ import: ' + uuid)
                if c['continue_on_error'] == 0:
                    raise Exception('rhic not found: ' + uuid)
                else:
                    continue
        account = Account.objects(
            account_id=rhic.account_id).only('contracts').first()
        contract = None
        # NOTE: the original cached under the literal key 'rhic.contract' and
        # clobbered the config dict c in the loop below; both are fixed here.
        if rhic.contract in cached_contracts:
            contract = cached_contracts[rhic.contract]
        else:
            for cand in account.contracts:
                if cand.contract_id == rhic.contract:
                    cached_contracts[rhic.contract] = cand
                    contract = cand
                    break
        # Set of used engineering ids for this checkin (the Python 2
        # sets.Set type is replaced by the builtin set).
        product_set = set(pu.allowed_product_info)
        # Iterate over each product in the contract, see if it matches sla and
        # support level, and consumed engineering ids. If so, save an instance
        # of ReportData.
        for product in contract.products:
            # Match on sla and support level.
            if not (product.sla == rhic.sla and
                    product.support_level == rhic.support_level):
                continue
            # Set of engineering ids for this product.
            product_eng_id_set = set(product.engineering_ids)
            # If the set of engineering ids for the product is a subset of the
            # used engineering ids for this checkin, create an instance of
            # ReportData, check for dupes, and save the instance.
            if product_eng_id_set.issubset(product_set):
                # This line isn't technically necessary, but it improves
                # performance by making the set we need to search smaller
                # each time.
                product_set.difference_update(product_eng_id_set)
                splice_server = SpliceServer.objects.get(
                    id=pu.splice_server.id)
                rd = ReportData(
                    instance_identifier=str(pu.instance_identifier),
                    consumer=rhic.name,
                    consumer_uuid=uuid,
                    product=product.engineering_ids,
                    product_name=product.name,
                    date=pu.date,
                    hour=pu.date.strftime(hr_fmt),
                    sla=product.sla,
                    support=product.support_level,
                    contract_id=rhic.contract,
                    contract_use=str(product.quantity),
                    memtotal=int(pu.facts['memory_dot_memtotal']),
                    cpu_sockets=int(pu.facts['lscpu_dot_cpu_socket(s)']),
                    environment=str(splice_server.environment),
                    splice_server=str(splice_server.hostname))
                # If there's a dupe, log it instead of saving a new record.
                dupe = ReportData.objects.filter(
                    consumer_uuid=rhic.uuid,
                    instance_identifier=str(pu.instance_identifier),
                    hour=pu.date.strftime(hr_fmt),
                    product=product.engineering_ids)
                if dupe:
                    _LOG.info("found dupe:" + str(pu))
                else:
                    _LOG.info('recording: ' + str(product.engineering_ids))
                    # rd.save()
                    rds.append(rd)
                    if rds and len(rds) % commit_count == 0:
                        ReportData.objects.insert(rds)
                        rds = []
    if rds:
        ReportData.objects.insert(rds)
    end = datetime.utcnow()
    time['end'] = end.strftime(format)
    results.append(time)
    _LOG.info('import complete')
    return json.dumps(time)
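# Worked example of the subset test that drives product matching above: a
# product's engineering ids must all appear in the checkin's used ids, and
# difference_update() shrinks the search set after each match. The id values
# here are made up for illustration.
used = {'69', '83', '183'}     # engineering ids seen in the checkin
product_ids = {'69', '83'}     # engineering ids owned by one product
if product_ids.issubset(used):
    used.difference_update(product_ids)
print(used)                    # {'183'} remains for later products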
def authorize(event):
    # log.debug("********** jwt payload > " + str(event))
    client_request_data = config.init(event)
    try:
        body = json.loads(event["body"])
        accessToken = body['authorizationToken']
        headers = event["headers"]
        client_id = headers["client_id"]
    except Exception as e:
        traceback.print_exc()
        # e.message was removed in Python 3; str(e) works on both.
        return Builder.bad_request_error_message(str(e))
    payload = private.validate(accessToken, client_id, client_request_data)
    # client_request_data = config.init(event)
    # client_id = event["client_id"]
    # accessToken = event['authorizationToken']
    '''
    Validate the incoming token and produce the principal user identifier
    associated with the token. This can be accomplished in a number of ways:
    1. Call out to the OAuth provider
    2. Decode a JWT token inline
    3. Lookup in a self-managed DB
    '''
    # payload = private.validate(accessToken, client_id=client_id,
    #                            client_request_data=client_request_data)
    log.debug("********** jwt payload > " + str(payload))
    principalId = payload["sub"]
    '''
    You can send a 401 Unauthorized response to the client by failing like so:

        raise Exception('Unauthorized')

    If the token is valid, a policy must be generated which will allow or
    deny access to the client. If access is denied, the client will receive
    a 403 Access Denied response. If access is allowed, API Gateway will
    proceed with the backend integration configured on the method that was
    called.

    This function must generate a policy that is associated with the
    recognized principal user identifier. Depending on your use case, you
    might store policies in a DB, or generate them on the fly.

    Keep in mind, the policy is cached for 5 minutes by default (TTL is
    configurable in the authorizer) and will apply to subsequent calls to
    any method/resource in the RestApi made with the same token. (The AWS
    blueprint's example denies access to all resources; here access is
    granted according to the token's scope below.)
    '''
    method_arn = ("arn:aws:execute-api:us-east-1:187632318301:"
                  "dhxwub3cn5/*/POST/login")
    # method_arn = event['methodArn']
    tmp = method_arn.split(':')
    apiGatewayArnTmp = tmp[5].split('/')
    awsAccountId = tmp[4]
    policy = AuthPolicy(principalId, awsAccountId)
    policy.restApiId = apiGatewayArnTmp[0]
    policy.region = tmp[3]
    policy.stage = apiGatewayArnTmp[1]
    data = payload["scope"]["access"]
    # iteritems() is Python 2 only; items() works on both.
    for endpoint, httpMethods in data.items():
        for httpMethod in httpMethods:
            policy.allowMethod(httpMethod["http_method"], endpoint)
    # Finally, build the policy.
    authResponse = policy.build()
    # new! -- add additional key-value pairs associated with the
    # authenticated principal; these are made available by APIGW like so:
    # $context.authorizer.<key>. Additional context is cached.
    context = json.dumps(payload)
    authResponse['context'] = context
    return authResponse
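# A minimal sketch of option 2 from the docstring above ("Decode a JWT token
# inline"), using PyJWT. The secret, audience, and claim layout are
# assumptions for illustration; private.validate() is the project's actual
# implementation and may differ.
import jwt  # PyJWT

def decode_token(access_token, client_id, secret):
    # Verifies the signature and the aud claim; raises jwt.InvalidTokenError
    # on failure, which the caller can map to a 401.
    payload = jwt.decode(access_token, secret,
                         algorithms=['HS256'], audience=client_id)
    return payload  # e.g. payload['sub'], payload['scope']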