def _apt_purge(self, package_name):
    purged = 0
    purge_command = [self.APT_GET_EXE, 'purge', '-y', package_name]
    try:
        result, err = self.utilcmds.run_command(purge_command)
        if err:
            raise Exception(err)
        found = re.search(r'\d+ to remove', result)
        if found:
            amount_purged = found.group().split(' ')[0]
            purged = int(amount_purged)
        if purged > 0:
            logger.debug(
                'Successfully removed {0} packages.'.format(purged)
            )
            return 'true', ''
        else:
            logger.info('No packages were removed.')
            return 'false', 'No packages removed.'
    except Exception as e:
        logger.error('Problem while uninstalling package: ' + package_name)
        logger.exception(e)
        return 'false', str(e)
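# A minimal sketch of what the regex above extracts, assuming apt-get's usual
# summary line format ("... N to remove ..."); the sample string is hypothetical.
import re

sample = '0 upgraded, 0 newly installed, 2 to remove and 5 not upgraded.'
found = re.search(r'\d+ to remove', sample)
print(found.group().split(' ')[0])  # prints '2'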
def load_queue(file_path):
    if not os.path.exists(file_path):
        return OperationQueue()
    loaded = []
    try:
        with open(file_path, 'r') as _file:
            loaded = cPickle.load(_file)
    except Exception as e:
        logger.error("Failed to load operations from: {0}".format(file_path))
        logger.exception(e)
        loaded = []
    logger.debug("Loaded operations: {0}".format(loaded))
    q = OperationQueue()
    for operation in loaded:
        q.put(operation)
    return q
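# Hedged usage sketch: restore a queue on agent startup. The file path is a
# placeholder, and the get() call assumes the OperationQueue API shown further
# below (get returns the next operation, or None when nothing is pending).
queue = load_queue('/opt/agent/data/operation_queue.pkl')
op = queue.get()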
def _result_queue_loop(self):
    while True:
        self.result_queue_file_dump()
        queue_dump = self._result_queue.queue_dump()
        should_send = [result_op for result_op in queue_dump
                       if result_op.should_be_sent()]
        if should_send:
            logger.debug("Results to be sent: {0}".format(should_send))
            for result_op in should_send:
                # TODO: what should be done if fails to remove?
                self._result_queue.remove(result_op)
                self.process_result_operation(result_op)
                self._result_queue.done()
        else:
            #logger.debug(
            #    "Results in queue: {0}".format(queue_dump)
            #)
            time.sleep(4)
def _move_pkgs(self, install_data, app_plist_data):
    """ Move all pkgs in src to dest. """
    try:
        product_key = app_plist_data["productKey"]
        src = os.path.join(settings.UpdatesDirectory, install_data.id)
        dest = os.path.join("/Library/Updates", product_key)
        if not os.path.exists(dest):
            self._make_dir(dest)
        time.sleep(3)
        for _file in os.listdir(src):
            if _file.endswith(".pkg"):
                su_pkg_path = os.path.join(dest, _file)
                if os.path.exists(su_pkg_path):
                    os.remove(su_pkg_path)
                    logger.debug(
                        "Removed existing pkg from /Library/Updates: %s"
                        % su_pkg_path)
                src_pkg = os.path.join(src, _file)
                shutil.move(src_pkg, dest)
                logger.debug("Moved " + _file + " to: " + dest)
    except Exception as e:
        logger.error("Failed moving pkgs to /Library/Updates.")
        logger.exception(e)
        raise
def _apt_install(self, package_name):
    logger.debug('Installing {0}'.format(package_name))
    install_command = [self.APT_GET_EXE, 'install', '-y', package_name]
    #TODO: figure out if restart needed
    restart = 'false'
    # TODO: parse out the error, if any
    try:
        result, err = self.utilcmds.run_command(install_command)
        if err:
            # Catch non-error related messages
            if 'reading changelogs' in err.lower():
                pass
            else:
                raise Exception(err)
    except Exception as e:
        logger.error('Failed to install {0}'.format(package_name))
        logger.exception(e)
        return 'false', str(e), restart
    logger.debug('Done installing {0}'.format(package_name))
    return 'true', '', restart
def do_get_task_total(self, tqname=None, namespace='mobile', account=1,
                      version=1, tags=None):
    '''
    Get the total number of tasks in the specified task queue.
    Params:
        tqname: Name of task queue.
        namespace: Namespace of the task queue.
        account: Account ID.
        version: Version of the resource, default value 1.
        tags: Search conditions.
    Return:
        Total number of tasks on this task queue;
        Exception message
    '''
    assert(tqname)
    resource = TQRES_TEMPLATE_STR % (version, account, namespace, tqname)
    resource = '/'.join([resource, 'stats'])
    if tags:
        data = json.dumps({'tags': tags}, encoding="utf-8")
    else:
        data = tags
    logger.debug('Get total tasks number - resource:%s, data:%s'
                 % (resource, data))
    result = self.m_wrest.do_access(resource, 'POST', data=data, headers=None)
    if result.code >= 200 and result.code < 300:
        logger.debug('Get Task Queue stats success!')
        try:
            # parse stat data
            tqs = json.loads(result.content)
            total_tasks = tqs.get('total_tasks')
            return total_tasks
        except Exception, e:
            logger.error('Get Task Queue stats Failed! %s' % repr(e))
def do_bulk_add(self, tqname=None, namespace='mobile', account=1, version=1,
                taskslist=[]):
    '''
    Insert tasks into the specific task queue.
    Params:
        tqname: Name of task queue.
        namespace: Namespace of the task queue.
        account: Account ID.
        version: Version of the resource, default value 1.
        taskslist: Task structures in a list. For example:
            [
                {
                    "payload_base64": "TXkgbGl0dGxlIHRhc2sK",
                    "tags": ["state=red", "watchthis", "code=41"],
                    "priority": "high"
                },
                ...
            ]
    Return:
        Instance object of class RestResult;
        Exception message
    '''
    # check data is available
    assert(tqname)
    assert(isinstance(taskslist, list))
    for task in taskslist:
        assert(isinstance(task, dict))
    resource = TQRES_TEMPLATE_STR % (version, account, namespace, tqname)
    resource = '/'.join([resource, 'addtasks'])
    data = json.dumps(taskslist, encoding="utf-8")
    logger.debug('Add %d tasks - resource:%s, data:%s'
                 % (len(taskslist), resource, data))
    return self.m_wrest.do_access(resource, 'POST', data=data, headers=None)
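# Hedged usage sketch for do_bulk_add, following the task structure shown in
# the docstring above. The queue name and tag values are hypothetical;
# HostedTaskQueue is the client class used by move_task_to_errorqueue below.
import base64

tq = HostedTaskQueue()
tasks = [
    {'payload_base64': base64.b64encode('task one'),
     'tags': ['state=red', 'code=41'],
     'priority': 'high'},
    {'payload_base64': base64.b64encode('task two'),
     'tags': ['watchthis'],
     'priority': 'low'},
]
result = tq.do_bulk_add(tqname='enrollment', namespace='mobile',
                        account=1, version=1, taskslist=tasks)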
def do_lease(self, tqname=None, namespace='mobile', account=1, version=1,
             tasknum=0, tags=None, leasesec=10):
    '''
    Acquire a lease on the topmost {numTask} unowned tasks in the
    specified queue.
    Params:
        tqname: Name of task queue.
        namespace: Namespace of the task queue.
        account: Account ID.
        version: Version of the resource, default value 1.
        tasknum: Number of the tasks expected.
        tags: Search conditions.
        leasesec: Lease duration in seconds.
    Return:
        Instance object of class RestResult;
        Exception message
    '''
    assert(tqname)
    resource = TQRES_TEMPLATE_STR % (version, account, namespace, tqname)
    resource = '/'.join([resource, 'lease?numTasks=%s'])
    resource = resource % tasknum
    resource = '&'.join([resource, 'leaseSecs=%s'])
    resource = resource % leasesec
    if tags:
        data = json.dumps({'tags': tags}, encoding="utf-8")
    else:
        data = tags
    logger.debug('Lease tasks - resource:%s, data:%s' % (resource, data))
    return self.m_wrest.do_access(resource, 'POST', data=data, headers=None)
def do_add(self, tqname=None, namespace='mobile', account=1, version=1,
           payload=None, tags=None, priority='low', delay=0):
    '''
    Insert one task into the specific task queue.
    Params:
        tqname: Name of task queue.
        namespace: Namespace of the task queue.
        account: Account ID.
        version: Version of the resource, default value 1.
        payload: Base64-encoded string.
        tags: Tags in the task.
        priority: Priority of the task.
        delay: If delay is set, the lease request will not see the added
            task for [delay] seconds.
    Return:
        Instance object of class RestResult;
        Exception message
    '''
    assert(tqname)
    resource = TQRES_TEMPLATE_STR % (version, account, namespace, tqname)
    resource = '/'.join([resource, 'addtask'])
    if delay > 0:
        resource += '?delay=%s' % delay
    data = json.dumps({'payload_base64': payload,
                       'tags': tags,
                       'priority': priority}, encoding="utf-8")
    logger.debug('Add task - resource:%s, data:%s' % (resource, data))
    return self.m_wrest.do_access(resource, 'POST', data=data, headers=None)
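# Hedged sketch: do_add expects an already base64-encoded payload, per the
# docstring. The queue name, payload, and tag are hypothetical.
import base64

tq = HostedTaskQueue()
payload = base64.b64encode('{"action": "enroll"}')
tq.do_add(tqname='enrollment', namespace='mobile', account=1, version=1,
          payload=payload, tags=['state=new'], priority='high', delay=5)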
def do_update(self, tqname=None, namespace='mobile', account=1, version=1,
              taskname=0, payload=None, tags=None, priority='low',
              enqueuetime=''):
    '''
    Modify section values of one task.
    Params:
        tqname: Name of task queue.
        namespace: Namespace of the task queue.
        account: Account ID.
        version: Version of the resource, default value 1.
        taskname: Name of the specific task.
        payload: Base64-encoded string.
        tags: Tags in the task.
        priority: Priority of the task.
        enqueuetime: Last modify time.
    Return:
        Instance object of class RestResult;
        Exception message
    '''
    assert(tqname)
    assert(payload and tags)
    resource = TQRES_TEMPLATE_STR % (version, account, namespace, tqname)
    temp = 'task-%s' % taskname
    resource = '/'.join([resource, temp])
    data = json.dumps({'payload_base64': payload,
                       'tags': tags,
                       'priority': priority}, encoding="utf-8")
    header = self.m_wrest.generate_default_header()
    header.update({'If-Unmodified-Since': enqueuetime})
    logger.debug('Update task - resource:%s, header:%s, data:%s'
                 % (resource, header, data))
    return self.m_wrest.do_access(resource, 'PUT', data=data, headers=header)
def download(version=None):
    """ Download App Engine SDK """
    if not version:
        version = get_latest_version()
    if type(version) is str:
        version = tuple(version.split('.'))
    response = requests.get('https://storage.googleapis.com/appengine-sdks')
    response.raise_for_status()
    tree = ET.fromstring(response.text)
    path = None
    for key in tree.iter('{http://doc.s3.amazonaws.com/2006-03-01}Key'):
        match = re.match(
            r'^.*google_appengine_{0}\.{1}\.{2}\.zip$'.format(*version),
            key.text)
        if match:
            path = key.text
            break
    url = 'https://storage.googleapis.com/appengine-sdks/{path}'.format(**locals())
    logger.debug(' * Starting SDK download for version {0}.{1}.{2}'.format(*version))
    response = requests.get(url)
    response.raise_for_status()
    temp_zip = os.path.join(gettempdir(),
                            'google_appengine_{0}.{1}.{2}.zip'.format(*version))
    writefile(temp_zip, response.content, encode=None)
    return temp_zip
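# Hedged usage sketch: fetch a specific SDK version, or the latest when no
# version is given. The version string is hypothetical.
temp_zip = download('1.9.40')   # accepts 'X.Y.Z' or a version tuple
latest_zip = download()         # falls back to get_latest_version()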
def do_get_many(self, tqname=None, namespace='mobile', account=1, version=1,
                tasknum=0, tags=None):
    '''
    Get a bulk of tasks' information from the specific task queue.
    Params:
        tqname: Name of task queue.
        namespace: Namespace of the task queue.
        account: Account ID.
        version: Version of the resource, default value 1.
        tasknum: Number of the tasks expected.
        tags: Search conditions.
    Return:
        Instance object of class RestResult;
        Exception message
    '''
    assert(tqname)
    resource = TQRES_TEMPLATE_STR % (version, account, namespace, tqname)
    resource = '/'.join([resource, 'search?numTasks=%s'])
    resource = resource % tasknum
    resource = '&'.join([resource, 'orderby=priority'])
    if tags:
        data = json.dumps({'tags': tags}, encoding="utf-8")
    else:
        data = tags
    logger.debug('Get tasks - resource:%s, data:%s' % (resource, data))
    return self.m_wrest.do_access(resource, 'POST', data=data, headers=None)
def do_insert(self, devinfo=None):
    '''
    Insert one new device information record into RS
    Params:
        devinfo: Detail information of the device.
    Return:
        Instance object of class RestResult;
        Exception message
    '''
    if not self.m_udid:
        raise Exception('Require device UDID!')
    resource = DEVRES_TEMPLATE_STR % (self.m_ver, self.m_udid)
    if isinstance(devinfo, dict):
        temp = devinfo
    elif isinstance(devinfo, basestring):
        temp = json.loads(devinfo)
    else:
        raise Exception('Invalid input!')
    temp = {'attributes': temp}
    data = json.dumps(temp)
    wrest = HostedAccess()
    logger.debug('Insert new device info - resource:%s, data:%s'
                 % (resource, data))
    retval = wrest.do_access(resource, 'PUT', data=data, headers=None)
    del wrest
    return retval
def run_operation(self, operation):
    """Executes an operation given to it by the agent core.

    Returns:
        - Nothing
    """
    logger.debug("agent-id: {0}, agent-version: {1}"
                 .format(settings.AgentId, settings.AgentVersion))
    if not isinstance(operation, MonitOperation):
        operation = MonitOperation(operation.raw_operation)
    if operation.type == MonitOperationValue.MonitorData:
        monit_data = self.get_monit_data()
        operation.raw_result = json.dumps(monit_data)
        operation.urn_response = MonitUrn.get_monit_data_urn()
        operation.request_method = RequestMethod.POST
    else:
        logger.warning("Unknown operation %s. Ignoring." % operation.type)
    self._send_results(operation, retry=False)
def do_update(self, udid=None, attributes=None):
    '''
    Update one device information record in RS
    Params:
        udid: UDID of the device.
        attributes: Attributes of the device information.
    Return:
        Instance object of class RestResult;
        Exception message
    '''
    if udid:
        udid_str = str(udid)
    else:
        udid_str = self.m_udid
    if not udid_str:
        raise Exception('Require device UDID!')
    resource = DEVRES_TEMPLATE_STR % (self.m_ver, udid_str)
    if isinstance(attributes, list):
        attr = attributes
    elif isinstance(attributes, basestring):
        # parse the JSON string so the payload is not double-encoded below
        attr = json.loads(attributes)
    else:
        raise Exception('Invalid input!')
    content = {
        'method': 'modify',
        'params': attr,
    }
    data = json.dumps(content)
    wrest = HostedAccess()
    logger.debug('Update device info - resource:%s, data:%s'
                 % (resource, data))
    retval = wrest.do_access(resource, 'POST', data=data, headers=None)
    del wrest
    return retval
def do_get_many(self, idx, max, sortby, **conditions):
    '''
    Get devices' information from RS
    Params:
        idx: Page index number. (0 ~ )
        max: Page size.
        sortby: Sort condition string.
        conditions: The conditions for device selection.
    Return:
        One dict with devices' information;
        Exception message
    '''
    offset_val = idx * max + 1
    after_val = max - 1
    vlv_str = ('{"sort":["%s"],"vlv":{"before":0,"after":%d,"offset":%d},'
               % (sortby, after_val, offset_val))
    resource = DEVSEARCH_TEMPLATE_STR % self.m_ver
    method = 'POST'
    post_data = ''.join([vlv_str,
                         '"base":"ou=devices", '
                         '"filter":"(&(objectClass=mobileDevice)'])
    if conditions:
        for k, v in conditions.iteritems():
            cond_str = '(%s=%s)' % (k, str(v))
            post_data = ''.join([post_data, cond_str])
    post_data = ''.join([post_data, ')"}'])
    logger.debug('Get %d devices info - resource:%s, data:%s'
                 % (max, resource, repr(post_data)))
    wrest = HostedAccess()
    devinfo = wrest.do_access(resource, method, data=post_data, headers=None)
    del wrest
    return devinfo
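# Hedged usage sketch for the paged device search above; 'devres' stands in
# for an instance of the (unnamed here) owner class, and the sort key and
# search condition are hypothetical LDAP-style attributes.
page = devres.do_get_many(0, 50, 'createTimestamp', accountId=1)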
def do_bulk_op(self, content):
    '''
    Bulk add and/or update device information in RS
    Params:
        content: Data in a specific format, generated by format_bulk_data
    Return:
        Instance object of class RestResult;
        Exception message
    '''
    if isinstance(content, list):
        data = json.dumps(content)
    elif isinstance(content, basestring):
        data = content
    else:
        raise Exception('Invalid content!')
    resource = BULK_OP_URL % self.m_ver
    wrest = HostedAccess()
    logger.debug('bulk operation info - resource:%s, data:%s'
                 % (resource, data))
    retval = wrest.do_access(resource, 'POST', data=data, headers=None)
    del wrest
    return retval
def resolve(address, uuid, adapter=None, bus=dbus.SystemBus()):
    try:
        adapter.CreateDevice(address)
        logger.debug("Device created: %s" % address)
    except:
        logger.debug("Device already known: %s" % address)
    path = adapter.FindDevice(address)
    device = dbus.Interface(bus.get_object("org.bluez", path),
                            "org.bluez.Device")
    properties = device.GetProperties()
    if uuid in properties['UUIDs']:
        services = device.DiscoverServices(uuid)
        for key in services.keys():
            root = etree.XML(str(services[key]))
            res = CHANNEL_XPATH(root)
            if len(res) > 0:
                return int(res[0], 16)
    # lets check if the service is there now
    #adapter.RemoveDevice(path)
    raise Exception("UUID not found")
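# Hedged usage sketch: look up the RFCOMM channel for the standard Serial Port
# Profile UUID on a device. The address is a placeholder, and 'adapter' is
# assumed to be an org.bluez.Adapter D-Bus interface obtained elsewhere.
SPP_UUID = "00001101-0000-1000-8000-00805f9b34fb"
channel = resolve("00:11:22:33:44:55", SPP_UUID, adapter=adapter)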
def do_get(self, **conditions):
    '''
    Get one device's information from RS
    Params:
        conditions: The conditions for device selection.
    Return:
        One dict with device information;
        Exception message
    '''
    if self.m_udid:
        # search by udid
        resource = DEVRES_TEMPLATE_STR % (self.m_ver, self.m_udid)
        method = 'GET'
        post_data = None
    else:
        # without udid
        if conditions:
            resource = DEVSEARCH_TEMPLATE_STR % self.m_ver
            method = 'POST'
            post_data = ('{"base":"ou=devices", '
                         '"filter":"(&(objectClass=mobileDevice)')
            for cond in conditions:
                temp_str = '(%s=%s)' % (str(cond), str(conditions[cond]))
                post_data = post_data + temp_str
            post_data = ''.join([post_data, ')"}'])
        else:
            raise Exception('Require device UDID or conditions for searching!')
    logger.debug('Get one device info - resource:%s' % (resource))
    wrest = HostedAccess()
    devinfo = wrest.do_access(resource, method, data=post_data, headers=None)
    del wrest
    return devinfo
def get(self):
    """ Attempts to get an operation from the queue if no operation
        is pending.

    Returns:
        The operation if it was successfully retrieved, None otherwise.
    """
    operation = None
    if (not self.op_in_progress) and (not self.paused):
        try:
            operation = self.queue.get_nowait()
            self.op_in_progress = True
            try:
                logger.debug(
                    "Popping {0} from OpQueue.".format(operation.id)
                )
            except Exception:
                logger.debug("Popping {0} from OpQueue.".format(operation))
        except Queue.Empty:
            # logger.debug("Operations queue is empty.")
            operation = None
        except Exception as e:
            logger.error("Error accessing operation queue.")
            logger.error("Message: %s" % e)
            operation = None
    return operation
def add_update_data(self, name, needs_restart=False):
    """ Adds a new install item to the database.

    @param name: The vendor id this item refers to.
    @param needs_restart: True if restart required. False otherwise.
    @return: Nothing
    """
    if self.update_exist(name):
        logger.debug("Update %s already exists. Ignoring." % name)
        return
    _connection = self._connection
    with _connection:
        cursor = _connection.cursor()
        values = (name, needs_restart)
        cursor.execute("INSERT INTO %s (%s) "
                       "VALUES (?, ?)"
                       % (self._table, UpdateDataColumn.AllColumns),
                       values)
def move_task_to_errorqueue(self, error_msg):
    hostedTQ = HostedTaskQueue()
    if type(self.m_tags) != list:
        logger.error('task tags is not a list:[%s]' % self.m_tags)
        raise Exception('tags type error in move_task_to_errorqueue')
    if TASK_STATUS_DICT['normal'] in self.m_tags:
        self.m_tags.remove(TASK_STATUS_DICT['normal'])
    if TASK_STATUS_DICT['retry'] in self.m_tags:
        self.m_tags.remove(TASK_STATUS_DICT['retry'])
    self.m_tags.append(TASK_STATUS_DICT['error'])
    tag_error = ('error_msg_base64=%s'
                 % base64.encodestring(error_msg).replace('\n', ''))
    self.m_tags.append(tag_error)
    try:
        hostedTQ.do_add(tqname=self.m_error_qname, namespace='mobile',
                        account=int(self.m_account), version=1,
                        payload=self.m_payload_base64, tags=self.m_tags,
                        priority=self.m_priority)
    except MDMiHttpError, e:
        logger.debug('add error task failed:%s, will try to create error queue'
                     % repr(e))
        error_qname_list = [
            {'name': 'enrollmentError',
             'settings': {"description": "Enrollment Decision Error task queue",
                          "max_leases": "30",
                          "max_age": "0"}}
        ]
        tqo = TaskQueueUtils(error_qname_list)
        tqo.init_task_queue()
        del tqo
        hostedTQ.do_add(tqname=self.m_error_qname, namespace='mobile',
                        account=int(self.m_account), version=1,
                        payload=self.m_payload_base64, tags=self.m_tags,
                        priority=self.m_priority)
def list_sdk_versions(args):
    logger.debug(' * Retrieving list of App Engine SDK versions')
    versions = sdk.get_versions()
    for version in versions:
        print '{0}.{1}.{2}'.format(*version)
    sys.exit(0)
def install_update(self, install_data, update_dir=None):
    logger.debug('Received install_update call.')
    old_install_list = self.get_installed_applications()
    success = 'false'
    error = ''
    restart = 'false'
    app_encoding = CreateApplication.null_application().to_dict()
    apps_to_delete = []
    apps_to_add = []
    success, error, restart = self._yum_update(install_data.name)
    if success == 'true':
        new_install_list = self.get_installed_applications()
        app = self._get_installed_app(install_data.name, new_install_list)
        app_encoding = app.to_dict()
        apps_to_add, apps_to_delete = self._get_apps_to_add_and_delete(
            old_install_list, new_install_list
        )
    return InstallResult(
        success,
        error,
        restart,
        app_encoding,
        apps_to_delete,
        apps_to_add
    )
def setup_table(self, num):
    table = 'test_%s' % num
    self.cursor.execute('SHOW TABLES LIKE %s', [table])
    if self.cursor.fetchone():
        # Table already exists.
        return table
    logger.debug('Creating table "%s"' % table)
    _dir = dirname(__file__) or '.'
    with open(_dir + '/mysql_mid_table.sql', 'rb') as f:
        sql = f.read() % num
    self.cursor.execute(sql.strip(' ;\r\n'))
    delta = timedelta(seconds=self.CREATED_INTERVAL)
    start_dt = datetime.now() - delta * num
    for chunk in xrange(int(ceil(float(num) / self.CHUNK_SIZE))):
        values = list(chain(*[[
            MID_RECORD['name'] % (i + chunk * self.CHUNK_SIZE),
            MID_RECORD['data'],
            start_dt + delta * i,
        ] for i in xrange(self.CHUNK_SIZE)]))
        sql = (
            'INSERT INTO `%s` (`name`, `data`, `created`) VALUES ' % table
        ) + ('(%s, %s, %s), ' * self.CHUNK_SIZE)[:-2]
        self.cursor.execute('BEGIN')
        self.cursor.execute(sql, values)
        self.cursor.execute('COMMIT')
        # logger.debug(
        #     'Inserted chunk #%s with %s middle sized records',
        #     chunk + 1, self.CHUNK_SIZE
        # )
    logger.debug('Created with %s records' % num)
    return table
def _get(self, method, args):
    """ private helper to perform an api call """
    url = self.API_BASE % (self.api_key, method, "/".join(args))
    logger.debug("fetching url '%s'" % url)
    return self._get_url(url)
def save_package(package):
    with _connection:
        cursor = _connection.cursor()
        values = (package.name, package.version_release, package.description,
                  package.support_url, package.severity, package.toppatch_id,
                  package.vendor_id, package.date_installed,
                  package.date_published, package.is_update,
                  package.package_url)
        logger.debug("Adding TPID# %s" % package.toppatch_id)
        cursor.execute("INSERT INTO %s (name, version_release, description,"
                       "support_url, severity, toppatch_id, vendor_id,"
                       "date_installed, date_published, is_update, package_url)"
                       " VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
                       % _packages_table, values)
def run(self):
    # read keys from files at first
    if self.m_enable_init:
        logger.info('Creator %d running, doing init!' % os.getpid())
        keys_list = self._init_keys_list()
        for item in keys_list:
            if not self.m_keylist_queue.full():
                self._put(item)
    else:
        # waiting for init finished
        logger.debug('Creator %d running, waiting init done!' % os.getpid())
        while not self.m_ready_evt.is_set():
            if self._idle(1):
                return
        logger.info('Creator %d init done!' % os.getpid())
    self._generate_keys_handle()
    # clean multiprocess queue
    self.m_keylist_queue.close()
    while not self.m_keylist_queue.empty():
        self.m_keylist_queue.get(1)
    logger.info('key pool creator(%d) exits' % os.getpid())
    return
def put_data(self, data, block=True, timeout=0):
    task_data = {}
    data = json.loads(data)
    if data['sync_type'] == 'metanate':
        key = ':'.join(['metanate', str(data['account']),
                        data['trans_type'], data['sync_source']])
        task_data['key'] = key
        task_data['value'] = data
    elif data['sync_type'] == 'hybrid':
        key = ':'.join([data['sync_type'], str(data['account'])])
        task_data['key'] = key
        task_data['value'] = data
    while not self.__stop_event.is_set():
        try:
            self.__queue.put(task_data, block, timeout)
            logger.debug('put task data (%s, %s) successfully' % (key, data))
            break
        except Full:
            logger.debug('task cache size reaches max size')
            self.full_event.clear()
            self.full_event.wait()
        except Exception, e:
            logger.error('put task data into cache error %s' % repr(e))
            break
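# Hedged sketch of the JSON shape put_data expects, based on the keys read
# above; the field values are hypothetical, and 'cache' stands in for an
# instance of the owning class.
import json

sample = json.dumps({
    'sync_type': 'metanate',   # or 'hybrid'
    'account': 42,
    'trans_type': 'add',       # only read for 'metanate'
    'sync_source': 'ldap',     # only read for 'metanate'
})
cache.put_data(sample)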
def signal_handler(signum, frame):
    logger.debug('Caught interrupt signal: %d in main process', signum)
    if signum == signal.SIGHUP:
        reload_config()
        g_service_status.reload_status()
    else:
        stop_children()
def restartPollers(self):
    log.debug("%s: restarting pollers", self.monitorId)
    for poller in self.pollerSet:
        if poller.init is not None:
            poller.init(poller)
def _signal_handler(self, signum, frame):
    logger.debug('Caught interrupt signal in socket process: %d', signum)
    self.is_teminate = True
def signal_handler(signal, frame):
    logger.debug('Terminating PhantomJS before exit')
    self.driver.quit()
    sys.exit(0)
def get_article_obj_from_url(url):
    logger.debug('Fetching content from url: [{}]'.format(url))
    article_page_content = get_content(url)
    logger.debug('Fetched article page')
    article_div = bs(
        article_page_content,
        'html.parser').body.find('main').div.div.div.div.div.div.article
    logger.debug('Parsed article tag')
    # For debugging
    # write_html('tmp.html', str(article_div))
    title = article_div.find('h1').string.strip()
    logger.debug('Fetched title: [{}]'.format(title))
    author = article_div.find('div', {
        'class': 'post-meta__author-name'
    }).string.strip()
    logger.debug('Fetched author: [{}]'.format(author))
    date_tag = article_div.find('div', {
        'class': 'post-meta__publish-date'
    }).time
    date = date_tag['datetime']
    diff = date_tag.string.strip()
    logger.debug('Fetched date: [{}] and diff: [{}]'.format(date, diff))
    stats_div = article_div.find(
        'div', {'class': 'post-actions post__block post__block_post-actions'})
    views = -1
    shares = -1
    for div in stats_div.find_all(
            'div', {'class': 'post-actions__item post-actions__item_stat'}):
        name = div.find('span', {
            'class': 'post-actions__item-title'
        }).string.strip().lower()
        number = int(
            div.find('span', {
                'class': 'post-actions__item-count'
            }).string.strip())
        if name.find('views') > -1:
            views = number
        else:
            shares = number
    logger.debug('Fetched views: [{}] and shares [{}]'.format(views, shares))
    tags_div = article_div.find('ul', {
        'class': 'tags-list__list'
    }).find_all('li')
    tags = [tag.a.string.strip() for tag in tags_div]
    logger.debug('Fetched tags: [{}]'.format(tags))
    return Article(url, title, author, views, shares, date, diff, tags)
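# Hedged usage sketch: scrape a single article and persist it. The URL is a
# placeholder, and 'collector' is an assumed instance of the class that owns
# add_article further below.
article = get_article_obj_from_url('https://example.com/blog/some-post')
collector.add_article(article)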
@classmethod
def from_html(cls, html_filepath):
    logger.debug(
        'Collecting data from HTML file: [{}]'.format(html_filepath))
    return cls(ArticleParser.get_urls_from_html(html_filepath))
@classmethod
def from_csv(cls, csv_filepath):
    logger.debug(
        'Collecting data from CSV file: [{}]'.format(csv_filepath))
    return cls(ArticleParser.get_urls_from_csv(csv_filepath))
def validate_urls(self):
    logger.debug('Validating URLs')
    self.urls = list(filter(is_url_valid, self.urls))
    logger.debug('URL validation complete')
def add_article(self, article, add_to_file=True):
    self.articles.append(article)
    if add_to_file:
        self.writer.writerow(article.__dict__)
        self.csv_file.flush()
    logger.debug('Article added')
def run(self, output_fn, **kwargs):
    logger.debug(
        "---------------------------------------------------------------------------------"
    )
    logger.debug(
        "------------------------Welcome to Monitor Daemon.-------------------------------"
    )
    logger.debug(
        "------Copyright (2019, ) Institute of Software, Chinese Academy of Sciences------"
    )
    logger.debug(
        "---------author: [email protected],[email protected]"
    )
    logger.debug(
        "--------------------------------wuheng@otcaix.iscas.ac.cn------------------------"
    )
    logger.debug(
        "---------------------------------------------------------------------------------"
    )
    start_http_server(19998)
    # registry = CollectorRegistry(auto_describe=False)
    config.load_kube_config(config_file=TOKEN)
    zone = get_field_in_kubernetes_node(HOSTNAME,
                                        ['metadata', 'labels', 'zone'])
    while True:
        # init(registry)
        collect_vm_metrics(zone)
        collect_storage_metrics(zone)
        time.sleep(10)
def main(args):
    args.batch_size = None
    logger.debug('CONFIGURATION: {}'.format(args))

    """ Set up the graph, the agents, and run the agents in parallel. """
    if args.env == 'GYM':
        from environments import atari_environment
        num_actions, action_space, _ = atari_environment.get_actions(args.game)
        input_shape = atari_environment.get_input_shape(args.game)
    else:
        num_actions = get_num_actions(args.rom_path, args.game)
    args.action_space = action_space
    args.summ_base_dir = '/tmp/summary_logs/{}/{}'.format(
        args.game, time.strftime('%m.%d/%H.%M'))
    logger.info('logging summaries to {}'.format(args.summ_base_dir))

    Learner, Network = ALGORITHMS[args.alg_type]
    network = Network({
        'name': 'shared_vars_network',
        'input_shape': input_shape,
        'num_act': num_actions,
        'args': args
    })
    args.network = Network

    # initialize shared variables
    args.learning_vars = SharedVars(network.params)
    args.opt_state = SharedVars(
        network.params, opt_type=args.opt_type,
        lr=args.initial_lr) if args.opt_mode == 'shared' else None
    args.batch_opt_state = SharedVars(
        network.params, opt_type=args.opt_type,
        lr=args.initial_lr) if args.opt_mode == 'shared' else None

    # TODO: need to refactor so TRPO+GAE doesn't need special treatment
    if args.alg_type == 'trpo':
        baseline_network = PolicyValueNetwork({
            'name': 'shared_value_network',
            'input_shape': input_shape,
            'num_act': num_actions,
            'args': args
        }, use_policy_head=False)
        args.baseline_vars = SharedVars(baseline_network.params)

    if args.alg_type in ['q', 'sarsa', 'dueling', 'dqn-cts']:
        args.target_vars = SharedVars(network.params)
        args.target_update_flags = SharedFlags(args.num_actor_learners)

    args.barrier = Barrier(args.num_actor_learners)
    args.global_step = SharedCounter(0)
    args.num_actions = num_actions

    cuda_visible_devices = os.getenv('CUDA_VISIBLE_DEVICES')
    num_gpus = 0
    if cuda_visible_devices:
        # CUDA_VISIBLE_DEVICES is comma-separated, e.g. "0,1"
        num_gpus = len(cuda_visible_devices.split(','))

    # spin up processes and block
    if args.visualize == 2:
        args.visualize = 0

    actor_learners = []
    task_queue = Queue()
    experience_queue = Queue()
    for i in xrange(args.num_actor_learners):
        if (args.visualize == 2) and (i == args.num_actor_learners - 1):
            args.visualize = 1

        args.actor_id = i
        args.device = '/gpu:{}'.format(i % num_gpus) if num_gpus else '/cpu:0'

        rng = np.random.RandomState(int(time.time()))
        args.random_seed = rng.randint(1000)

        # only used by TRPO
        args.task_queue = task_queue
        args.experience_queue = experience_queue

        args.input_shape = input_shape
        actor_learners.append(Learner(args))
        actor_learners[-1].start()

    try:
        for t in actor_learners:
            t.join()
    except KeyboardInterrupt:
        # Terminate with extreme prejudice
        for t in actor_learners:
            t.terminate()

    logger.info('All training threads finished!')
def gain_honor(self, value):
    assert value >= 0
    self.honor += value
    logger.debug("Gain honor[value=%d]" % value)
def delete_profile(self, *args):
    logger.debug(f'[UI] "{self.name}" Edit-Dialogue: Delete-Profile-Clicked')
    u.Profile(username=self.username).delete()
    self.dialogue.dismiss(force=True)
    self.screen.refresh_visible_profiles()
metadata_products = pd.read_csv(os.path.join(DIR, 'products.csv'))
with open(os.path.join(DIR, 'model.pkl'), 'rb') as f:
    model = joblib.load(f)

time_benchmark = []
scores = []

if __name__ == '__main__':
    for i in range(len(sample_queries)):
        query = json.loads(sample_queries.at[i, 'query'])
        actual = json.loads(sample_queries.at[i, 'next_trans'])['product_ids']
        if len(query['transaction_history']) == 0:
            continue
        start_time = time.time()
        products_hist_counter, histdata_products = extract_data_from_json_dict(query)
        recs = model.recommend(products_hist_counter, metadata_products,
                               histdata_products)
        # recs = model.recommend(products_hist_counter)
        finish_time = time.time()
        score = normalized_average_precision(actual, recs)
        scores.append(score)
        # logger.debug(f'Query: {i}, processed in {(finish_time - start_time):.5f} seconds')
        logger.debug(f'Query {i}, NAP@30: {score}, '
                     f'Num transactions: {len(query["transaction_history"])}')
        time_benchmark.append(finish_time - start_time)
    logger.debug(f'Ran {len(sample_queries)} queries, Average elapsed time: '
                 f'{np.mean(time_benchmark)} ± {np.std(time_benchmark)}')
    logger.debug(f'MNAP@30: {np.mean(scores)}')
def train(self):
    """ Main actor learner loop for n-step Q learning. """
    logger.debug("Actor {} resuming at Step {}, {}".format(
        self.actor_id, self.global_step.value(), time.ctime()))

    s = self.emulator.get_initial_state()

    s_batch = list()
    a_batch = list()
    y_batch = list()
    bonuses = deque(maxlen=1000)

    episode_over = False
    t0 = time.time()
    global_steps_at_last_record = self.global_step.value()

    while (self.global_step.value() < self.max_global_steps):
        # # Sync local learning net with shared mem
        # self.sync_net_with_shared_memory(self.local_network, self.learning_vars)
        # self.save_vars()

        rewards = list()
        states = list()
        actions = list()
        max_q_values = list()
        local_step_start = self.local_step

        total_episode_reward = 0
        total_augmented_reward = 0
        episode_ave_max_q = 0
        ep_t = 0

        while not episode_over:
            # Sync local learning net with shared mem
            self.sync_net_with_shared_memory(self.local_network,
                                             self.learning_vars)
            self.save_vars()

            # Choose next action and execute it
            a, q_values = self.choose_next_action(s)
            new_s, reward, episode_over = self.emulator.next(a)
            total_episode_reward += reward
            max_q = np.max(q_values)

            current_frame = new_s[..., -1]
            bonus = self.density_model.update(current_frame)
            bonuses.append(bonus)

            # Rescale or clip immediate reward
            reward = self.rescale_reward(self.rescale_reward(reward) + bonus)
            total_augmented_reward += reward
            ep_t += 1

            rewards.append(reward)
            states.append(s)
            actions.append(a)
            max_q_values.append(max_q)

            s = new_s
            self.local_step += 1
            episode_ave_max_q += max_q

            global_step, _ = self.global_step.increment()

            if global_step % self.q_target_update_steps == 0:
                self.update_target()

            if global_step % self.density_model_update_steps == 0:
                self.write_density_model()

            # Sync local tensorflow target network params with shared
            # target network params
            if self.target_update_flags.updated[self.actor_id] == 1:
                self.sync_net_with_shared_memory(self.target_network,
                                                 self.target_vars)
                self.target_update_flags.updated[self.actor_id] = 0

            if self.density_model_update_flags.updated[self.actor_id] == 1:
                self.read_density_model()
                self.density_model_update_flags.updated[self.actor_id] = 0

            if self.local_step % self.q_update_interval == 0:
                self.batch_update()

            if self.is_master() and (self.local_step % 500 == 0):
                bonus_array = np.array(bonuses)
                steps = global_step - global_steps_at_last_record
                global_steps_at_last_record = global_step
                logger.debug(
                    'Mean Bonus={:.4f} / Max Bonus={:.4f} / STEPS/s={}'.format(
                        bonus_array.mean(), bonus_array.max(),
                        steps / float(time.time() - t0)))
                t0 = time.time()
        else:
            # compute monte carlo return
            mc_returns = np.zeros((len(rewards),), dtype=np.float32)
            running_total = 0.0
            for i, r in enumerate(reversed(rewards)):
                running_total = r + self.gamma * running_total
                mc_returns[len(rewards) - i - 1] = running_total

            mixed_returns = self.cts_eta * np.asarray(rewards) + (
                1 - self.cts_eta) * mc_returns

            # update replay memory
            states.append(new_s)
            episode_length = len(rewards)
            for i in range(episode_length):
                self.replay_memory.append(states[i], actions[i],
                                          mixed_returns[i],
                                          i + 1 == episode_length)

        s, total_episode_reward, _, ep_t, episode_ave_max_q, episode_over = \
            self.prepare_state(s, total_episode_reward, self.local_step, ep_t,
                               episode_ave_max_q, episode_over, bonuses,
                               total_augmented_reward)
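# A minimal numeric sketch of the mixed Monte-Carlo return computed in the
# episode-end branch above, with hypothetical rewards and coefficients.
import numpy as np

rewards, gamma, cts_eta = [1.0, 0.0, 2.0], 0.99, 0.9
mc = np.zeros(len(rewards), dtype=np.float32)
running = 0.0
for i, r in enumerate(reversed(rewards)):
    running = r + gamma * running          # discounted return, back to front
    mc[len(rewards) - i - 1] = running
# mc ≈ [2.9602, 1.98, 2.0]
mixed = cts_eta * np.asarray(rewards) + (1 - cts_eta) * mc
# mixed blends each immediate reward with its Monte-Carlo return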
def _call(self, txn, commands):
    for sql in commands:
        logger.debug("SQL execute[sql=%s]" % sql)
        txn.execute(sql)
    return txn.fetchall()
def update_food_capacity(self, capacity):
    """Update food capacity.
    """
    logger.debug("update food capacity[capacity=%f]" % capacity)
    assert capacity >= 0
    self.food_capacity = capacity
def train(self):
    """ Main actor learner loop for advantage actor critic learning. """
    logger.debug("Actor {} resuming at Step {}".format(
        self.actor_id, self.global_step.value()))

    s = self.emulator.get_initial_state()
    steps_at_last_reward = self.local_step
    total_episode_reward = 0.0
    mean_entropy = 0.0
    q_update_counter = 0
    episode_start_step = 0

    while (self.global_step.value() < self.max_global_steps):
        # Sync local learning net with shared mem
        self.sync_net_with_shared_memory(self.local_network,
                                         self.learning_vars)
        self.save_vars()

        params = self.session.run(self.local_network.flat_vars)
        self.assign_vars(self.batch_network, params)

        local_step_start = self.local_step

        reset_game = False
        episode_over = False

        states = list()
        rewards = list()
        actions = list()
        values = list()

        s_batch = list()
        a_batch = list()
        y_batch = list()
        adv_batch = list()

        while not (episode_over
                   or (self.local_step - local_step_start
                       == self.max_local_steps)):
            # Choose next action and execute it
            a, readout_v_t, readout_pi_t = self.choose_next_action(s)
            if self.is_master() and (self.local_step % 100 == 0):
                logger.debug("pi={}, V={}".format(readout_pi_t, readout_v_t))

            new_s, reward, episode_over = self.emulator.next(a)
            if reward != 0.0:
                steps_at_last_reward = self.local_step

            total_episode_reward += reward
            # Rescale or clip immediate reward
            reward = self.rescale_reward(reward)

            self.replay_memory.append(s, a, reward, episode_over)

            rewards.append(reward)
            states.append(s)
            actions.append(a)
            values.append(readout_v_t)

            s = new_s
            self.local_step += 1
            self.global_step.increment()

        # Calculate the value offered by critic in the new state.
        if episode_over:
            R = 0
        else:
            R = self.session.run(
                self.local_network.output_layer_v,
                feed_dict={self.local_network.input_ph: [new_s]})[0][0]

        sel_actions = []
        for i in reversed(xrange(len(states))):
            R = rewards[i] + self.gamma * R

            y_batch.append(R)
            a_batch.append(actions[i])
            s_batch.append(states[i])
            adv_batch.append(R - values[i])

            sel_actions.append(np.argmax(actions[i]))

        # Compute gradients on the local policy/V network and apply them
        # to shared memory
        feed_dict = {
            self.local_network.input_ph: s_batch,
            self.local_network.critic_target_ph: y_batch,
            self.local_network.selected_action_ph: a_batch,
            self.local_network.adv_actor_ph: adv_batch,
        }
        grads, entropy = self.session.run(
            [self.local_network.get_gradients, self.local_network.entropy],
            feed_dict=feed_dict)

        self.apply_gradients_to_shared_memory_vars(grads)

        # q_grads = self.batch_q_update()
        # if q_grads is not None:
        #     grads = [p + q for p, q in zip(policy_grads, q_grads)]
        # else:
        #     grads = policy_grads

        q_update_counter += 1
        if q_update_counter % self.q_update_interval == 0:
            q_grads = self.batch_q_update()
            if q_grads is not None:
                # grads = [p + q for p, q in zip(grads, q_grads)]
                self.apply_gradients_to_shared_memory_vars(q_grads)

        delta_old = local_step_start - episode_start_step
        delta_new = self.local_step - local_step_start
        mean_entropy = (mean_entropy * delta_old + entropy * delta_new) / (
            delta_old + delta_new)

        s, mean_entropy, mean_value, episode_start_step, total_episode_reward, steps_at_last_reward = self.prepare_state(
            s, mean_entropy, np.array(values).mean(), episode_start_step,
            total_episode_reward, steps_at_last_reward, sel_actions,
            episode_over)
def update_food_output(self, output):
    """Update food output.
    """
    logger.debug("update food output[output=%f]" % output)
    assert output >= 0
    self.food_output = output
def _train(self):
    """ Main actor learner loop for advantage actor critic learning. """
    logger.debug("Actor {} resuming at Step {}".format(
        self.actor_id, self.global_step.value()))

    bonuses = deque(maxlen=100)

    while (self.global_step.value() < self.max_global_steps):
        # Sync local learning net with shared mem
        s = self.emulator.get_initial_state()
        self.reset_hidden_state()
        self.local_episode += 1
        episode_over = False
        total_episode_reward = 0.0
        episode_start_step = self.local_step

        while not episode_over:
            self.sync_net_with_shared_memory(self.local_network,
                                             self.learning_vars)
            self.save_vars()

            rewards = list()
            states = list()
            actions = list()
            values = list()

            local_step_start = self.local_step
            self.set_local_lstm_state()

            while (self.local_step - local_step_start < self.max_local_steps
                   and not episode_over):
                # Choose next action and execute it
                a, readout_v_t, readout_pi_t = self.choose_next_action(s)
                new_s, reward, episode_over = self.emulator.next(a)
                total_episode_reward += reward

                # Update density model
                current_frame = new_s[..., -1]
                bonus = self.density_model.update(current_frame)
                bonuses.append(bonus)

                if self.is_master() and (self.local_step % 400 == 0):
                    bonus_array = np.array(bonuses)
                    logger.debug(
                        'π_a={:.4f} / V={:.4f} / Mean Bonus={:.4f} / Max Bonus={:.4f}'
                        .format(readout_pi_t[a.argmax()], readout_v_t,
                                bonus_array.mean(), bonus_array.max()))

                # Rescale or clip immediate reward
                reward = self.rescale_reward(
                    self.rescale_reward(reward) + bonus)
                rewards.append(reward)
                states.append(s)
                actions.append(a)
                values.append(readout_v_t)

                s = new_s
                self.local_step += 1

                global_step, _ = self.global_step.increment()
                if global_step % self.density_model_update_steps == 0:
                    self.write_density_model()
                if self.density_model_update_flags.updated[self.actor_id] == 1:
                    self.read_density_model()
                    self.density_model_update_flags.updated[self.actor_id] = 0

            next_val = self.bootstrap_value(new_s, episode_over)
            advantages = self.compute_gae(rewards, values, next_val)
            targets = self.compute_targets(rewards, next_val)

            # Compute gradients on the local policy/V network and apply them
            # to shared memory
            entropy = self.apply_update(states, actions, targets, advantages)

        elapsed_time = time.time() - self.start_time
        steps_per_sec = self.global_step.value() / elapsed_time
        perf = "{:.0f}".format(steps_per_sec)
        logger.info(
            "T{} / EPISODE {} / STEP {}k / REWARD {} / {} STEPS/s".format(
                self.actor_id, self.local_episode,
                self.global_step.value() / 1000, total_episode_reward, perf))

        self.log_summary(total_episode_reward, np.array(values).mean(),
                         entropy)
def signal_handler(signum, frame):
    global is_signal_up
    logger.debug('Caught a stop signal!')
    is_signal_up = True
def update_money_capacity(self, capacity):
    """Update money capacity.
    """
    logger.debug("update money capacity[capacity=%f]" % capacity)
    assert capacity >= 0
    self.money_capacity = capacity
def choose_rival(self, self_battle_score, rivals_user_id, rivals_win_score,
                 rivals_user_id_origin, rivals_battle_score):
    """Choose an opponent.

    Args:
        rivals_win_score: Points gained for defeating each of the three
            opponents.
    """
    if len(rivals_win_score) != MAX_RIVALS_NUM:
        return False

    if self.continuous_win_num <= CONTINUS_WIN_NUM:
        # Compare our battle score against each opponent in turn; if ours
        # exceeds theirs by more than 15% (tunable), match that opponent
        # directly.
        for i in range(len(rivals_user_id) - 1):
            if self.is_arena_rival_pve(rivals_user_id[i]):
                logger.debug("rival is pve[rival_id=%d]" % rivals_user_id[i])
                continue
            diff_ratio = 1.0 * (self_battle_score - rivals_battle_score[i]
                                ) / rivals_battle_score[i]
            logger.debug(
                "choose_rival[self_battle_score=%d][diff_ratio=%f][rival_battle_score=%d]"
                % (self_battle_score, diff_ratio, rivals_battle_score[i]))
            if diff_ratio > DIFF_BATTLE_SCORE_RATIO:
                # Pick this opponent
                for j in range(len(rivals_user_id_origin)):
                    if rivals_user_id_origin[j] == rivals_user_id[i]:
                        self.choose_rival_index = j
                        return True

    win_max = max(data_loader.ArenaChoosePlayer_dict.keys())
    win_min = min(data_loader.ArenaChoosePlayer_dict.keys())
    if self.continuous_win_num >= 0:
        num = min(self.continuous_win_num, win_max)
    else:
        num = max(self.continuous_win_num, win_min)

    ratio = []
    ratio.append(
        float(data_loader.ArenaChoosePlayer_dict[num].firstProbability) / 100)
    ratio.append(
        float(data_loader.ArenaChoosePlayer_dict[num].secondProbability) / 100
        + ratio[0])
    ratio.append(
        float(data_loader.ArenaChoosePlayer_dict[num].threeProbability) / 100
        + ratio[1])

    # roll
    choose_index = 0
    random.seed()
    c = random.random()
    for index, r in enumerate(ratio):
        if c < r:
            choose_index = index
            break

    for i in range(len(rivals_user_id_origin)):
        if rivals_user_id_origin[i] == rivals_user_id[choose_index]:
            self.choose_rival_index = i
            break

    logger.debug("roll rival[roll=%f][ratio1=%f][ratio2=%f][ratio3=%f]"
                 % (c, ratio[0], ratio[1], ratio[2]))
    return True
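# A minimal sketch of the cumulative-probability roll used above, with
# hypothetical per-opponent probabilities of 50/30/20 percent.
import random

ratio = [0.50, 0.80, 1.00]   # cumulative: 50% + 30% + 20%
c = random.random()
choose_index = 0
for index, r in enumerate(ratio):
    if c < r:                # first cumulative bound exceeding the roll wins
        choose_index = index
        break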
def update_money_output(self, output):
    """Update money output.
    """
    logger.debug("update money output[output=%f]" % output)
    assert output >= 0
    self.money_output = output
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('--distmap', default=False, action="store_true")
    parser.add_argument('--final', default=False, action="store_true")
    parser.add_argument('--depth', default=False, action="store_true")
    parser.add_argument('--iou', default=False, action="store_true")
    parser.add_argument('--debug', default=True, action="store_true")
    parser.add_argument(
        '--input',
        default="results/cityscapes/test/cityscapes-munster_000061_000019")
    args = parser.parse_args()
    logger.debug(args)

    lwmap_range = (0, 10)

    if args.debug:
        enable_debug()

    if args.distmap:
        create_folder("results/distmap/")
        gt_path = f'{args.input}-gt_affordances.png'
        vis_path = f'{args.input}-orig-rgb_affordances.png'
        depth_path = ('datasets/freiburg-forest/'
                      'freiburg_forest_multispectral_annotated/'
                      'freiburg_forest_annotated/test/depth_gray/'
                      'b1-09517_Clipped_redict_depth_gray.png')
        pred_path = gt_path
        image_orig = imread(gt_path)
        image_orig = cv2.resize(image_orig, dsize=(480, 240))

    if args.depth:
def check_hero(data, message, with_level=False, with_star=False,
               with_soldier=False, with_equipment_type=0, with_skill_index=-1,
               with_evolution=False, with_equipment_stone=False,
               with_herostar=False, with_awaken=False):
    """Validate hero information.

    Args:
        data[UserData]
        message[protobuf]
        with_xxx[bool]: whether particular fields need validating,
            including level, star, soldier, equipment, and skills
    """
    hero_id = HeroInfo.generate_id(data.id, message.basic_id)
    hero = data.hero_list.get(hero_id, True)
    assert hero is not None

    if with_level:
        logger.debug("check hero[level=%d][exp=%d]" % (hero.level, hero.exp))
        assert hero.level == message.level
        # A client bug makes this check fail frequently; let it pass for now
        #assert hero.exp == message.exp
        if hero.exp != message.exp:
            logger.warning(
                "check hero error[basic_id=%d][exp=%d][req_exp=%d]"
                % (message.basic_id, hero.exp, message.exp))

    if with_star:
        assert hero.star == message.star_level

    if with_soldier:
        assert hero.soldier_basic_id == message.soldier_basic_id
        assert hero.soldier_level == message.soldier_level

    if with_equipment_type != 0 and with_equipment_stone != True:
        if with_equipment_type == HeroInfo.EQUIPMENT_TYPE_WEAPON:
            assert hero.get_equipment(
                with_equipment_type) == message.equipment_weapon_id
        elif with_equipment_type == HeroInfo.EQUIPMENT_TYPE_ARMOR:
            assert hero.get_equipment(
                with_equipment_type) == message.equipment_armor_id
        else:
            assert hero.get_equipment(
                with_equipment_type) == message.equipment_treasure_id

    if with_skill_index != -1:
        if with_skill_index == 0:
            assert hero.get_skill(with_skill_index) == message.first_skill_id
        elif with_skill_index == 1:
            assert hero.get_skill(with_skill_index) == message.second_skill_id
        elif with_skill_index == 2:
            assert hero.get_skill(with_skill_index) == message.third_skill_id
        else:
            assert hero.get_skill(with_skill_index) == message.fourth_skill_id

    if with_evolution:
        assert hero.evolution_level == message.evolution_level

    if with_equipment_stone == True and with_equipment_type != 0:
        if with_equipment_type == HeroInfo.EQUIPMENT_TYPE_WEAPON:
            assert cmp(hero.get_equipment_stones(with_equipment_type),
                       message.stone_weapon) == 0
        elif with_equipment_type == HeroInfo.EQUIPMENT_TYPE_ARMOR:
            assert cmp(hero.get_equipment_stones(with_equipment_type),
                       message.stone_armor) == 0
        else:
            assert cmp(hero.get_equipment_stones(with_equipment_type),
                       message.stone_treasure) == 0

    if with_herostar:
        herostars = hero.get_herostars()
        assert cmp(herostars, message.hero_star) == 0

    if with_awaken:
        assert hero.is_awaken == message.hero_awakening
class VPNProfile(object): objs = {} objs_locker = threading.Lock() def __new__(cls, *args, **kwargs): if cls in cls.objs: return cls.objs[cls]['obj'] cls.objs_locker.acquire() # check again if cls in cls.objs: return cls.objs[cls]['obj'] obj = object.__new__(cls) cls.objs[cls] = {'obj': obj, 'init': False} setattr(cls, '__init__', cls.private_vpn_init(cls.__init__)) cls.objs_locker.release() return cls.objs[cls]['obj'] @classmethod def private_vpn_init(cls, fn): def init_wrap(*args): if not cls.objs[cls]['init']: fn(*args) cls.objs[cls]['init'] = True return return init_wrap def get_child_node(self, node, child_node_name): node_childs = node.getchildren() for child in node_childs: if child.text == child_node_name: return child return None def add_node(self, node_dict, node_name, node_value, value_type): node_pl_key = ElementTree.SubElement(node_dict, 'key') node_pl_key.text = node_name node_pl_string = ElementTree.SubElement(node_dict, value_type) node_pl_string.text = node_value return node_dict def add_bool_node(self, node_dict, node_name, node_value=False): node_pl_key = ElementTree.SubElement(node_dict, 'key') node_pl_key.text = node_name if node_value is True: ElementTree.SubElement(node_dict, 'true') else: ElementTree.SubElement(node_dict, 'false') def read_p12_data(self, p12): if p12: return file(p12, 'rb').read() else: return None def generate_payload(self): node_dict = ElementTree.Element('dict') self.add_node(node_dict, 'PayloadVersion', '1', 'integer') self.add_node(node_dict, 'PayloadDescription', 'Wensense Mobile Security Profile', 'string') self.add_node(node_dict, 'PayloadOrganization', 'Websense,Inc.', 'string') self.add_node(node_dict, 'PayloadUUID', str(uuid4()).upper(), 'string') return node_dict def get_next_node(self, node, child_node_name): if node: node_childs = node.getchildren() next_node = False for child in node_childs: if next_node: return child elif child.text == child_node_name: next_node = True return None def __init__(self, account=None): logger.debug("Begin to init vpn profile") self.init_done = 0 if not config: return logger.debug("profile template in config file: %s, account: %s", config['template'], account) # templete is not right #for customed vpn profile self.profile = None self.activesync_ca_data = None if account: template_dir = os.path.dirname(config['template']) customed_vpn_template_path = template_dir + '/customized_VPN_profile_' + str( account) possible_file_name_list = [ customed_vpn_template_path, config['template'] ] else: possible_file_name_list = [config['template']] logger.debug('possible_file_name_list: %s', possible_file_name_list) for name in possible_file_name_list: if os.path.isfile(name): self.profile = load_plist_from_file(name) logger.debug('will use template: %s', name) break if not self.profile: logger.error("load profile error: %s", config['template']) return self.node_root = self.profile.getroot() node_dict = self.node_root.getchildren()[0] if not node_dict: logger.error("profile do not contains any payload") node_ident = self.get_child_node(node_dict, 'PayloadIdentifier') if not node_ident.text: logger.error("profile do not contains payload identifier") return else: identifier = self.get_next_node(node_dict, 'PayloadIdentifier') logger.debug("Payload identifier: %s", identifier.text) node_payloads = self.get_child_node(node_dict, 'PayloadContent') if not node_payloads.text: logger.error("profile do not contains any payload content") return node_payloads_val = self.get_next_node(node_dict, 'PayloadContent') if 
node_payloads_val.tag != 'array' or len( node_payloads_val.getchildren()) < 1: logger.error("The format of profile's payload content is invalid") return self.node_vpn = node_payloads_val.getchildren()[0] logger.debug('remote address: %s', config['remote_address']) node_ipsec_val = self.get_next_node(self.node_vpn, 'IPSec') node_ipsec_remote_addr_val = self.get_next_node( node_ipsec_val, 'RemoteAddress') node_ipsec_remote_addr_val.text = config['remote_address'] self.node_ipsec_v = self.get_next_node(self.node_vpn, 'IPSec') root_ca = load_ca_from_file(config['root_ca']) if not root_ca: logger.error("load root ca error: %s", config['root_ca']) return vpn_ca = load_ca_from_file(config['vpn_ca']) if not vpn_ca: logger.error("load vpn ca error: %s", config['vpn_ca']) return self.issuer_ca = load_ca_from_file(config['issuer_ca']) if not self.issuer_ca: logger.error("load issuer ca error: %s", config['issuer_ca']) return self.issuer_key = load_key_from_file(config['issuer_key'], config['issuer_passcode']) if not self.issuer_key: logger.error("load issuer key error: %s", config['issuer_key']) return self.ssl_ca = load_ca_from_file(config['ssl_ca']) if not self.ssl_ca: logger.error("load ssl ca error: %s", config['ssl_ca']) return logger.debug("Finish loading ca and key from file") # root ca payload root_ca_payload = self.generate_payload() self.add_node(root_ca_payload, 'PayloadIdentifier', identifier.text + '.credential1', 'string') self.add_node(root_ca_payload, 'PayloadType', 'com.apple.security.root', 'string') self.add_node(root_ca_payload, 'PayloadDisplayName', 'MOBILEVPN ROOT CA', 'string') self.add_node(root_ca_payload, 'PayloadCertificateFileName', 'MOBILEVPN ROOT CA', 'string') self.add_node(root_ca_payload, 'PayloadContent', base64.encodestring(dumps_ca_to_string(root_ca)), 'data') node_payloads_val.append(root_ca_payload) # vpn ca payload vpn_ca_payload = self.generate_payload() self.add_node(vpn_ca_payload, 'PayloadIdentifier', identifier.text + '.credential2', 'string') self.add_node(vpn_ca_payload, 'PayloadType', 'com.apple.security.pkcs1', 'string') self.add_node(vpn_ca_payload, 'PayloadDisplayName', 'MOBILEVPN VPN CA', 'string') self.add_node(vpn_ca_payload, 'PayloadCertificateFileName', 'MOBILEVPN VPN CA', 'string') self.add_node(vpn_ca_payload, 'PayloadContent', base64.encodestring(dumps_ca_to_string(vpn_ca)), 'data') node_payloads_val.append(vpn_ca_payload) # issuer ca payload issuer_ca_payload = self.generate_payload() self.add_node(issuer_ca_payload, 'PayloadIdentifier', identifier.text + '.credential3', 'string') self.add_node(issuer_ca_payload, 'PayloadType', 'com.apple.security.pkcs1', 'string') self.add_node(issuer_ca_payload, 'PayloadDisplayName', 'MOBILE ES CA', 'string') self.add_node(issuer_ca_payload, 'PayloadCertificateFileName', 'MOBILE ES CA', 'string') self.add_node(issuer_ca_payload, 'PayloadContent', base64.encodestring(dumps_ca_to_string(self.issuer_ca)), 'data') node_payloads_val.append(issuer_ca_payload) # user certificate payload self.user_ca_payload = self.generate_payload() self.add_node(self.user_ca_payload, 'PayloadIdentifier', identifier.text + '.credential4', 'string') self.add_node(self.user_ca_payload, 'PayloadType', 'com.apple.security.pkcs12', 'string') self.add_node(self.user_ca_payload, 'Password', config['passout'], 'string') node_payloads_val.append(self.user_ca_payload) node_ipsec_udid_val = self.get_next_node(node_ipsec_val, 'PayloadCertificateUUID') node_ipsec_udid_val.text = self.get_next_node(self.user_ca_payload, 'PayloadUUID').text # 
def dumps(self, user_info, pac_url, exceptlst, pkey, vpn_bypass_except,
          sn=None):
    """ Dumps the vpn profile to a plist string.
        Parameter Notes:
        user_info(string): must contain the Username and DeviceID keywords,
            like "Username:[email protected];DeviceID:someone's device"
        pac_url(string): a valid http or https url
    """
    p12, cn, expire_date = issuer_certificate(self.issuer_ca, self.issuer_key,
                                              user_info, config['passout'],
                                              pkey)
    if not p12 or not cn:
        return None, None
    data = dumps_p12_to_string(p12)
    if not data:
        return None, None
    self.activesync_ca_data = base64.encodestring(data)
    self.add_node(self.user_ca_payload, 'PayloadCertificateFileName', cn,
                  'string')
    self.add_node(self.user_ca_payload, 'PayloadDisplayName', cn, 'string')
    self.add_node(self.user_ca_payload, 'PayloadContent',
                  self.activesync_ca_data, 'data')
    node_proxy_val = self.get_next_node(self.node_vpn, 'Proxies')
    node_proxy_url = self.get_next_node(node_proxy_val,
                                        'ProxyAutoConfigURLString')
    node_proxy_url.text = self.update_pacfile_type(pac_url)
    if len(exceptlst) > 0:
        domain_match_node = self.get_next_node(self.node_ipsec_v,
                                               'OnDemandMatchDomainsNever')
        d_node_val = self.get_domainaction_node()
        for vpnexcept in exceptlst:
            try:
                assert 'attributes' in vpnexcept, \
                    'vpn exception dict does not have key: attributes'
                node_except_string = ElementTree.SubElement(
                    domain_match_node, 'string')
                node_except_string1 = ElementTree.SubElement(
                    d_node_val, 'string')
                attrs = vpnexcept['attributes']
                if 'domainName' in attrs:
                    node_except_string.text = attrs['domainName']
                    node_except_string1.text = attrs['domainName']
                elif 'ipaddress' in attrs:
                    node_except_string.text = attrs['ipaddress']
                    node_except_string1.text = attrs['ipaddress']
                elif 'subnet' in attrs:
                    node_except_string.text = attrs['subnet']
                    node_except_string1.text = attrs['subnet']
                else:
                    logger.debug('no domainName, ipaddress or subnet in vpn '
                                 'exception dict: %s' % vpnexcept)
            except Exception, ve:
                logger.error('add vpn exception value to vpn profile '
                             'error %s' % repr(ve))
    # add rules for local network access
    ondemandrules_node = self.get_next_node(self.node_ipsec_v,
                                            'OnDemandRules')
    self.add_vpn_ondemandrules(ondemandrules_node, vpn_bypass_except)
    # activesync account; same data as self.user_ca_payload
    self.add_node(self.activesync_payload, 'Certificate',
                  self.activesync_ca_data, 'data')
    # add encrypted serial number as user name for activesync
    if sn:
        logger.debug('will ADD username to activesync payload')
        #encrypted_sn = base64.b32encode(encrypt_sn(sn))
        encrypted_sn = base64.b32encode(sn)
        self.add_node(self.activesync_payload, 'UserName', encrypted_sn,
                      'string')
        self.add_node(self.activesync_payload, 'Password', 'fakedata',
                      'string')
    else:
        logger.debug('will NOT ADD username to activesync payload')
    try:
        plist = ('<?xml version="1.0" encoding="UTF-8"?>'
                 '<!DOCTYPE plist PUBLIC '
                 '"-//Apple Computer//DTD PLIST 1.0//EN" '
                 '"http://www.apple.com/DTDs/PropertyList-1.0.dtd">\n' +
                 ElementTree.tostring(self.node_root))
        return plist, expire_date
    except Exception, e:
        logger.error('serialize vpn profile error: %s' % repr(e))
        return None, None
def forward(self, output, target, debug=False, already_flattened=False,
            weight_map=None):
    if not already_flattened:
        bs, output, target, weight_map = losses.prepare_sample(
            output, target, weight_map=weight_map, masking=self.masking)
        output = torch.argmax(output, dim=-1)
        if self.masking:
            mask = target.ge(0)
            # logger.debug(mask, mask.shape)
            # logger.debug(output.shape, target.shape)
            output = output[mask]
            target = target[mask]
            if weight_map is not None:
                weight_map = weight_map[mask]
    # map class indices to their ordinal rank values
    target_orig = torch.clone(target).float()
    output_orig = torch.clone(output).float()
    for i, r in enumerate(self.ranks):
        target[target_orig == i] = r
        output[output_orig == i] = r
    incorrect = (target != output)
    correct = (target == output)
    if weight_map is not None:
        correct_w = correct.long() * weight_map
        logger.debug(f"correct_w {correct_w}, {weight_map}")
    else:
        correct_w = correct
    if weight_map is not None:
        samples_w = torch.sum(weight_map, dim=0, keepdim=False)
    else:
        samples_w = torch.sum(torch.ones_like(correct), dim=0, keepdim=False)
    target, output = target.float(), output.float()
    dist_l1 = self.l1(output, target)
    dist_l2 = self.l2(output, target)
    dist_logl2 = self.logl2(output, target)
    #dist_logl1 = self.logl1(output, target)
    # mean rank distance over the misclassified samples only
    mistake_severity = self.l1(output[incorrect], target[incorrect])
    logger.debug(f"L1 distance {dist_l1}")
    result = {
        "dist_l1": dist_l1,
        "dist_l2": dist_l2,
        #"dist_logl1": dist_logl1,
        "dist_logl2": dist_logl2,
        # normalize severity using the min/max possible mistake distance
        "dist_mistake_severity": (mistake_severity - self.mistake_min) /
                                 (self.mistake_max - self.mistake_min),
        "correct": correct,
        "correct_w": correct_w,
        "samples_w": samples_w
    }
    return result
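To make the "mistake severity" computation above concrete, a standalone sketch in plain torch (the names and values here are illustrative, not from the source; it reproduces only the rank-mapping and the L1-over-incorrect step):

import torch

ranks = torch.tensor([0., 1., 3.])     # per-class ordinal rank values
preds = torch.tensor([0, 1, 2, 2])     # predicted class indices
labels = torch.tensor([0, 2, 2, 0])
pred_r, label_r = ranks[preds], ranks[labels]
incorrect = pred_r != label_r
# mean absolute rank distance over the misclassified samples only
mistake_severity = (pred_r[incorrect] - label_r[incorrect]).abs().mean()
print(mistake_severity)                # tensor(2.5000)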
def __init__(self, account=None):
    logger.debug("Begin to init vpn profile")
    self.init_done = 0
    if not config:
        return
    logger.debug("profile template in config file: %s, account: %s",
                 config['template'], account)
    # if a customized per-account template exists, prefer it over the
    # stock template from the config file
    self.profile = None
    self.activesync_ca_data = None
    if account:
        template_dir = os.path.dirname(config['template'])
        customed_vpn_template_path = (template_dir +
                                      '/customized_VPN_profile_' +
                                      str(account))
        possible_file_name_list = [
            customed_vpn_template_path, config['template']
        ]
    else:
        possible_file_name_list = [config['template']]
    logger.debug('possible_file_name_list: %s', possible_file_name_list)
    for name in possible_file_name_list:
        if os.path.isfile(name):
            self.profile = load_plist_from_file(name)
            logger.debug('will use template: %s', name)
            break
    if not self.profile:
        logger.error("load profile error: %s", config['template'])
        return
    self.node_root = self.profile.getroot()
    node_dict = self.node_root.getchildren()[0]
    if not node_dict:
        logger.error("profile does not contain any payload")
        return
    node_ident = self.get_child_node(node_dict, 'PayloadIdentifier')
    if not node_ident.text:
        logger.error("profile does not contain a payload identifier")
        return
    identifier = self.get_next_node(node_dict, 'PayloadIdentifier')
    logger.debug("Payload identifier: %s", identifier.text)
    node_payloads = self.get_child_node(node_dict, 'PayloadContent')
    if not node_payloads.text:
        logger.error("profile does not contain any payload content")
        return
    node_payloads_val = self.get_next_node(node_dict, 'PayloadContent')
    if node_payloads_val.tag != 'array' or len(
            node_payloads_val.getchildren()) < 1:
        logger.error("The format of profile's payload content is invalid")
        return
    self.node_vpn = node_payloads_val.getchildren()[0]
    logger.debug('remote address: %s', config['remote_address'])
    node_ipsec_val = self.get_next_node(self.node_vpn, 'IPSec')
    node_ipsec_remote_addr_val = self.get_next_node(node_ipsec_val,
                                                    'RemoteAddress')
    node_ipsec_remote_addr_val.text = config['remote_address']
    self.node_ipsec_v = self.get_next_node(self.node_vpn, 'IPSec')
    root_ca = load_ca_from_file(config['root_ca'])
    if not root_ca:
        logger.error("load root ca error: %s", config['root_ca'])
        return
    vpn_ca = load_ca_from_file(config['vpn_ca'])
    if not vpn_ca:
        logger.error("load vpn ca error: %s", config['vpn_ca'])
        return
    self.issuer_ca = load_ca_from_file(config['issuer_ca'])
    if not self.issuer_ca:
        logger.error("load issuer ca error: %s", config['issuer_ca'])
        return
    self.issuer_key = load_key_from_file(config['issuer_key'],
                                         config['issuer_passcode'])
    if not self.issuer_key:
        logger.error("load issuer key error: %s", config['issuer_key'])
        return
    self.ssl_ca = load_ca_from_file(config['ssl_ca'])
    if not self.ssl_ca:
        logger.error("load ssl ca error: %s", config['ssl_ca'])
        return
    logger.debug("Finished loading ca and key from file")
    # root ca payload
    root_ca_payload = self.generate_payload()
    self.add_node(root_ca_payload, 'PayloadIdentifier',
                  identifier.text + '.credential1', 'string')
    self.add_node(root_ca_payload, 'PayloadType', 'com.apple.security.root',
                  'string')
    self.add_node(root_ca_payload, 'PayloadDisplayName', 'MOBILEVPN ROOT CA',
                  'string')
    self.add_node(root_ca_payload, 'PayloadCertificateFileName',
                  'MOBILEVPN ROOT CA', 'string')
    self.add_node(root_ca_payload, 'PayloadContent',
                  base64.encodestring(dumps_ca_to_string(root_ca)), 'data')
    node_payloads_val.append(root_ca_payload)
    # vpn ca payload
    vpn_ca_payload = self.generate_payload()
    self.add_node(vpn_ca_payload, 'PayloadIdentifier',
                  identifier.text + '.credential2', 'string')
    self.add_node(vpn_ca_payload, 'PayloadType', 'com.apple.security.pkcs1',
                  'string')
    self.add_node(vpn_ca_payload, 'PayloadDisplayName', 'MOBILEVPN VPN CA',
                  'string')
    self.add_node(vpn_ca_payload, 'PayloadCertificateFileName',
                  'MOBILEVPN VPN CA', 'string')
    self.add_node(vpn_ca_payload, 'PayloadContent',
                  base64.encodestring(dumps_ca_to_string(vpn_ca)), 'data')
    node_payloads_val.append(vpn_ca_payload)
    # issuer ca payload
    issuer_ca_payload = self.generate_payload()
    self.add_node(issuer_ca_payload, 'PayloadIdentifier',
                  identifier.text + '.credential3', 'string')
    self.add_node(issuer_ca_payload, 'PayloadType',
                  'com.apple.security.pkcs1', 'string')
    self.add_node(issuer_ca_payload, 'PayloadDisplayName', 'MOBILE ES CA',
                  'string')
    self.add_node(issuer_ca_payload, 'PayloadCertificateFileName',
                  'MOBILE ES CA', 'string')
    self.add_node(issuer_ca_payload, 'PayloadContent',
                  base64.encodestring(dumps_ca_to_string(self.issuer_ca)),
                  'data')
    node_payloads_val.append(issuer_ca_payload)
    # user certificate payload; its content is filled in later by dumps()
    self.user_ca_payload = self.generate_payload()
    self.add_node(self.user_ca_payload, 'PayloadIdentifier',
                  identifier.text + '.credential4', 'string')
    self.add_node(self.user_ca_payload, 'PayloadType',
                  'com.apple.security.pkcs12', 'string')
    self.add_node(self.user_ca_payload, 'Password', config['passout'],
                  'string')
    node_payloads_val.append(self.user_ca_payload)
    node_ipsec_udid_val = self.get_next_node(node_ipsec_val,
                                             'PayloadCertificateUUID')
    node_ipsec_udid_val.text = self.get_next_node(self.user_ca_payload,
                                                  'PayloadUUID').text
    # ssl root ca
    ssl_ca_payload = self.generate_payload()
    self.add_node(ssl_ca_payload, 'PayloadIdentifier',
                  identifier.text + '.credential5', 'string')
    self.add_node(ssl_ca_payload, 'PayloadType', 'com.apple.security.root',
                  'string')
    self.add_node(ssl_ca_payload, 'PayloadDisplayName', 'MOBILEProxy ROOT CA',
                  'string')
    self.add_node(ssl_ca_payload, 'PayloadCertificateFileName',
                  'MOBILEProxy ROOT CA', 'string')
    self.add_node(ssl_ca_payload, 'PayloadContent',
                  base64.encodestring(dumps_ca_to_string(self.ssl_ca)),
                  'data')
    node_payloads_val.append(ssl_ca_payload)
    # activesync account
    self.activesync_payload = self.generate_payload()
    self.add_node(self.activesync_payload, 'PayloadIdentifier',
                  'com.websense.activesync.account', 'string')
    self.add_node(self.activesync_payload, 'PayloadType',
                  'com.apple.eas.account', 'string')
    self.add_node(self.activesync_payload, 'PayloadDisplayName',
                  'websense exchange activesync', 'string')
    # should read this information from conf
    self.add_node(self.activesync_payload, 'Host', config['asa_host'],
                  'string')
    self.add_node(self.activesync_payload, 'EmailAddress',
                  config['asa_email_addr'], 'string')
    # just to work around the password popup
    self.add_node(self.activesync_payload, 'CertificatePassword',
                  config['passout'], 'string')
    self.add_node(self.activesync_payload, 'MailNumberOfPastDaysToSync', '0',
                  'integer')
    self.add_bool_node(self.activesync_payload, 'SSL', True)
    self.add_bool_node(self.activesync_payload, 'PreventMove', True)
    self.add_bool_node(self.activesync_payload, 'PreventAppSheet', True)
    node_payloads_val.append(self.activesync_payload)
    # set flag
    self.init_done = 1
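A hypothetical construction sketch (the class name VpnProfile and the account value are assumptions): a per-account template named customized_VPN_profile_<account> is preferred when present, otherwise config['template'] is used, and init_done stays 0 on any template or CA load error:

profile = VpnProfile(account=1001)       # hypothetical account id
if not profile.init_done:
    raise RuntimeError('vpn profile template or CA material failed to load')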
def agent_signal_handler(signum, frame):
    ''' Agent signal handler; defined as a closure so it can reach self. '''
    logger.debug("Server terminate signal is received")
    self.stop()
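A sketch of how such a handler is typically registered (signal is the standard library module; the SIGTERM/SIGINT choice is an assumption, and the registration must happen where the handler, and hence self, is in scope):

import signal

signal.signal(signal.SIGTERM, agent_signal_handler)
signal.signal(signal.SIGINT, agent_signal_handler)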