def main(project_name):
    """Embed all DELF index images and persist an Annoy nearest-neighbour index.

    Loads the trained embedding model, embeds every ``*.delf`` file found in
    the index directory (position 0 only), then writes the image-name list
    (JSON) and the built Annoy index under ``_embed_index/``.

    :param project_name: suffix used to locate the model and name the outputs
    """
    tic = time.time()
    logger = Logger('_03_embed_index_{}'.format(project_name))
    logger.info('=' * 50)

    model_path = '_model/embedding_model_{}.pt'.format(project_name)
    logger.info('load model from {}'.format(model_path))
    model = torch.load(model_path)
    model.eval()  # inference mode only

    dir_target = '../../input_large_delf/index'
    embedder = ImgEmbedder(model, dir_target)

    f = 512  # embedding dimensionality
    t = AnnoyIndex(f, metric='euclidean')

    # fix: os.listdir was re-invoked five times; list each directory once
    target_files = os.listdir(dir_target)
    num_original = len(os.listdir('../../input/index'))
    print(len(target_files))
    print(num_original)
    # sanity check: the DELF dir must mirror the raw index dir one-to-one
    assert len(target_files) == num_original
    # every file in the DELF dir must carry the .delf extension
    assert all(name[-5:] == '.delf' for name in target_files)

    num_index = len(target_files)
    index_names = list()
    logger.info('===> embed index images')
    for i in tqdm(range(num_index)):
        target_file = target_files[i]
        index_names.append(target_file[:-5])  # strip '.delf'
        # Only position 0 is embedded (a 3-position loop was abandoned);
        # the old id expression `i + num_index * p` therefore reduces to i.
        img_feature = embedder.get_vector(target_file[:-5], pos=0)
        t.add_item(i, img_feature.tolist())

    dir_index = '_embed_index'
    os.makedirs(dir_index, exist_ok=True)
    with open(
            os.path.join(dir_index,
                         'index_names_{}.json'.format(project_name)),
            'w') as f:
        json.dump(index_names, f)
    t.build(100)  # 100 trees
    t.save(
        os.path.join(dir_index,
                     'index_features_{}.ann'.format(project_name)))

    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))
class BasePlugin():
    """Base class for host-collection plugins that gather data over SSH."""

    def __init__(self, host_dict):
        # host_dict: connection parameters -- expects keys
        # 'ip', 'port', 'username', 'password' (see ssh property).
        self.logger = Logger()
        self.host_dict = host_dict
        # NOTE(review): os_hostname() uses the ssh property, whose error path
        # reads self.hostname -- which is not set yet at this point; a connect
        # failure during __init__ would raise AttributeError. Confirm.
        self.hostname = self.os_hostname()

    @property
    def ssh(self):
        # NOTE(review): a NEW SSHClient is created (and never closed) on every
        # attribute access -- each use of self.ssh leaks a connection.
        host_dict = self.host_dict
        import paramiko
        ssh = paramiko.SSHClient()
        # NOTE(review): AutoAddPolicy is passed as a class, not an instance;
        # recent paramiko accepts both, older versions expect an instance.
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy)
        try:
            ssh.connect(hostname=host_dict['ip'], port=host_dict['port'],
                        username=host_dict['username'],
                        password=host_dict['password'])
        except Exception as e:
            msg = "%s linux BasePlugin-ssh connect error: %s"
            self.logger.log(msg % (self.hostname, traceback.format_exc()),
                            False)
        # returned even if connect failed -- callers get an unconnected client
        return ssh

    def os_hostname(self):
        """
        Get the remote host's name by running `hostname` over SSH.
        :return: hostname string (gbk-decoded, whitespace-stripped)
        """
        stdin, stdout, stderr = self.ssh.exec_command('hostname')
        hostname = stdout.read().decode('gbk').strip()
        return hostname

    def linux(self):
        # abstract hook: concrete plugins must override
        raise Exception('You must implement linux method.')
def __init__(self):
    """
    Initialize: locate the project root (the 'mts' directory), load
    conf/conf.ini and read all DB / service-URL settings into attributes.
    """
    self.log = Logger()
    self.config = configparser.RawConfigParser()
    # self.log = Log.MyLog()
    # Resolve the project root: try a Windows-style path split first; if
    # 'mts' is not found that way, fall back to POSIX separators.
    try:
        PATH_LIST = os.getcwd().split("\\")
        self.log.info('执行路径为:{}'.format(PATH_LIST))
        PATH = "\\".join(PATH_LIST[:PATH_LIST.index('mts') + 1])
    except Exception:
        PATH_LIST = os.getcwd().split("/")
        self.log.info('执行路径为:{}'.format(PATH_LIST))
        PATH = "/".join(PATH_LIST[:PATH_LIST.index('mts') + 1])
    self.log.info('跟路径为:{}'.format(PATH))
    # Probe both separator styles for the config file; os.stat raises if
    # the candidate path does not exist.
    try:
        self.conf_path = os.path.join(PATH, 'conf\conf.ini')
        os.stat(self.conf_path)
    except Exception:
        self.conf_path = os.path.join(PATH, 'conf/conf.ini')
        os.stat(self.conf_path)
    if not os.path.exists(self.conf_path):
        raise FileNotFoundError("请确保配置文件存在!")
    self.config.read(self.conf_path, encoding='utf-8')
    # business database connection settings
    self.db_host = self.get_conf(Config.TITLE_DB, Config.VALUE_DB_HOST)
    self.db_port = self.get_conf(Config.TITLE_DB, Config.VALUE_DB_PORT)
    self.db_user = self.get_conf(Config.TITLE_DB, Config.VALUE_DB_USER)
    self.db_password = self.get_conf(Config.TITLE_DB,
                                     Config.VALUE_DB_PASSWORD)
    self.db_charset = self.get_conf(Config.TITLE_DB,
                                    Config.VALUE_DB_CHARSET)
    # tech database connection settings
    self.tech_db_host = self.get_conf(Config.TITLE_TECH_DB,
                                      Config.VALUE_DB_HOST)
    self.tech_db_port = self.get_conf(Config.TITLE_TECH_DB,
                                      Config.VALUE_DB_PORT)
    self.tech_db_user = self.get_conf(Config.TITLE_TECH_DB,
                                      Config.VALUE_DB_USER)
    self.tech_db_password = self.get_conf(Config.TITLE_TECH_DB,
                                          Config.VALUE_DB_PASSWORD)
    self.tech_db_charset = self.get_conf(Config.TITLE_TECH_DB,
                                         Config.VALUE_DB_CHARSET)
    # service base URLs
    self.url_base = self.get_conf(Config.TITLE_DEBUG, Config.VALUE_URL_BASE)
    self.ai_url_base = self.get_conf(Config.TITLE_AI,
                                     Config.VALUE_AI_URL_BASE)
    self.ai_url_base_sms = self.get_conf(Config.TITLE_AI,
                                         Config.VALUE_AI_URL_BASE_SMS)
    self.oa_url_base = self.get_conf(Config.TITLE_OA,
                                     Config.VALUE_OA_URL_BASE)
    self.grow_url_base = self.get_conf(Config.TITLE_Grow,
                                       Config.Value_Grow_Base)
def process(self):
    """
    根据主机名获取资产信息,将其发送到API
    :return: {
        "data": [ {"hostname": "c1.com"}, {"hostname": "c2.com"}],
        "error": null, "message": null, "status": true }
    """
    task = self.get_asset()
    print(task, "task")
    # transport-level failure: get_asset handed back the exception object
    if isinstance(task, ConnectionError):
        Logger().log(task, False)
        return
    elif not task['status']:
        Logger().log(task['message'], False)
        # bug fix: previously fell through and iterated task['data'] even
        # though the API reported failure (data may be null)
        return
    # thread pool: at most 10 concurrent collections
    pool = ThreadPoolExecutor(10)
    for item in task['data']:
        hostname = item['hostname']
        pool.submit(self.run, hostname)
    pool.shutdown(wait=True)  # block until all submissions finish
def __init__(self):
    """Set up the asset client: logger, API endpoint and auth header."""
    self.logger = Logger()
    # absent TEST_MODE in settings means "run for real"
    self.test_mode = getattr(settings, 'TEST_MODE', False)
    self.asset_api = settings.ASSET_API
    self.key = settings.KEY
    self.key_header_name = settings.AUTH_KEY_NAME
    # must come last: auth_key() derives the header from the fields above
    self.key_header = self.auth_key()
def callback(self, status, response):
    """
    Log the outcome of an API submission.
    :param status: True when the HTTP request itself succeeded
    :param response: response object on success, exception object on failure
    :return: None
    """
    if not status:
        Logger().log(str(response), False)
        return
    ret = json.loads(response.text)
    # bug fix: Logger.log was invoked on the class without an instance
    # (every other call site uses Logger().log), mis-binding `self`
    if ret['code'] == 1000:
        Logger().log(ret['message'], True)
    else:
        Logger().log(ret['message'], False)
def __init__(self, node_name, config, verbose=False):
    '''Create a model object with given node_name, configuration and labels
    gathered from label_section in config options'''
    self.verbose = verbose
    self.node_name = node_name
    # get options
    # attack_keys: the option names of the section named by `labels`
    self.attack_keys = config.options(config.get(node_name, 'labels'))
    n_labels = len(self.attack_keys)
    # one-hot vectors, one per label (identity-matrix rows)
    outputs = [[1 if j == i else 0 for j in range(n_labels)]
               for i in range(n_labels)]
    self.outputs = dict(zip(self.attack_keys, outputs))
    # boolean flags: presence of the option alone switches the behaviour on
    self.force_train = config.has_option(node_name, 'force_train')
    self.use_regressor = config.has_option(node_name, 'regressor')
    self.unsupervised = config.has_option(node_name, 'unsupervised')
    self.save_path = config.get(node_name, 'saved-model-path')
    if self.use_regressor or self.unsupervised:
        # generate outputs for unsupervised or regressor models
        # (collapse each one-hot vector to its class index)
        self.outputs = {
            k: np.argmax(self.outputs[k])
            for k in self.outputs
        }
    # optional raw-label -> canonical-label mapping
    self.label_map = dict(config.items(config.get(
        node_name, 'labels-map'))) if config.has_option(
            node_name, 'labels-map') else dict()
    # model settings: classifier is mandatory, the rest default to None
    self.classifier = config.get(node_name, 'classifier')
    self.classifier_module = config.get(
        node_name, 'classifier-module') if config.has_option(
            node_name, 'classifier-module') else None
    self.feature_selection = config.get(
        node_name, 'feature-selection') if config.has_option(
            node_name, 'feature-selection') else None
    self.feature_selection_module = config.get(
        node_name, 'feature-selection-module') if config.has_option(
            node_name, 'feature-selection-module') else None
    self.scaler = config.get(node_name, 'scaler') if config.has_option(
        node_name, 'scaler') else None
    self.scaler_module = config.get(
        node_name, 'scaler-module') if config.has_option(
            node_name, 'scaler-module') else None
    self.model = None  # leave uninitialized (run self.train)
    self.saved_model_file = None
    self.saved_feature_selection_file = None
    self.saved_scaler_file = None
    self.stats = Stats(self)
    # log name: first line of the classifier spec, stripped to its class name
    self.logger = Logger(
        config.get('ids', 'log-dir'), node_name,
        self.classifier.split('\n')[0].strip('()').split('.')[-1])
def __init__(self, table, charset='utf8'):
    """Load monkey-test DB connection settings from conf/db.conf."""
    base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    parser = configparser.ConfigParser()
    parser.read(base_dir + "/conf/db.conf")
    section = 'monkey_test_db'
    self.host = parser.get(section, 'host')
    self.port = int(parser.get(section, 'port'))
    self.user = parser.get(section, 'user')
    # 'passward' (sic) matches the key spelled that way in db.conf
    self.passward = parser.get(section, 'passward')
    self.table = table
    self.charset = charset
    self.log = Logger()
def callback(self, status, response):
    """
    Callback invoked after submitting assets.
    :param status: whether the HTTP request succeeded
    :param response: response object on success; exception object on error
    :return: None
    """
    if not status:
        # bug fix: Logger.log was a class-level call missing its instance
        Logger().log(str(response), False)
        # bug fix: must not fall through -- `response` is an exception here
        # and has no .text attribute
        return
    ret = json.loads(response.text)
    if ret['code'] == 1000:
        Logger().log(ret['message'], True)
    else:
        Logger().log(ret['message'], False)
def main(project_name):
    """Embed every test image (averaged over 3 positions) into an Annoy index.

    Writes the image-name list (JSON) and the built Annoy index under
    ``_embed_index/``.
    """
    tic = time.time()
    logger = Logger('_04_embed_test_{}'.format(project_name))
    logger.info('=' * 50)
    model_path = '_model/embedding_model_{}.pt'.format(project_name)
    logger.info('load model from {}'.format(model_path))
    model = torch.load(model_path)
    model.eval()  # inference mode only
    dir_target = '../../input/test'
    embedder = ImgEmbedder(model, dir_target)
    f = 512  # embedding dimensionality
    t = AnnoyIndex(f, metric='euclidean')
    target_files = os.listdir(dir_target)
    num_index = len(target_files)
    index_names = list()
    logger.info('===> embed index images')
    for i in tqdm(range(num_index)):
        target_file = target_files[i]
        # strip the 4-char extension (e.g. '.jpg') to get the image id
        index_names.append(target_file[:-4])
        # feature = mean of the embeddings at positions 0..2
        t.add_item(i, (sum(
            embedder.get_vector(target_file[:-4], pos=p)
            for p in range(3)) / 3.0).tolist())
    dir_index = '_embed_index'
    os.makedirs(dir_index, exist_ok=True)
    # NOTE: `f` is reused as the file handle here, shadowing the dim variable
    with open(
            os.path.join(dir_index,
                         'test_names_{}.json'.format(project_name)),
            'w') as f:
        json.dump(index_names, f)
    t.build(100)  # 100 trees
    t.save(os.path.join(dir_index,
                        'test_features_{}.ann'.format(project_name)))
    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))
def callback(self, status, response):
    """
    Callback invoked after submitting assets.
    :param status: True when the HTTP request itself succeeded
    :param response: response object on success, exception object on failure
    :return: None
    """
    if not status:
        # transport-level failure: log the exception text
        Logger().log(str(response), False)
        return
    payload = json.loads(response.text)
    # API signals success with code 201
    Logger().log(payload['message'], payload['code'] == 201)
def callback(self, status, response):
    """
    Write the API response (or transport error) to the log.
    :param status: whether the request itself succeeded
    :param response: response object on success, exception on failure
    :return: None
    """
    if not status:
        Logger().log(str(response), False)
        return
    body = json.loads(response.text)
    ok = (body['code'] == 1000)  # 1000 is the API success code
    Logger().log(body['message'], ok)
def callback(self, status, response):
    """
    Callback run after an asset submission completes.
    :param status: whether the request itself succeeded
    :param response: response object on success, exception on failure
    :return: None
    """
    if status:
        data = json.loads(response.text)
        # success flag mirrors the API result code
        Logger().log(data["message"], data["code"] == 1000)
    else:
        # request failed outright: response holds the exception
        Logger().log(str(response), False)
def process(self):
    """
    根据主机名获取资产信息,将其发送到API
    :return: {
        "data": [ {"hostname": "c1.com",'ip':'0.0.0.0'},
                  {"hostname": "c2.com",'ip':'0.0.0.0'}],
        "error": null, "message": null, "status": true }
    """
    for option in settings.SELECT_OPTIONS:
        task = {}
        if option == 'asset':
            task = self.get_asset()
        elif option == 'database':
            task = self.get_database()
        # bug fix: an unconditional `task = self.get_asset()` followed the
        # branches above, clobbering the 'database' result every time
        if not task['status']:
            Logger().log(task['message'], False)
            # bug fix: skip this option instead of iterating failed data
            continue
        # thread pool: at most 10 concurrent collections
        pool = ThreadPoolExecutor(10)
        for item in task['data']:
            hostname = item['hostname']
            pool.submit(self.run, hostname)
        pool.shutdown(wait=True)  # wait for this option's batch to finish
def process(self):
    """
    根据主机名获取资产信息,将其发送到API
    :return: {
        "data": [ {"hostname": "c1.com"}, {"hostname": "c2.com"}],
        "error": null, "message": null, "status": true }
    """
    task = self.get_asset()
    if not task['status']:
        Logger().log(task['message'], False)
        # bug fix: previously fell through and iterated task['data'] even
        # though the API reported failure (data may be null)
        return
    # thread pool: at most 10 concurrent collections
    pool = ThreadPoolExecutor(10)
    for item in task['data']:
        hostname = item['hostname']
        pool.submit(self.run, hostname)
    pool.shutdown(wait=True)  # block until every submission completes
def process(self):
    """
    根据主机名获取资产信息,将其发送到API
    :return: {
        "data":[{"hostname":"slave1"},{"hostname":"slave2"}],
        "error":null, "message":null, "status":true }
    """
    task = self.get_asset()
    # bug fix: checked task["task"] (always-missing key) instead of
    # task["status"] -- see the response shape in the docstring
    if not task["status"]:
        Logger().log(task["message"], False)
        # bug fix: stop here; task["data"] is not usable on failure
        return
    # thread pool: at most 10 concurrent collections
    pool = ThreadPoolExecutor(10)
    for item in task["data"]:
        hostname = item["hostname"]
        pool.submit(self.run, hostname)
    pool.shutdown(wait=True)  # block until every submission completes
def setup(self):
    """Prepare config-driven URLs, helper objects and default payload."""
    root = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
    parser = configparser.ConfigParser()
    parser.read(root + "/conf/demo.conf")
    base = parser.get('english', 'url_base')
    self.log = Logger()
    self.request = RequestBase()
    self.assertion = Assertions()
    # fixed test identities used by every request in this suite
    self.data = {
        'userLid': '12614789456ceshijinbishangxian11',
        'lessonLid': '4e746cee84d24ea8bf354ceb722cabac',
    }
    self.headers = {"Content-type": "application/json"}
    prefix = base + 'api/app/v1/lesson/report/'
    self.url1 = prefix + 'generateLessonReport'
    self.url2 = prefix + 'pushTeacherRemark'
    self.url3 = prefix + 'pushTeacherRemarkMsg'
def callback(self, status, response):
    """
    Callback invoked after submitting assets.
    :param status: True when the HTTP request itself succeeded
    :param response: response object on success, exception object on failure
    :return: None
    """
    if not status:
        # request failed before reaching the API: log the exception text
        Logger().log(str(response), False)
        return
    result = json.loads(response.text)
    # 1000 is the API's success code; anything else is logged as a failure
    Logger().log(result['message'], result['code'] == 1000)
def __init__(self, hostnome=''):
    """Base collector: resolve the collection mode from settings.

    Defaults to 'agent' when settings carries no MODE entry.
    """
    # NOTE: parameter name 'hostnome' (sic) kept for caller compatibility
    self.logger = Logger()
    self.test_mode = settings.TEST_MODE
    self.mode_list = ['agent', 'ssh', 'salt']
    self.mode = settings.MODE if hasattr(settings, 'MODE') else 'agent'
    self.hostname = hostnome
def __init__(self, hostname=None):
    """Base collector: pick the collection model from settings.

    Falls back to 'agent' when COLLECT_MODEL is not configured.
    """
    self.logger = Logger()
    self.test_model = settings.TEST_MODEL
    self.model_list = ['agent', 'ssh', 'salt']
    # getattr default replaces the explicit hasattr check
    self.collect_model = getattr(settings, 'COLLECT_MODEL', 'agent')
    self.hostname = hostname
def main(project_name, aux_projext_name):
    """Merge two projects' Annoy indexes into one combined index.

    Loads the name lists and feature indexes of both projects, merges them
    via merge_index(), and saves the combined names (JSON) and Annoy index
    under _embed_index/ with the joined project name.
    """
    tic = time.time()
    logger = Logger('_01_embed_index_{}'.format(project_name))
    logger.info('=' * 50)
    # project dirs: the trailing 7 chars of the name are stripped to get
    # the directory name -- presumably a suffix convention; confirm.
    dir_prj = os.path.join('..', project_name[:-7])
    dir_aux = os.path.join('..', aux_projext_name[:-7])
    with open(
            os.path.join(dir_prj, '_embed_index',
                         'index_names_{}.json'.format(project_name)),
            'r') as f:
        prj_index_names = json.load(f)
    with open(
            os.path.join(dir_aux, '_embed_index',
                         'index_names_{}.json'.format(aux_projext_name)),
            'r') as f:
        aux_index_names = json.load(f)
    # both indexes hold 512-d euclidean embeddings
    prj_u = AnnoyIndex(512, metric='euclidean')
    prj_u.load(
        os.path.join(dir_prj, '_embed_index',
                     'index_features_{}.ann'.format(project_name)))
    aux_u = AnnoyIndex(512, metric='euclidean')
    aux_u.load(
        os.path.join(dir_aux, '_embed_index',
                     'index_features_{}.ann'.format(aux_projext_name)))
    logger.info('===> embed index images')
    index_names, t = merge_index(prj_index_names, prj_u, aux_index_names,
                                 aux_u)
    dir_index = '_embed_index'
    os.makedirs(dir_index, exist_ok=True)
    new_prj_name = project_name + '_' + aux_projext_name
    with open(
            os.path.join(dir_index,
                         'index_names_{}.json'.format(new_prj_name)),
            'w') as f:
        json.dump(index_names, f)
    t.build(100)  # 100 trees
    t.save(
        os.path.join(dir_index,
                     'index_features_{}.ann'.format(new_prj_name)))
    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))
def main(project_name, aux_projext_name):
    """Build the submission CSV by querying neighbours in parallel.

    Splits the test ids across 8 worker processes, collects each worker's
    neighbour strings from a shared dict, and writes submission_<name>.csv.
    """
    logger = Logger('_03_make_submission_{}'.format(project_name))
    logger.info('=' * 50)
    tic = time.time()
    # combined name produced by the earlier index-merge step
    project_name = project_name + '_' + aux_projext_name
    sample_submission = pd.read_csv('../../dataset/sample_submission.csv')
    images = list()
    test_id_list = sample_submission.id
    logger.info('===> embed test images and get nearest neighbors')
    manager = Manager()
    return_dict = manager.dict()  # worker index -> list of result strings
    num_processor = 8
    # chunk sizes: sum((n + i) // p for i in range(p)) == n, so every test
    # id is assigned to exactly one worker
    l = [(len(test_id_list) + i) // num_processor
         for i in range(num_processor)]
    processor_target = 0
    list_processors = list()
    for p in range(num_processor):
        pr = Process(target=process,
                     args=(project_name,
                           test_id_list[processor_target:processor_target
                                        + l[p]],
                           p, return_dict))
        list_processors.append(pr)
        processor_target += l[p]
    for p in range(num_processor):
        list_processors[p].start()
    for p in range(num_processor):
        list_processors[p].join()
    # concatenate in worker order so rows line up with test_id_list
    for p in range(num_processor):
        images.extend(return_dict[p])
    assert len(images) == len(test_id_list)
    submission = pd.DataFrame(test_id_list, columns=['id'])
    submission['images'] = images
    output_path = '../../submission/submission_{}.csv'.format(project_name)
    submission.to_csv(output_path, index=False)
    toc = time.time() - tic
    logger.info('Elapsed time: {:.1f} [min]'.format(toc / 60.0))
def __init__(self, hostname=""):
    """Resolve the collection mode from settings; default is 'Agent'."""
    self.logger = Logger()
    self.mode_list = ["Agent", "SSH", "Salt"]
    # getattr default replaces the explicit hasattr/else branch
    self.mode = getattr(settings, "MODE", "Agent")
    self.hostname = hostname
def process(self):
    """Fetch the asset task list and fan submissions out over a thread pool.

    :return: None
    """
    task = self.get_asset()
    if not task['status']:
        Logger().log(task['message'], False)
        # bug fix: previously fell through and iterated task['data'] even
        # though the API reported failure (data may be null)
        return
    # thread pool: at most 10 concurrent collections
    pool = ThreadPoolExecutor(10)
    for item in task['data']:
        hostname = item['hostname']
        pool.submit(self.run, hostname)
    pool.shutdown(wait=True)  # block until every submission completes
def on_connect(self, client, userdata, flags, rc):
    """MQTT on_connect callback: subscribe and report gateway status.

    :param rc: broker connection result code (0 == success)
    """
    print("Connected with result code " + str(rc))
    # NOTE(review): argument order (flag first, message second) differs from
    # other Logger().log(message, flag) call sites -- confirm Logger API.
    Logger().log(True, 'Connection Successful')
    self.client.subscribe(('pub', 2))  # QoS 2
    if models.Gateway.objects.exists():
        # also listen on this gateway's own network topic
        topic = models.Gateway.objects.values('network_id')[0]['network_id']
        self.client.subscribe((topic, 2))
        header = 'connect_status'
        # 'gw_nework_id' (sic) kept: downstream consumers read this exact key
        result = {'status': True, 'gw_nework_id': topic,
                  'msg': 'Connection Successful'}
        handle_func.send_gwdata_to_server(client, 'pub', result, header)
def __init__(self, hostname='', platform_str='Linux'):
    """Linux collector base: choose the collection mode from settings.

    Falls back to 'agent' when no MODE is configured.
    """
    self.logger = Logger()
    self.test_mode = settings.TEST_MODE
    self.mode_list = ["ssh", "agent", "salt"]
    # getattr default replaces the explicit hasattr/else branch
    self.mode = getattr(settings, 'MODE', 'agent')
    self.hostname = hostname
def invoke_plugin(self, application_name, value):
    """Run the plugin named in value[0] and post its data to the server.

    :param application_name: name of the application set being collected
    :param value: sequence whose first element is the plugin attribute name
    """
    plugin_name = value[0]
    # guard clause: unknown plugin -> log and bail out
    if not hasattr(plugin_map, plugin_name):
        Logger().log(message='plugin_map中未找到应用集插件,%s,%s' % (
            application_name, plugin_name), mode=False)
        return
    plugin = getattr(plugin_map, plugin_name)
    try:
        collected = plugin()  # run the plugin
    except Exception as e:
        Logger().log(message='插件异常,%s' % str(e), mode=False)
        return
    self.post_data(
        data={
            'hostname': self.hostname,
            'application_name': application_name,
            'data': collected,
        },
        callback=self.call_back,
    )
def get_asset_entries():
    """Merge the basic asset info with every configured plugin's asset dict.

    :return: combined dict of asset entries; plugin failures are logged
             and skipped rather than raised
    """
    entries = dict()
    entries.update(BasicPlugin().asset)
    for dotted in getattr(settings, 'PLUGINS', []):
        try:
            module_path, class_name = dotted.rsplit('.', maxsplit=1)
            plugin_cls = getattr(importlib.import_module(module_path),
                                 class_name)
            entries.update(plugin_cls().asset)
        except Exception as e:
            # best-effort: a broken plugin must not abort collection
            Logger().error('Error no module {}: {}'.format(dotted, str(e)))
    return entries
def main(project_name):
    """Build and train the embedding model for *project_name*, timing it."""
    start = time.time()
    logger = Logger('_01_training_{}'.format(project_name))
    logger.info('==> initialize model')
    embedding = build_model(pretrained=True)
    logger.info('==> train model')
    train(embedding, project_name=project_name)
    elapsed = time.time() - start
    logger.info('Elapsed time: {:.1f} [min]'.format(elapsed / 60.0))
def get_latest_config(self):
    """Fetch the newest monitor configuration for this host from the API.

    :return: parsed JSON config on success, or {'code': 400, 'message': ...}
             wrapping any exception
    """
    try:
        headers = {}
        headers.update(self.auth_key())  # attach API auth header
        Logger().log(message='获取最新的监控配置信息', mode=True)
        response = requests.get(
            url=self.config_api,
            params={'hostname': self.hostname},  # hostname selects the config
            headers=headers,
        )
        return response.json()
    except Exception as e:
        # any failure (network, JSON, auth) degrades to a 400-style payload
        return {'code': 400, 'message': str(e)}