def shutdown_cfs(self):
    log.info("Shutting down CFS on {}".format(self.config.name))
    # Close the command socket, close the telemetry socket and write the CFS EVS log file
    if self.cfs:
        self.cfs.stop_cfs()
    # Close any subprocess launched by CTF, which includes the CFS application under test
    for current_process in self.cfs_process_list:
        process = psutil.Process(current_process)
        for pro_child in process.children(recursive=True):
            try:
                pro_child.kill()
            except psutil.NoSuchProcess as e:
                log.debug(e)
                log.debug("Failed to close process {}".format(current_process))
                continue
        try:
            process.kill()
        except Exception as e:
            log.debug(e)
            log.debug("Failed to close parent process {}".format(current_process))
    self.cfs_process_list = []
    self.cfs_running = False
    return True
def run(self, hour_range=HOUR_RANGE):
    date_range = kuzuha.build_date_filter_by_range({'hours': hour_range})
    posts = kuzuha.search('http', _filter=date_range, sort=[])
    tweet = ''
    for (url, count) in self._count_url(posts).most_common():
        if url.startswith('https://twitter.com/'):
            tweet_id = self.extract_tweet_id(url)
            if tweet_id:
                logger.info('RT: id=%s (%s)' % (tweet_id, url))
                if not self.debug:
                    try:
                        self.twitter.api.statuses.retweet(id=tweet_id)
                    except TwitterHTTPError as e:
                        logger.warn('%s %s' % (type(e), str(e)))
                continue
        title = self._get_title(url)
        new_url_info = TWEET_FORMAT % (title, url, count)
        expected_length = self.calc_tweet_length(tweet, title, count)
        if expected_length < (MAX_TWEET_LENGTH - len(HASH_TAG)):
            tweet += new_url_info
        else:
            tweet = tweet[:-len(DELIMITER)] + HASH_TAG
            if tweet != HASH_TAG:
                tweet = tweet.replace('\n', '').replace('\r', '')
                yield tweet
            tweet = new_url_info
    if tweet:
        if tweet.endswith(DELIMITER):
            tweet = tweet[:-len(DELIMITER)]
        tweet = tweet.replace('\n', '').replace('\r', '')
        yield tweet + HASH_TAG
def resolve(name):
    ip = core.get_ip_by_name(name)
    msg = ip
    if msg:
        logger.info('[+] Resolved! IP: ' + msg)
    else:
        logger.error('[-] Error: Impossible to resolve ' + name)
def get_bootKey(self):
    bootKey = ''
    ans = rrp.hOpenLocalMachine(self.__rrp)
    self.__regHandle = ans['phKey']
    for key in ['JD', 'Skew1', 'GBG', 'Data']:
        logger.debug('Retrieving class info for %s' % key)
        ans = rrp.hBaseRegOpenKey(self.__rrp, self.__regHandle,
                                  'SYSTEM\\CurrentControlSet\\Control\\Lsa\\%s' % key)
        keyHandle = ans['phkResult']
        ans = rrp.hBaseRegQueryInfoKey(self.__rrp, keyHandle)
        bootKey = bootKey + ans['lpClassOut'][:-1]
        rrp.hBaseRegCloseKey(self.__rrp, keyHandle)
    transforms = [8, 5, 4, 2, 11, 9, 13, 3, 0, 6, 1, 12, 14, 10, 15, 7]
    # Python 2: str.decode('hex') converts the hex class names to raw bytes
    bootKey = bootKey.decode('hex')
    # Descramble the bytes with the fixed permutation
    for i in xrange(len(bootKey)):
        self.__bootKey += bootKey[transforms[i]]
    logger.info('Target system bootKey: 0x%s' % self.__bootKey.encode('hex'))
    return self.__bootKey
def exploit(self):
    ip = []
    is_cdn = False
    logger.warn('DNS resolve starting of target {}'.format(self.target_netloc))
    match_result = re.compile(r'flightHandler\((.*?)\)')
    url = 'http://ping.aizhan.com/?r=site/PingResult&callback=flightHandler&type=ping&id={}'
    data = requests.get(url.format(self.target_netloc)).content
    result = match_result.findall(data)
    if not result:
        logger.critical('Failed to get ping result of target {}'.format(self.target_netloc))
        return
    result = json.loads(result[0])
    if 'status' in result and result['status'] == 500:
        logger.critical('Failed to get ping result of target {}'.format(self.target_netloc))
        return
    for i in result:
        logger.info(u'IP address of {} ({}): {}'.format(
            self.target_netloc, result[i]['monitor_name'], result[i]['ip']))
        ip.append(result[i]['ip'])
    if len(set(ip)) > 2:
        logger.warn('It seems the target uses a CDN according to the result')
        is_cdn = True
    return {'result': {'is_cdn': is_cdn, 'ip': ip}, 'status': True}
def __check_remote_registry(self):
    status = self.status(self.__service_name, return_state=True)
    if status in ('PAUSED', 'STOPPED'):
        logger.info('Service %s is in stopped state' % self.__service_name)
        self.__should_stop = True
        self.__started = False
    elif status == 'RUNNING':
        logger.debug('Service %s is already running' % self.__service_name)
        self.__should_stop = False
        self.__started = True
    else:
        raise Exception('Unknown service status: %s' % status)
    # Let's check its configuration if service is stopped, maybe it is disabled
    if self.__started is False:
        ans = self.query(self.__service_name, return_answer=True)
        if ans['lpServiceConfig']['dwStartType'] == 0x4:
            logger.info('Service %s is disabled, enabling it' % self.__service_name)
            self.__disabled = True
            self.change(self.__service_name, start_type=0x3)
        self.start(self.__service_name)
        time.sleep(3)
def __getPek(self):
    logger.info('Searching for pekList, be patient')
    pek = None
    while True:
        record = self.__ESEDB.getNextRow(self.__cursor)
        if record is None:
            break
        elif record[self.NAME_TO_INTERNAL['pekList']] is not None:
            pek = record[self.NAME_TO_INTERNAL['pekList']].decode('hex')
            break
        elif record[self.NAME_TO_INTERNAL['sAMAccountType']] in self.ACCOUNT_TYPES:
            # Okay... we found some users, but we're not ready to process them yet.
            # Let's just store them in a temp list
            self.__tmpUsers.append(record)
    if pek is not None:
        encryptedPek = self.PEK_KEY(pek)
        md5 = hashlib.new('md5')
        md5.update(self.__bootKey)
        for i in range(1000):
            md5.update(encryptedPek['KeyMaterial'])
        tmpKey = md5.digest()
        rc4 = ARC4.new(tmpKey)
        plainText = rc4.encrypt(encryptedPek['EncryptedPek'])
        self.__PEK = plainText[36:]
def main():
    # Parse the config file
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', dest='config_path',
                        default='./conf/feature_sync_client.conf',
                        help='set conf file')
    args = parser.parse_args()
    g_conf_parameter.load_conf(args.config_path)
    # Start the logging module
    logger.set_child_name('feature_sync_client')
    logger.set_file_path(get_log_dir())
    logger.set_file_name(g_conf_parameter.log_file_name)
    logger.set_log_level(g_conf_parameter.log_level)
    logger.set_back_count(g_conf_parameter.log_back_count)
    logger.start()
    event_loop = asyncio.get_event_loop()
    feature_sync_service = FeatureSyncService(event_loop=event_loop)
    try:
        logger.info(
            "{0} feature_sync_client start use conf file: {1} {0}".format(
                "*" * 10, args.config_path))
        event_loop.run_until_complete(feature_sync_service.sync_feature())
        event_loop.run_forever()
    except KeyboardInterrupt:
        event_loop.stop()
        event_loop.close()
        logger.info("sync complete, or stopped by an uncaught exception")
def search(query='', field='q1', _operator='and',
           sort=[('_score', 'desc'), ('quoted_by', 'desc')],
           _filter=[], size=1000, _id=False, indices=['misao']):
    es = Elasticsearch([elasticsearch_setting])
    if query:
        if isinstance(query, list):
            es_query = {'match': {field: ' '.join(query)}}
        else:
            es_query = {'match': {field: query}}
    else:
        es_query = {"match_all": {}}
    if _filter:
        body = {
            "query": {
                "bool": {
                    "must": es_query,
                    "filter": _filter
                },
            },
            'size': size
        }
    else:
        body = {
            'query': es_query,
            'size': size
        }
    sort_item = _build_sort(sort)
    if sort_item:
        body.update({'sort': sort_item})
    logger.info(body)
    result = es.search(index=indices, body=body, _source=True)
    if _id:
        return (x for x in result['hits']['hits'])
    return (x['_source'] for x in result['hits']['hits'])
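# A minimal usage sketch for search() above, assuming the 'misao' index exists
# and elasticsearch_setting points at a reachable cluster; the query terms are
# illustrative. By default the generator yields each hit's _source dict.
for doc in search('python logging', size=10):
    print(doc)

# With _id=True the full hit dicts are yielded (including '_id' and '_score').
for hit in search(['python', 'logging'], _id=True, size=5):
    print(hit['_id'], hit['_score'])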
def enable_cfs_output(self, target: str = None) -> bool:
    """Implements the instruction EnableCfsOutput."""
    log.info("EnableCfsOutput for target: {}".format(target))
    # Collect the results of enable_cfs_output on each specified target, and check that all passed
    status = [t.enable_cfs_output() for t in self.get_cfs_targets(target)]
    return all(status) if status else False
def set_credentials():
    credentials = []
    logger.info('Loading credentials')
    if conf.user is not None:
        logger.debug('Loading credentials from command line')
        credentials.append(
            add_credentials(conf.user, conf.password or '', conf.lmhash or '',
                            conf.nthash or '', conf.domain or ''))
    if conf.credsfile is not None:
        logger.debug('Loading credentials from file %s' % conf.credsfile)
        parsed_credentials = parse_credentials_file(conf.credsfile)
        for credential in parsed_credentials:
            if credential is not None:
                credentials.append(credential)
    unique_credentials = []
    for credential in credentials:
        if credential not in unique_credentials:
            unique_credentials.append(credential)
    if len(unique_credentials) < 1:
        logger.error('No valid credentials loaded')
        sys.exit(1)
    # Report the deduplicated count, not the raw count
    logger.info('Loaded %s unique credential%s' %
                (len(unique_credentials), 's' if len(unique_credentials) > 1 else ''))
    return unique_credentials
def run(self):
    # Here we write a mini config for the server
    smbConfig = ConfigParser()
    smbConfig.add_section('global')
    smbConfig.set('global', 'server_name', 'server_name')
    smbConfig.set('global', 'server_os', 'UNIX')
    smbConfig.set('global', 'server_domain', 'WORKGROUP')
    smbConfig.set('global', 'log_file', self.__smbserver_log)
    smbConfig.set('global', 'credentials_file', '')
    # Let's add a dummy share
    smbConfig.add_section(self.__smbserver_share)
    smbConfig.set(self.__smbserver_share, 'comment', '')
    smbConfig.set(self.__smbserver_share, 'read only', 'no')
    smbConfig.set(self.__smbserver_share, 'share type', '0')
    smbConfig.set(self.__smbserver_share, 'path', self.__smbserver_dir)
    # IPC always needed
    smbConfig.add_section('IPC$')
    smbConfig.set('IPC$', 'comment', '')
    smbConfig.set('IPC$', 'read only', 'yes')
    smbConfig.set('IPC$', 'share type', '3')
    smbConfig.set('IPC$', 'path', '')
    self.localsmb = smbserver.SMBSERVER(('0.0.0.0', 445), config_parser=smbConfig)
    logger.info('Setting up SMB Server')
    self.localsmb.processConfigFile()
    logger.debug('Ready to listen...')
    try:
        self.localsmb.serve_forever()
    except Exception as _:
        pass
def main():
    # Parse the config file
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', dest='config_path',
                        default='./conf/feature_sync_server.conf',
                        help='set conf file')
    args = parser.parse_args()
    g_conf_parameter.load_conf(args.config_path)
    # Start the logging module
    logger.set_child_name('feature_sync_server')
    logger.set_file_path(get_log_dir())
    logger.set_file_name(g_conf_parameter.log_file_name)
    logger.set_log_level(g_conf_parameter.log_level)
    logger.set_back_count(g_conf_parameter.log_back_count)
    logger.start()
    app = Sanic("feature_sync_server")
    app.add_route(process_feature_request, "/feature", methods=["GET", "POST"])
    logger.info(
        "{0} feature_sync_server start at port {1} workers {2} conf {3} {0}".format(
            "*" * 10, g_conf_parameter.server_port,
            g_conf_parameter.workers_num, args.config_path))
    app.run(host=g_conf_parameter.server_host,
            port=g_conf_parameter.server_port,
            workers=g_conf_parameter.workers_num,
            debug=False,
            access_log=False)
def _mailListHandlerApi(self, phone):
    try:
        # Fetch the data from the af_base and af_tag_base tables
        dict_info = self._getTagAndBaseData(phone)
        if dict_info is False:
            return False
        # Initialize the address book
        address_list = AddressList().getByUserPhoneDict(phone)
        # Normalize the phone numbers in the address book
        phone_list = self._getUserFordb(address_list)
        phone_list = phone_list.phone
        # Number of address book entries
        phone_list_num = phone_list.count()
        # Convert the phone numbers to a tuple
        phone_list = self._getTupleList(phone_list)
        if not phone_list:
            logger.info("number_label: %s has no address book" % phone)
            return False
        # Number of labeled phone numbers
        label_num = self._getNumberLableNum(phone_list)
        if not label_num:
            logger.info("number_label: failed to parse number labels for %s" % phone)
            print("Failed to parse number labels!")
            return False
        dict_info['label_num'] = label_num
        # Number of phone numbers in the address book
        dict_info['mail_list_num'] = len(phone_list)
        # Number of unique phone numbers in the address book
        dict_info['weight_loss_num'] = len(set(phone_list))
        return dict_info
    except Exception as err:
        print(err)
        return False
def run(self):
    # Here we write a mini config for the server
    smbConfig = ConfigParser.ConfigParser()
    smbConfig.add_section("global")
    smbConfig.set("global", "server_name", "server_name")
    smbConfig.set("global", "server_os", "UNIX")
    smbConfig.set("global", "server_domain", "WORKGROUP")
    smbConfig.set("global", "log_file", self.__smbserver_log)
    smbConfig.set("global", "credentials_file", "")
    # Let's add a dummy share
    smbConfig.add_section(self.__smbserver_share)
    smbConfig.set(self.__smbserver_share, "comment", "")
    smbConfig.set(self.__smbserver_share, "read only", "no")
    smbConfig.set(self.__smbserver_share, "share type", "0")
    smbConfig.set(self.__smbserver_share, "path", self.__smbserver_dir)
    # IPC always needed
    smbConfig.add_section("IPC$")
    smbConfig.set("IPC$", "comment", "")
    smbConfig.set("IPC$", "read only", "yes")
    smbConfig.set("IPC$", "share type", "3")
    smbConfig.set("IPC$", "path", "")
    self.localsmb = smbserver.SMBSERVER(("0.0.0.0", 445), config_parser=smbConfig)
    logger.info("Setting up SMB Server")
    self.localsmb.processConfigFile()
    logger.debug("Ready to listen...")
    try:
        self.localsmb.serve_forever()
    except Exception:
        pass
def download(self, origin, index=1):
    import toml
    import os
    file_path = os.path.join('..', 'Toml', 'waypoint.yaml')
    key = "waypoint{}".format(index)
    with open(file_path) as f:
        wps = toml.loads(f.read())
    count = len(wps)
    if index > count or index == 0:
        logger.error('index out of range when downloading Waypoints')
        return
    waypoints = wps[key]
    Trail = waypoints['Trail']
    result = [origin]
    points = waypoints['points']
    number = 0
    for point in points:
        result.append(get_location_metres(result[number], point[0], point[1]))
        number += 1
    self.publish('Waypoint', result[1:])
    self.publish('WaypointID', 0)
    # self.publish('WaypointType', 'Download')
    logger.debug('Trail:{}\n Waypoints:{}'.format(Trail, self.subscribe('Waypoint')))
    logger.info('Download complete')
def saveResources(self, proportion):
    try:
        self.aid = proportion['aid']
        self.user_id = proportion['user_id']
        self.loan_id = proportion['loan_id']
        self.ads_num = proportion['mail_list_num']
        self.ads_num_uniq = proportion['weight_loss_num']
        self.advertis = proportion['label_num']['advertisement_tel']
        self.express = proportion['label_num']['express_tel']
        self.harass = proportion['label_num']['harass_tel']
        self.house_agent = proportion['label_num']['house_propert_tel']
        self.cheat = proportion['label_num']['cheat_tel']
        self.company_tel = proportion['label_num']['enterprise_tel']
        self.invite = proportion['label_num']['recruit_tel']
        self.taxi = proportion['label_num']['lease_car_tel']
        self.education = proportion['label_num']['education_tel']
        self.insurance = proportion['label_num']['insurance_tel']
        self.ring = proportion['label_num']['sound_a_sound_tel']
        self.service_tel = proportion['label_num']['customer_service_tel']
        self.delinquency = proportion['label_num']['illegality_tel']
        self.modify_time = datetime.now()
        self.create_time = datetime.now()
        self.add()
        db.session.commit()
        return True
    except AttributeError as error:
        logger.info("number_label: failed to save the address book record: %s" % error)
        print(error)
        return False
    except Exception as error:
        logger.info("number_label: failed to save the address book record: %s" % error)
        print(error)
        return False
def remove_check_tlm_continuous(self, verification_id: str, target: str = None) -> bool:
    """Implements the instruction RemoveCheckTlmContinuous."""
    log.info("RemoveCheckTlmContinuous for target: {}, Verification ID: {}".format(target, verification_id))
    # Collect the results of remove_check_tlm_continuous on each specified target, and check that all passed
    status = [t.remove_check_tlm_continuous(verification_id) for t in self.get_cfs_targets(target)]
    return all(status) if status else False
async def start(self):
    if self.running:
        logger.info("is running, continue")
        return
    now = datetime.datetime.now()
    logger.info("[start]->begin, {}".format(now))
    try:
        self.running = True
        cloud_status = await self._cloud_feature_proxy.get_sync_status(
            self.province_code, self.city_code, self.town_code)
        cloud_status = SyncStatus(**cloud_status)
        logger.info("cloud_status: {}".format(cloud_status))
        remote_status = await self._remote_feature_proxy.get_sync_status(
            self.province_code, self.city_code, self.town_code)
        remote_status = SyncStatus(**remote_status)
        logger.info("remote_status: {}".format(remote_status))
        await self.sync_user(cloud_status, remote_status)
        await self.sync_feature_model_0330(cloud_status, remote_status)
    finally:
        self.running = False
        end = datetime.datetime.now()
        logger.info("[start]->end, {} ,cost {}s".format(end, (end - now).seconds))
def k8s_nodes_ready(max_retry=app_config['GLOBAL_MAX_RETRY'], wait=app_config['GLOBAL_HEALTH_WAIT']):
    """ Checks that all nodes in a cluster are Ready """
    logger.info('Checking k8s nodes health status...')
    retry_count = 1
    healthy_nodes = False
    while retry_count < max_retry:
        # reset healthy nodes after every loop
        healthy_nodes = True
        retry_count += 1
        nodes = get_k8s_nodes()
        for node in nodes:
            conditions = node.status.conditions
            for condition in conditions:
                if condition.type == "Ready" and condition.status == "False":
                    logger.info("Node {} is not healthy - Ready: {}".format(
                        node.metadata.name, condition.status))
                    healthy_nodes = False
                elif condition.type == "Ready" and condition.status == "True":
                    # condition status is a string
                    logger.info("Node {}: Ready".format(node.metadata.name))
        if healthy_nodes:
            logger.info('All k8s nodes are healthy')
            break
        logger.info('Retrying node health...')
        time.sleep(wait)
    return healthy_nodes
def modify_k8s_autoscaler(action):
    """ Pauses or resumes the Kubernetes autoscaler """
    import kubernetes.client
    config.load_kube_config()
    # Configure API key authorization: BearerToken
    configuration = kubernetes.client.Configuration()
    # create an instance of the API class
    k8s_api = kubernetes.client.AppsV1Api(kubernetes.client.ApiClient(configuration))
    if action == 'pause':
        logger.info('Pausing k8s autoscaler...')
        body = {'spec': {'replicas': 0}}
    elif action == 'resume':
        logger.info('Resuming k8s autoscaler...')
        body = {'spec': {'replicas': app_config['K8S_AUTOSCALER_REPLICAS']}}
    else:
        logger.info('Invalid k8s autoscaler option')
        sys.exit(1)
    try:
        k8s_api.patch_namespaced_deployment(
            app_config['K8S_AUTOSCALER_DEPLOYMENT'],
            app_config['K8S_AUTOSCALER_NAMESPACE'],
            body)
        logger.info('K8s autoscaler modified to replicas: {}'.format(body['spec']['replicas']))
    except ApiException as e:
        logger.info('Scaling of k8s autoscaler failed. Error code was {}, {}. Exiting.'.format(
            e.reason, e.body))
        sys.exit(1)
def add_telem_msg(self, mid_name, mid, name, parameters, parameter_enums=None):
    """
    Adds a telemetry message to the internal types

    @param mid_name: Name of the MID associated with the command
    @param mid: Value of the MID associated with the command
    @param name: Name of the telemetry message
    @param parameters: Type of the telemetry message parameters
    @param parameter_enums: Dictionary of enumerations associated with this telemetry message
    """
    # Normalize the MID once so that both the map entry and the log line
    # accept either an int or a numeric string such as "0x0890"
    mid_value = mid if isinstance(mid, int) else int(mid, 0)
    msg = {
        mid_name: {
            "MID": mid_value,
            "name": name,
            "PARAM_CLASS": parameters
        }
    }
    self.mid_map.update(msg)
    self.enum_map.update({mid_name: mid})
    if parameter_enums:
        self.enum_map.update(parameter_enums)
    if self.log_ccsds_imports:
        log.info("Added Telemetry Message {}:{} with MID {}".format(
            name, mid_name, hex(mid_value)))
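# The MID normalization above relies on int(x, 0), which infers the base from
# the string prefix; a small self-contained illustration (values are examples):
assert int("0x0890", 0) == 0x0890 == 2192   # hex string
assert int("2192", 0) == 2192               # plain decimal string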
def check_output(self, output_contains=None, output_does_not_contain=None, exit_code=0):
    result = True
    log.info("Remote Verify Command with output containing: \"{cont}\","
             " not containing: \"{ncont}\", and exit code: {code}"
             .format(cont=output_contains, ncont=output_does_not_contain, code=exit_code))
    if self.last_result is None:
        log.warn("No output received from remote connection...")
        result = False
    else:
        # Check if stdout contains the nominal output
        if output_contains is not None and output_contains not in self.last_result.stdout.strip():
            log.warn("Output does not contain: {}".format(output_contains))
            result = False
        # Check that stdout does not contain the off-nominal output
        if output_does_not_contain is not None and len(output_does_not_contain) > 0 \
                and output_does_not_contain in self.last_result.stdout.strip():
            log.warn("Output contains: {}...".format(output_does_not_contain))
            result = False
        # Check if command exit code matches expected exit code
        if exit_code != self.last_result.exited:
            log.warn("Exit code {} does not equal expected exit code {}...".format(
                self.last_result.exited, exit_code))
            result = False
    if result:
        log.info("RemoteCheckOutput Passed with exit code {}".format(exit_code))
    else:
        log.warn("RemoteCheckOutput Failed")
    return result
def check_event(self, app, id, msg=None, is_regex=False, msg_args=None):
    """Checks for an EVS event message in the telemetry packet history,
    assuming a particular structure for CFE_EVS_LongEventTlm_t.
    This can be generified in the future to determine the structure from the MID map.
    """
    log.info("Checking event on {}".format(self.config.name))
    if msg_args is not None and len(msg_args) > 0:
        try:
            msg = msg % literal_eval(msg_args)
        except Exception as e:
            log.error("Failed to check Event ID {} in App {} with message: '{}' with msg_args = {}"
                      .format(id, app, msg, msg_args))
            log.debug(traceback.format_exc())
            return False
    if not str(id).isnumeric():
        id = self.resolve_macros(id)
    # TODO - Should use the mid_map and EVS event name to determine these...
    # These are the values that will be used to look through the telemetry packets
    # for the expected packet
    args = [
        {"compare": "streq", "variable": "Payload.PacketID.AppName", "value": app},
        {"compare": "==", "variable": "Payload.PacketID.EventID", "value": id}
    ]
    result = self.cfs.check_tlm_value(self.cfs.evs_short_event_msg_mid, args,
                                      discard_old_packets=False)
    if result:
        log.info("Received EVS_ShortEventTlm_t. Ignoring 'Message' field...")
    else:
        if msg:
            compare = "regex" if is_regex else "streq"
            args.append({"compare": compare, "variable": "Payload.Message", "value": msg})
            result = self.cfs.check_tlm_value(self.cfs.evs_long_event_msg_mid, args,
                                              discard_old_packets=False)
        else:
            log.warn("No msg provided; any message for App {} and Event ID {} will be matched."
                     .format(app, id))
            result = self.cfs.check_tlm_value(self.cfs.evs_long_event_msg_mid, args,
                                              discard_old_packets=False)
    return result
def set_targets():
    targets = []
    logger.info('Loading targets')
    if conf.target is not None:
        if '/' not in conf.target:
            logger.debug('Loading targets from command line')
            targets.append(add_target(conf.target))
        else:
            address, mask = re.search(r"([\d.]+)/(\d+)", conf.target).groups()
            logger.debug('Expanding targets from command line')
            # Clear the host bits to find the first address of the block,
            # then set them all to find the last
            start_int = addr_to_int(address) & ~((1 << 32 - int(mask)) - 1)
            end_int = start_int | ((1 << 32 - int(mask)) - 1)
            for _ in range(start_int, end_int):
                targets.append(add_target(int_to_addr(_)))
    if conf.list is not None:
        logger.debug('Loading targets from file %s' % conf.list)
        parsed_targets = parse_targets_file(conf.list)
        if parsed_targets is not False:
            for target in parsed_targets:
                targets.append(target)
    unique_targets = []
    for target in targets:
        if target not in unique_targets:
            unique_targets.append(target)
    if len(unique_targets) < 1:
        logger.error('No valid targets loaded')
        sys.exit(1)
    # Report the deduplicated count, not the raw count
    logger.info('Loaded %s unique target%s' %
                (len(unique_targets), 's' if len(unique_targets) > 1 else ''))
    return unique_targets
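# The mask arithmetic above, worked through for a hypothetical 192.168.1.37/28:
# clearing the low (32 - mask) host bits gives the first address of the block,
# setting them gives the last.
mask = 28
addr = (192 << 24) | (168 << 16) | (1 << 8) | 37
start_int = addr & ~((1 << (32 - mask)) - 1)    # ...1.32
end_int = start_int | ((1 << (32 - mask)) - 1)  # ...1.47
assert end_int - start_int + 1 == 16  # a /28 spans 16 addresses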
def shutdown_cfs(self):
    log.info("Shutting down CFS on {}".format(self.config.name))
    # TODO - Pull CFS stdout from SP0. Requires run_application to pipe output of process...
    # stdout_final_path = os.path.join(Global.current_script_log_dir,
    #                                  os.path.basename(self.cfs.cfs_std_out_path))
    #
    # if not os.path.exists(stdout_final_path):
    #     if not self.sp0_plugin.get_file(self.cfs.cfs_std_out_path, stdout_final_path):
    #         log.info("Cannot move CFS stdout file to script log directory.")
    #         if self.sp0_plugin.last_result[self.config.name]:
    #             log.debug(self.sp0_plugin.last_result[self.config.name].stdout.strip())
    if self.cfs:
        if self.cfs_running:
            log.info("Sending SP0 Reboot Command...")
            self.sp0_plugin.send_command("reboot()\n", timeout=2, name=self.config.name)
            self.cfs.stop_cfs()
            # Wait 2 time units for shutdown to complete
            Global.time_manager.wait_seconds(2)
        self.cfs = None
def run(self):
    if config.use_https_dns:
        logger.info("starting Google HTTPS DNS proxy")
        async_run(start_google_https_dns_proxy_server)
    logger.info("listening for DNS requests on {}".format(config.port))
    create_udp_server(config.port, self.resolve)
def set_domains():
    domains = ['']
    logger.info('Loading domains')
    if conf.domain is not None:
        logger.debug('Loading domains from command line')
        added_domains = add_domain(conf.domain)
        for domain in added_domains:
            domains.append(domain)
    if conf.domainsfile is not None:
        logger.debug('Loading domains from file %s' % conf.domainsfile)
        parsed_domains = parse_domains_file(conf.domainsfile)
        for domain in parsed_domains:
            if domain is not None:
                domains.append(domain)
    unique_domains = []
    for domain in domains:
        if domain not in unique_domains:
            unique_domains.append(domain)
    if len(unique_domains) == 0:
        return domains
    elif len(domains) > 0:
        return unique_domains
def get_launch_template(lt_name):
    """ Queries AWS and returns the details of a given Launch Template """
    logger.info(f'Describing launch template for {lt_name}...')
    response = ec2_client.describe_launch_templates(LaunchTemplateNames=[lt_name])
    return response['LaunchTemplates'][0]
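# A hedged usage sketch for get_launch_template(); 'my-launch-template' is a
# hypothetical name, and the keys shown are the ones the outdated-instance
# check below relies on (from the boto3 describe_launch_templates response).
lt = get_launch_template('my-launch-template')
print(lt['LaunchTemplateName'], lt['LatestVersionNumber'], lt['DefaultVersionNumber'])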
def build_cfs(self):
    log.info("Building Remote CFS")
    build_out_file = os.path.join("/tmp", "{}_build_cfs_output.txt".format(self.config.name))
    build_command = "{} 2>&1 | tee {}".format(self.config.cfs_build_cmd, build_out_file)
    build_success = self.execution_controller.run_command(build_command,
                                                          cwd=self.config.cfs_build_dir)
    log.debug("Build process completed")
    Global.time_manager.wait_seconds(1)
    stdout_final_path = os.path.join(Global.current_script_log_dir,
                                     os.path.basename(build_out_file))
    if not os.path.exists(stdout_final_path):
        if not self.execution_controller.get_file(build_out_file, stdout_final_path,
                                                  {'delete': True}):
            log.warn("Cannot move CFS build output file to script log directory.")
            if self.execution_controller.last_result:
                log.debug(self.execution_controller.last_result.stdout.strip())
    if not build_success:
        log.error("Failed to build Remote CFS!")
    return build_success
def instance_outdated_launchtemplate(instance_obj, asg_lt_name, asg_lt_version):
    """
    Checks that the launch template on an instance matches a given name and version.
    The version is often configured in the auto scaling group as $Latest or $Default,
    which we can resolve to an actual version number through the
    describe_launch_templates boto3 method (wrapped in get_launch_template).
    """
    instance_id = instance_obj['InstanceId']
    lt_name = instance_obj['LaunchTemplate']['LaunchTemplateName']
    lt_version = int(instance_obj['LaunchTemplate']['Version'])
    if lt_name != asg_lt_name:
        logger.info("Instance id {} launch template of '{}' does not match asg launch template of '{}'".format(
            instance_id, lt_name, asg_lt_name))
        return True
    elif asg_lt_version == "$Latest":
        latest_lt_version = get_launch_template(asg_lt_name)['LatestVersionNumber']
        if lt_version != latest_lt_version:
            logger.info("Instance id {} launch template version of '{}' does not match asg launch template version of '{}'".format(
                instance_id, lt_version, latest_lt_version))
            return True
    elif asg_lt_version == "$Default":
        default_lt_version = get_launch_template(asg_lt_name)['DefaultVersionNumber']
        if lt_version != default_lt_version:
            logger.info("Instance id {} launch template version of '{}' does not match asg launch template version of '{}'".format(
                instance_id, lt_version, default_lt_version))
            return True
    elif lt_version != int(asg_lt_version):
        logger.info(f"Instance id {instance_id} has a different launch configuration version to the ASG")
        return True
    logger.info("Instance id {} : OK ".format(instance_id))
    return False
def shutdown(self):
    log.info("Shutting down controller for {}".format(self.config.name))
    if self.cfs:
        if self.cfs_running:
            self.shutdown_cfs()
        self.cfs = None
def initialize(self):
    log.debug("Initializing CfsController")
    self.process_ccsds_files()
    ccsds = import_ccsds_header_types()
    if not (ccsds and ccsds.CcsdsPrimaryHeader and ccsds.CcsdsCommand and ccsds.CcsdsTelemetry):
        log.error("Unable to import required CCSDS header types")
        return False
    log.info("Starting Local CFS Interface")
    command = CommandInterface(ccsds, self.config.cmd_udp_port,
                               self.config.cfs_target_ip,
                               self.config.endianess_of_target)
    telemetry = TlmListener(self.config.ctf_ip, self.config.tlm_udp_port)
    self.cfs = LocalCfsInterface(self.config, telemetry, command, self.mid_map, ccsds)
    result = self.cfs.init_passed
    if not result:
        log.error("Failed to initialize LocalCfsInterface")
    else:
        if self.config.start_cfs_on_init and not self.cfs_running:
            result = self.start_cfs("")
        else:
            log.warn("Not starting CFS executable... Expecting \"StartCfs\" in test script...")
    if result:
        log.info("CfsController Initialized")
    return result
def list_to_string(value):
    for host in value:
        if host:
            if isinstance(host, list):
                for element in host:
                    logger.info(element)
            if isinstance(host, str):
                logger.info(host)
def run(self):
    temperature = 0
    system = platform.system()
    if system == 'Darwin':  # only macOS is supported for now
        temperature = self.get_mac_temperature()
    logger.info('CPU TEMP: %.2f℃' % temperature)
    if THRESHOLD <= temperature:
        return MESSAGE_FORMAT % temperature
def main():
    if output:
        handler = logging.FileHandler(output)
        handler.setLevel(logging.INFO)
        logger.addHandler(handler)
    if name:
        logger.info('[*] Trying to resolve name: ' + name)
        actions.resolve(name)
    if output:
        logger.info('[+] File log written: ' + output)
def scan(target):
    if core.is_valid_url(target):
        msg = "Host Validation OK"
        logger.debug(msg)
        msg = "[+] Url Accepted"
        logger.info(msg)
        msg = "[*] Performing scan"
        logger.info(msg)
        try:
            core.scan(target, settings.WORDLISTFILE)
        except Exception as e:
            print str(e)
            msg = "[-] ERROR"
            logger.error(msg)
def run(self):
    params = kuzuha.gen_params('', {'minute': 20})
    posts = kuzuha.get_log_as_dict('qwerty', params, url=True) or {}
    for (post_id, post) in posts.items():
        if 'date' not in post:
            continue
        stored_body = self.find(post_id)
        body = self.build_body(post, post_id)
        if body and body != stored_body:
            self.update(post_id, body, 'index')
        else:
            logger.debug('NO CHANGE: %s' % body)
    if self.actions:
        logger.info(helpers.bulk(self.es, self.actions))
        self.es.indices.refresh(index='qwerty')
def _scan(self):
    while True:
        if self.queue.empty():
            break
        try:
            sub = self.queue.get_nowait()
            self.queue.task_done()
            domain = self.target + sub
            r = requests.head(domain, headers=header, timeout=5, stream=True)
            code = r.status_code
            if code in self.status_code:
                logger.info('status code {} -> {}'.format(code, domain))
        except Exception:
            pass
def gen_report(self, all_words):
    message = u"%d~%d時の@上海:\n" % (self.start_hour, self.end_hour)
    for word in sorted(all_words.values(),
                       key=lambda x: [x.count, len(x.surface)], reverse=True):
        logger.info("%s\t%d" % (word.surface, word.count))
        if len(message) + len(word.surface) + len(str(word.count)) + 1 < 116:
            if len(word.surface) > 1:
                message = u"%s %s:%d," % (message, word.surface, word.count)
        elif self.plot_wordmap:
            all_words = self.to_bag_of_words(all_words)
            wmap = wordmap.WordMap(upload_flickr=self.up_flickr)
            message = message[:-1]
            message += u" " + wmap.run(all_words, message)
            return message
        else:
            break
    return message[:-1]
def update_ghdb():
    global retry
    msg = "Starting ghdb update"
    logger.debug(msg)
    msg = "[*] Updating Database"
    logger.info(msg)
    try:
        fname = settings.WORDLISTFILE
        with open(fname, 'r') as f:
            content = f.readlines()
        num = len(content) + 1
        while True:
            dork = source.get_dork_from_exploit_db(num)
            if dork:
                retry = 0
                with codecs.open(fname, 'a', "utf-8") as f:
                    f.write(dork + "\n")
                msg = "[+] Loaded " + dork
                logger.info(msg)
            else:
                check = source.check_exploit_db(num)
                if check:
                    cont = 0
                    while cont < check:
                        with codecs.open(fname, 'a', "utf-8") as f:
                            space = " "
                            f.write(space + "\n")
                        cont += 1
                    num += check - 1
                else:
                    break
            num += 1
        msg = "Database update ok"
        logger.debug(msg)
        msg = "[+] Database is up to date"
        logger.info(msg)
        sys.exit(1)
    except SystemExit:
        msg = "End update"
        logger.debug(msg)
    except:
        retry += 1
        msg = "Database update error"
        logger.debug(msg)
        msg = "[-] ERROR: Database update error"
        logger.error(msg)
        if retry < 3:
            msg = "[*] Retrying update"
            logger.info(msg)
            update_ghdb()
        else:
            msg = "[-] CRITICAL ERROR: Maybe Exploit-db or network is down"
            logger.error(msg)
            sys.exit(1)
def _get_title(self, url):
    title = ''
    root, ext = os.path.splitext(url)
    if ext in image_extensions:
        time.sleep(3)  # to avoid being treated as spam by Google
        logger.info('Search by google: %s' % url)
        results = google_image.search(url, best_kwds_max_length=18)
        keywords = filter(lambda x: not x.isdigit(), results['best_keywords'])
        title = ''.join(keywords)
    elif ext not in ignore_extensions:
        logger.info('Retrieve web resource: %s' % url)
        html = web.open_url(url)
        soup = BeautifulSoup(html, "html5lib")
        if soup.title and soup.title.string:
            title = soup.title.string
            title = normalize.normalize(title)
            title = self._shorten_title(title)
    return title
def scan_wordlist(target, wordlist):
    if core.is_valid_url(target):
        msg = "Host Validation OK"
        logger.debug(msg)
        msg = "[+] Url Accepted"
        logger.info(msg)
        msg = "[*] Performing scan"
        logger.info(msg)
        try:
            core.scan(target, wordlist)
        except Exception:
            msg = "[-] ERROR"
            logger.error(msg)
    else:
        msg = "[-] ERROR: You must provide a valid target. Given: " + target
        showhelp()
        logger.error(msg)
        sys.exit(1)
def update():
    if not os.path.exists(os.path.join(ROOTDIR, ".git")):
        msg = "[-] Not a git repository. Please checkout the repository from GitHub (e.g. git clone https://github.com/AeonDave/sir.git)"
        logger.error(msg)
        if PLATFORM == 'nt':
            msg = "[-] Please checkout the repository from GitHub with GitHub for Windows (e.g. https://windows.github.com)"
            logger.warning(msg)
        msg = "[*] Repository at https://github.com/AeonDave/sir.git"
        logger.info(msg)
    else:
        msg = "[*] Updating SIR to the latest version from the GitHub repository\n"
        logger.info(msg)
        Popen("git stash", shell=True, stdout=PIPE, stderr=PIPE)
        Popen("git stash drop", shell=True, stdout=PIPE, stderr=PIPE)
        process = Popen("git pull origin master", shell=True, stdout=PIPE, stderr=PIPE)
        process.communicate()
        success = not process.returncode
        if success:
            msg = "[+] Updated!\n"
            logger.info(msg)
            sys.exit(0)
        else:
            msg = "[-] Error!\n"
            logger.error(msg)
            sys.exit(1)
def scan(url, wordlist):
    fname = wordlist
    with open(fname, 'r') as f:
        dorks = f.readlines()
    for dork in dorks:
        if len(dork) < 2:
            continue
        try:
            # Randomized delay between queries to avoid being rate limited
            rnd = random_int(2, 5)
            time.sleep(rnd)
            g = GoogleSearch("site:" + url + " " + dork, random_agent=True)
            g.results_per_page = 10
            print("."),
            results = g.get_results()
            if len(results) > 0:
                msg = "[+] Found " + str(len(results)) + " results with dork: " + dork
                logger.info(msg)
                for res in results:
                    print res.title.encode('utf8')
                    print res.url.encode("utf8")
        except SearchError as e:
            print "Search failed: %s" % e
    msg = "[+] Scan finished"
    logger.info(msg)
def reverse(target, extensive):
    msg = "Reverse probing"
    logger.debug(msg)
    hosts = core.get_reversed_hosts(target, extensive)
    if len(hosts) > 0:
        if len(hosts) == 1:
            msg = "[+] " + str(len(hosts)) + " Domain found"
        else:
            msg = "[+] " + str(len(hosts)) + " Domains found"
        logger.info(msg)
        for host in hosts:
            logger.info(host)
    else:
        msg = "[-] No Domains found"
        logger.error(msg)
def get_from_who_is(value, type):
    whois = 'http://who.is/whois/'
    info = 'http://who.is/website-information/'
    dns = 'http://who.is/dns/'
    if type == 0:
        url = whois
    if type == 1:
        url = info
    if type == 2:
        url = dns
    rawdata = core.get_html_from_url(url + value)
    if rawdata:
        parser = BeautifulSoup(rawdata)
        blocks = parser.find_all('div', 'domain-data')
        for block in blocks:
            title = block.header.h5.get_text()
            table = block.table
            if table:
                logger.info('-----' + title.strip() + '-----')
                rows = table.find_all('tr')
                for row in rows:
                    descriptions = row.find_all('th')
                    datas = row.find_all('td')
                    value = ''
                    for description in descriptions:
                        if description.get_text().strip():
                            value = value + '-' + description.get_text().strip()
                    if value:
                        logger.info(value)
                    value = ''
                    for data in datas:
                        if data.get_text().strip():
                            value = value + ' ' + data.get_text().strip()
                    if value:
                        logger.info(value)
    else:
        logger.error('[-] Error: Invalid host given for extensive data')
def run(self, interval=20):
    posts = kuzuha.search('', _filter=kuzuha.build_date_filter_by_range({'minutes': interval}))
    pairs = self.get_post_res_pairs(posts)
    for (parent, responses) in pairs.items():
        if len(responses) >= 2:
            ome_posts = set()
            logger.info('MENTIONED POST: %s' % parent)
            for (lhs, rhs) in itertools.combinations(responses, 2):
                logger.info('Compare "%s" with "%s"' % (lhs, rhs))
                if lhs and rhs and self.is_ome(lhs, rhs):
                    logger.info('"%s" and "%s" are OME' % (lhs, rhs))
                    ome_posts |= {lhs, rhs}
            if len(ome_posts) > 1:
                num_posts = len(ome_posts) + 1  # children + parent
                max_length = (body_length - num_posts * 2) // num_posts
                parent = self.shorten(parent, max_length)
                message = '%s『%s』' % (PREFIX, parent)
                for ome_post in sorted(ome_posts):
                    ome_post = self.shorten(ome_post, max_length)
                    message += '「%s」' % ome_post
                message += HASH_TAG
                yield message
def random_int(a, b):
    return random.randint(a, b)
def main():
    if output:
        handler = logging.FileHandler(output)
        handler.setLevel(logging.INFO)
        logger.addHandler(handler)
    logger.info('-----Start-----')
    if target and wordlist:
        if os.path.isfile(wordlist):
            msg = "File exists"
            logger.debug(msg)
            logger.info('[*] Starting dork scanner from ' + wordlist + ' on ' + target)
            actions.scan_wordlist(target, wordlist)
            logger.info('[*] Scan completed')
        else:
            msg = "[-] ERROR: File does not exist."
            logger.error(msg)
            sys.exit(1)
    else:
        logger.info('[*] Starting dork scanner on ' + target)
        actions.scan(target)
        logger.info('[*] Scan completed')
    if output:
        logger.info('[+] File log written: ' + output)
    logger.info('-----End-----\n')
    else:
        path = os.path.join(dir_path, equipment.ip + '.cfg')
        with open(path, 'w') as _f:
            _f.write(config)
elif args['tune']:
    for equipment in eqp_gen(ip_addrs):
        try:
            if args['<file>']:
                func, arg = json_config.Config.get_options, args['<file>']
            else:
                config = json_config.Config(settings.settings_dir_path)
                func, arg = config.load_options, equipment.get_eqp_type()
            cmd = equipment.analyze_config(func(arg))
            if not cmd:
                logger.info('%s - tune not required' % equipment.ip)
                sys.exit(0)
        except (json_config.ConfigException, dlink.DlinkInitException,
                dlink.DlinkConfigException) as exc:
            logger.critical(exc)
            sys.exit(1)
        if args['--dry-run']:
            print cmd
        else:
            conn = telnet.Telnet(equipment.ip, eqp_type=equipment.eqp_type)
            try:
                conn.login(
                    settings.telnet_username,
                    settings.telnet_password
def main():
    if output:
        handler = logging.FileHandler(output)
        handler.setLevel(logging.INFO)
        logger.addHandler(handler)
    logger.info('-----Start-----')
    if target:
        if extensive:
            logger.info('[*] Starting extensive ip lookup on ' + target)
        else:
            logger.info('[*] Starting ip lookup on ' + target)
        actions.host_inspect(target, extensive)
        logger.info('[*] Ip Lookup completed')
        if reverse and not extensive:
            logger.info('[*] Starting reverse ip lookup on ' + target)
            actions.reverse(target, False)
            logger.info('[*] Reverse ip lookup completed')
        if reverse and extensive:
            logger.info('[*] Starting Extensive reverse ip lookup on ' + target)
            logger.warning('[*] This feature shows all domains pointing at the same server but with different IPs')
            actions.reverse(target, True)
            logger.info('[*] Extensive reverse ip lookup completed')
        if google:
            logger.info('[*] Starting search on ' + target)
            actions.search(target)
            logger.info('[*] Search completed')
    if output:
        logger.info('[+] File log written: ' + output)
    logger.info('-----End-----\n')
def run_socketio_server():
    logger.info("Socketio server run on host:{}, port:{}".format(
        config['SERVER_HOST'], config['SERVER_PORT']))
    sio = init_sio()
    hybrid_server = socketio.Middleware(sio, app)
    eventlet_socket = eventlet.listen(('', config['SERVER_PORT']))
    wsgi.server(eventlet_socket, hybrid_server)
def host_inspect(target, extensive):
    if core.is_valid_ip(target):
        msg = "Ip Validation OK"
        logger.debug(msg)
        msg = "[+] Valid ip"
        logger.info(msg)
        msg = "[*] Performing hostname conversion"
        logger.info(msg)
        try:
            value = core.get_host_by_ip(target)
            util.list_to_string(value)
        except Exception:
            msg = "[-] ERROR: Cannot resolve hostname"
            logger.error(msg)
    elif core.is_valid_hostname(target):
        msg = "Host Validation OK"
        logger.debug(msg)
        msg = "[+] Valid host"
        logger.info(msg)
        msg = "[*] Performing ip conversion"
        logger.info(msg)
        try:
            value = core.get_host_by_name(target)
            util.list_to_string(value)
        except Exception:
            msg = "[-] ERROR: Cannot resolve hostname"
            logger.error(msg)
    else:
        msg = "[-] ERROR: You must provide a valid target. Given: " + target
        showhelp()
        logger.error(msg)
        sys.exit(1)
    db = GEOIPFILE
    geo = core.ip_to_country(core.get_ip(target), db)
    if geo:
        msg = "[+] The host is situated in " + geo
        logger.info(msg)
    else:
        msg = "[-] Cannot geolocalize the host"
        logger.warning(msg)
    if extensive:
        msg = "Extensive probing"
        logger.debug(msg)
        msg = "[*] Starting extensive information gathering"
        logger.info(msg)
        whois = core.get_extensive_data(target, 0)
        info = core.get_extensive_data(target, 1)
        dns = core.get_extensive_data(target, 2)