def make(self):
    """
    Compile the project with the prepared (PyInstaller) command line.

    :return: None. Logs progress; on success removes the temporary build dir.
    """
    if not self.cmd:
        logger.error("文件不存在:%s" % self.make_file)
        return None
    logger.info("执行命令: \n%s" % self.cmd)
    logger.info("开始编译...")
    # Run the build command and capture its combined output.
    msg_ = self.execute.execute(self.cmd)
    logger.debug(msg_)
    # PyInstaller prints this exact line only when the EXE build succeeded.
    if "INFO: Building EXE from EXE-00.toc completed successfully." in msg_:
        logger.info("编译成功...")
        # Clean up cached/temporary build files.
        logger.info("开始清理缓存文件...")
        if exists(self.temp):
            shutil.rmtree(self.temp)
            logger.info("缓存清除成功...")
        else:
            logger.info("清除临时文件失败,文件不存在:%s" % self.temp)
def download(self, origin, index=1):
    # Load the predefined waypoint route number *index* and publish it,
    # translating metre offsets into absolute locations starting at *origin*.
    # NOTE(review): the file is named waypoint.yaml but parsed with toml —
    # confirm the actual on-disk format.
    import toml
    import os
    file_path = os.path.join('..', 'Toml', 'waypoint.yaml')
    key = "waypoint{}".format(index)
    with open(file_path) as f:
        wps = toml.loads(f.read())
    count = len(wps)  # number of top-level waypoint tables in the file
    if index > count or index == 0:
        logger.error('index out of range when download Waypoints')
        return
    waypoints = wps[key]
    Trail = waypoints['Trail']
    result = [origin]
    points = waypoints['points']
    number = 0
    # Each point is a (dNorth, dEast) metre offset from the previous location.
    for point in points:
        result.append(get_location_metres(
            result[number], point[0], point[1]))
        number += 1
    self.publish('Waypoint', result[1:])  # drop the origin itself
    self.publish('WaypointID', 0)
    # self.publish('WaypointType', 'Download')
    logger.debug('Trail:{}\n Waypoints:{}'.format(
        Trail, self.subscribe('Waypoint')))
    logger.info('Download complete')
def enable_output(self):
    """
    Enable telemetry output for the TO application.

    Builds a "SendCfsCommand" instruction carrying the "TO_ENABLE_OUTPUT"
    command code and asks the plugin manager to find a plugin to send it.

    @return bool: True if a plugin send out instruction successfully; otherwise False
    """
    if self.command_args is None:
        log.error("Failed to enable output.")
        return False
    # ENHANCE - This could fail if TO command structure doesn't contain below fields.
    # Error will be reported, however, from the cfs_plugin. Maybe allow user to configure
    # args for TO. Maybe provide CTF "functions" JSON file that receives input in the test script.
    command_payload = {
        "target": self.name,
        "cc": TO_ENABLE_OUTPUT,
        "args": {"cDestIp": self.local_ip, "usDestPort": self.local_port},
        "mid": self.mid,
    }
    instruction = {"data": command_payload, "instruction": "SendCfsCommand"}
    return Global.plugin_manager.find_plugin_for_command_and_execute(instruction)
def is_local_admin():
    """
    Returns True if the current process is run under admin privileges
    """
    isAdmin = None
    if os.name in ("posix", "mac"):
        # Root has effective UID 0 on POSIX-like platforms.
        _ = os.geteuid()
        isAdmin = isinstance(_, (int, float, long)) and _ == 0
    elif os.name == 'nt':
        # BUGFIX: subprocess.mswindows was an undocumented attribute (removed
        # in Python 3) and raised AttributeError instead of detecting Windows;
        # check os.name explicitly, matching the sibling implementation of
        # is_local_admin() elsewhere in this code base.
        import ctypes
        _ = ctypes.windll.shell32.IsUserAnAdmin()
        isAdmin = isinstance(_, (int, float, long)) and _ == 1
    else:
        errMsg = "keimpx is not able to check if you are running it "
        errMsg += "as an administrator account on this platform. "
        errMsg += "keimpx will assume that you are an administrator "
        errMsg += "which is mandatory for the requested attack "
        errMsg += "to work properly"
        logger.error(errMsg)
        isAdmin = True
    return isAdmin
def update():
    """Update SIR in place by pulling the latest master from GitHub; exits the process."""
    if not os.path.exists(os.path.join(ROOTDIR, ".git")):
        msg = "[-] Not a git repository. Please checkout the repository from GitHub (e.g. git clone https://github.com/AeonDave/sir.git)"
        logger.error(msg)
        if PLATFORM == 'nt':
            msg = "[-] Please checkout the repository from GitHub with GitHub for Windows (e.g. https://windows.github.com)"
            logger.warning(msg)
        msg = "[*] Repository at https://github.com/AeonDave/sir.git"
        logger.info(msg)
    else:
        msg = "[*] Updating SIR from latest version from the GitHub Repository\n"
        logger.info(msg)
        # BUGFIX: wait for each git command to finish; launching them without
        # communicate() let "git pull" race against the still-running stash.
        Popen("git stash", shell=True, stdout=PIPE, stderr=PIPE).communicate()
        Popen("git stash drop", shell=True, stdout=PIPE, stderr=PIPE).communicate()
        process = Popen("git pull origin master", shell=True, stdout=PIPE, stderr=PIPE)
        process.communicate()
        success = not process.returncode
        if success:
            msg = "[+] Updated!\n"
            logger.info(msg)
            sys.exit(0)
        else:
            msg = "[-] Error!\n"
            logger.error(msg)
            sys.exit(1)
def set_targets():
    """Build and return the unique, ordered list of scan targets from conf.target / conf.list."""
    targets = []
    logger.info('Loading targets')
    if conf.target is not None:
        if '/' not in conf.target:
            logger.debug('Loading targets from command line')
            targets.append(add_target(conf.target))
        else:
            # CIDR notation: expand every address in the network.
            address, mask = re.search(r"([\d.]+)/(\d+)", conf.target).groups()
            logger.debug('Expanding targets from command line')
            start_int = addr_to_int(address) & ~((1 << 32 - int(mask)) - 1)
            end_int = start_int | ((1 << 32 - int(mask)) - 1)
            for _ in range(start_int, end_int):
                targets.append(add_target(int_to_addr(_)))
    if conf.list is not None:
        logger.debug('Loading targets from file %s' % conf.list)
        parsed_targets = parse_targets_file(conf.list)
        if parsed_targets is not False:
            for target in parsed_targets:
                targets.append(target)
    # De-duplicate while preserving insertion order.
    unique_targets = []
    for target in targets:
        if target not in unique_targets:
            unique_targets.append(target)
    if len(unique_targets) < 1:
        logger.error('No valid targets loaded')
        sys.exit(1)
    # BUGFIX: report the number of *unique* targets (what is returned),
    # not the raw count including duplicates.
    logger.info('Loaded %s unique target%s' % (len(unique_targets),
                's' if len(unique_targets) > 1 else ''))
    return unique_targets
def download(self, filename, path=None):
    """Download a remote file (or directory tree) from the current SMB share into *path*."""
    self.check_share()
    basename = os.path.basename(filename)
    if path is None:
        path = '.'
    else:
        # Normalise Windows separators for the local filesystem path.
        path = path.replace('\\', '/')
    # Populate self.completion with (name, is_directory, size) listing matches.
    self.ls(basename, display=False)
    for identified_file, is_directory, size in self.completion:
        if is_directory > 0:
            # Recurse into sub-directories, then restore the working directory.
            self.downloadtree(identified_file)
            self.cd('..')
            continue
        filepath = ntpath.join(self.pwd, identified_file)
        logger.debug('Downloading file %s (%d bytes)..' % (filepath, size))
        try:
            # NOTE(review): fh is not closed if getFile raises — consider try/finally.
            fh = open(os.path.join(path, identified_file), 'wb')
            self.smb.getFile(self.share, filepath, fh.write)
            fh.close()
        except SessionError as e:
            if e.getErrorCode() == nt_errors.STATUS_ACCESS_DENIED:
                logger.warn('Access denied to %s' % identified_file)
            elif e.getErrorCode() == nt_errors.STATUS_SHARING_VIOLATION:
                logger.warn('Access denied to %s due to share access flags' % identified_file)
            else:
                logger.error('Unable to download file: %s' % (e.getErrorString(),))
def update():
    """Update Tilt in place by pulling the latest master from GitHub; exits the process."""
    if not os.path.exists(os.path.join(ROOTDIR, ".git")):
        msg = "[-] Not a git repository. Please checkout the repository from GitHub (e.g. git clone https://github.com/AeonDave/tilt.git)"
        logger.error(msg)
        if PLATFORM == 'nt':
            msg = "[-] Please checkout the repository from GitHub with GitHub for Windows (e.g. https://windows.github.com)"
            logger.warning(msg)
        msg = "[*] Repository at https://github.com/AeonDave/tilt.git"
        logger.info(msg)
    else:
        msg = "[*] Updating Tilt from latest version from the GitHub Repository\n"
        logger.info(msg)
        # BUGFIX: wait for each git command to finish; launching them without
        # communicate() let "git pull" race against the still-running stash.
        Popen("git stash", shell=True, stdout=PIPE, stderr=PIPE).communicate()
        Popen("git stash drop", shell=True, stdout=PIPE, stderr=PIPE).communicate()
        process = Popen("git pull origin master", shell=True, stdout=PIPE, stderr=PIPE)
        process.communicate()
        success = not process.returncode
        if success:
            msg = "[+] Updated!\n"
            logger.info(msg)
            sys.exit(0)
        else:
            msg = "[-] Error!\n"
            logger.error(msg)
            sys.exit(1)
def check_event(self, app, id, msg=None, is_regex=False, msg_args=None):
    """Checks for an EVS event message in the telemetry packet history,
    assuming a particular structure for CFE_EVS_LongEventTlm_t.
    This can be generified in the future to determine the structure
    from the MID map.
    """
    log.info("Checking event on {}".format(self.config.name))
    # Substitute printf-style msg_args into the expected message text, if given.
    if msg_args is not None and len(msg_args) > 0:
        try:
            msg = msg % literal_eval(msg_args)
        except Exception as e:
            log.error(
                "Failed to check Event ID {} in App {} with message: '{}' with msg_args = {}"
                .format(id, app, msg, msg_args))
            log.debug(traceback.format_exc())
            return False
    # Event IDs may be given as macro names; resolve them to numeric values.
    if not str(id).isnumeric():
        id = self.resolve_macros(id)
    # TODO - Should use the mid_map and EVS event name to determine these...
    # These are the values that will be used to look through the telemetry packets
    # for the expected packet
    args = [{
        "compare": "streq",
        "variable": "Payload.PacketID.AppName",
        "value": app
    }, {
        "compare": "==",
        "variable": "Payload.PacketID.EventID",
        "value": id
    }]
    # Try the short-format event first; it carries no message text to compare.
    result = self.cfs.check_tlm_value(self.cfs.evs_short_event_msg_mid,
                                      args,
                                      discard_old_packets=False)
    if result:
        log.info(
            "Received EVS_ShortEventTlm_t. Ignoring 'Message' field...")
    else:
        if msg:
            # Long-format event: also compare the message (exact or regex).
            compare = "regex" if is_regex else "streq"
            args.append({
                "compare": compare,
                "variable": "Payload.Message",
                "value": msg
            })
            result = self.cfs.check_tlm_value(
                self.cfs.evs_long_event_msg_mid,
                args,
                discard_old_packets=False)
        else:
            log.warn(
                "No msg provided; any message for App {} and Event ID {} will be matched."
                .format(app, id))
            result = self.cfs.check_tlm_value(
                self.cfs.evs_long_event_msg_mid,
                args,
                discard_old_packets=False)
    return result
def is_local_admin():
    """
    Returns True if the current process is run under admin privileges
    """
    admin = None
    if os.name in ('posix', 'mac'):
        # Root has effective UID 0 on POSIX-like platforms.
        euid = os.geteuid()
        admin = isinstance(euid, (int, float, long)) and euid == 0
    elif sys.platform.lower() == 'win32':
        import ctypes
        rc = ctypes.windll.shell32.IsUserAnAdmin()
        admin = isinstance(rc, (int, float, long)) and rc == 1
    else:
        # Unknown platform: warn and optimistically assume admin rights.
        errMsg = ("keimpx is not able to check if you are running it "
                  "as an administrator account on this platform. "
                  "keimpx will assume that you are an administrator "
                  "which is mandatory for the requested attack "
                  "to work properly")
        logger.error(errMsg)
        admin = True
    return admin
def run_command_persistent(self, command, cwd="", prefix=":"):
    """Run *command* on the remote host detached via nohup, capturing its PID in self.last_pid."""
    log.info("Remote persistent command: {}".format(command))
    if not self.connection or not self.connection.is_connected:
        log.error("No connection to remote host.")
        return False
    # TODO Investigate ways to pipe the output back and capture it live instead of directing to /dev/null.
    # The interaction of fabric and nohup etc. limit our options
    pid_file = "/tmp/pid"
    # Detach the command with nohup and write the background shell's PID to pid_file.
    cmd_str = "nohup sh -c '{command} & echo $! > {pid_file}' & 2>&1".format(command=command,
                                                                            pid_file=pid_file)
    with self.connection.cd(cwd):
        with self.connection.prefix(prefix):
            try:
                result = self.connection.run(cmd_str,
                                             hide=(not self.config.print_stdout),
                                             timeout=self.config.command_timeout)
            except (invoke.exceptions.UnexpectedExit, invoke.exceptions.CommandTimedOut) as e:
                result = invoke.Result(exited=e.result.exited)
            # TODO Investigate ways this could go wrong in various error conditions. There may be a more
            # reliable method to preserve the PID and/or avoid possibly picking up an old one.
            # Read then truncate the PID file so a later call cannot pick up a stale PID.
            pid = self.connection.run('cat {pid_file} && > {pid_file}'.format(pid_file=pid_file))
            if pid.exited == 0 and pid.stdout:
                self.last_pid = pid.stdout.rstrip()
            else:
                self.last_pid = None
                log.error("Unable to get PID of last command!")
    log.info("Remote {cmd} complete. Exit Code = {code}. PID = {pid}"
             .format(cmd=cmd_str, code=result.exited, pid=self.last_pid))
    self.last_result = result
    return result.exited == 0
def read_sb_packets(self):
    """
    read_sb_packets() is responsible for receiving packets coming from
    the CFS application that is being tested and placing them in a
    dictionary of lists that is ordered by mids as shown below.

    received_mid_packets_dic = {
        "mid1": ["The last packet received with mid1"],
        "mid2": ["The last packet received with mid2"]
    }
    """
    while True:
        # Read from the socket until no more data available
        try:
            recvd = bytearray(self.telemetry.read_socket())
            if len(recvd) <= 0:
                break
            # Pull the primary header from the received data
            try:
                pheader = self.ccsds.CcsdsPrimaryHeader.from_buffer(
                    recvd[0:self.pheader_offset])
            except ValueError:
                log.error("Cannot create CCSDS Primary Header")
                continue
            # If the packet is a command packet it is handled differently
            if pheader.is_command():
                self.parse_command_packet(recvd)
            else:
                self.parse_telemetry_packet(recvd)
        except socket.timeout:
            # No data waiting on the socket: stop draining for this cycle.
            log.warning(
                "No telemetry received from CFS. Socket timeout...")
            break
def crawl(user: User):
    """Log into Amazon with the user's account and cache the Kindle book-list HTML page."""
    account: AmazonAccount = pick_amazon_account(user)
    user_cache_root: Path = CACHE_ROOT / str(user.id)
    current_cache_root: Path = user_cache_root / current_date_str()
    # Ensure the per-user and per-date cache directories exist.
    for root in [user_cache_root, current_cache_root]:
        if not root.exists():
            root.mkdir()
    driver_builder = DriverBuilder()
    driver = driver_builder.get_driver(headless=False)
    try:
        driver.get(BASE_URL)
        login(driver, account=account)
        iframe = pick_iframe(driver)
        # NOTE(review): switch_to_frame is deprecated in Selenium;
        # driver.switch_to.frame(...) is the modern equivalent.
        driver.switch_to_frame(iframe)
        wait_book_container(driver)
        # Persist the fully-rendered page for later offline parsing.
        cache_file = current_cache_root / "cache.html"
        with cache_file.open("w") as f:
            f.write(driver.page_source)
    except Exception as e:
        logger.error(type(e))
        logger.error(e)
    finally:
        # Always tear down the browser, even on failure.
        driver.close()
        driver.quit()
        logger.info("close & quit driver")
def get_file(self, remote_path, local_path, args=None, name="default"):
    """Fetch a remote file via the named execution target; False if the target is unknown."""
    log.debug("SshPlugin.get_file")
    target = self.targets.get(name)
    if target is None:
        log.error("No Execution target named {}".format(name))
        return False
    return target.get_file(remote_path, local_path, args)
def run(self):
    """Worker loop: pop '#'-separated "timestamp#command" messages off the queue and run them."""
    while True:
        # Idle: keep an armed vehicle braked while no work is pending.
        if self.work_queue.empty() and self.vehicle.isArmed():
            self.vehicle._brake()
            time.sleep(.01)
            continue
        message = self.work_queue.get().split('#')
        try:
            _timestamp = float(message[0])
            command = message[1].strip()
        except Exception as e:
            # Malformed message (missing field / bad float): skip it.
            logger.error(e)
            continue
        timeout = time.time() - _timestamp
        if timeout > 1.5:
            # Stale commands are dangerous for a moving vehicle; drop them.
            logger.debug('Timestamp is invalid timeout:{}'.format(timeout))
            continue
        # BUGFIX: `command is ''` tested identity, not equality, and relied on
        # unreliable small-string interning; use an equality check instead.
        if command == '':
            continue
        command = "self." + command
        logger.debug('Execute command {}'.format(command))
        try:
            # SECURITY: eval() executes arbitrary code from the queue; only
            # safe while all producers are trusted.
            eval(command)
            self.work_queue.task_done()
        except Exception as e:
            logger.error(e)
def lib_send_sms_message(message):
    """Send an SMS for a chat message through the configured SMS gateway; True on success."""
    payload = json.loads(message)
    sms_from = payload['fromUser']
    sms_to = payload['toUser']
    sms_message = payload['messageBody'] + config['SMS_SUFFIX']
    # Gateway expects the MD5 of the account password plus the tagged content.
    sms_data = {
        'username': config['SMS_USERNAME'],
        'password_md5': hashlib.md5(config['SMS_PASSWORD']).hexdigest(),
        'apikey': config['SMS_API_KEY'],
        'encode': 'UTF-8',
        'mobile': sms_to,
        'content': sms_message + '【Drchat: ' + sms_from + '】',
    }
    res = requests.post(config['SMS_API_URL'], data=sms_data)
    if res.status_code != 200:
        logger.error('Cannot connect to sms platform: {}'.format(
            config['SMS_API_URL']))
        return False
    if res.text.split(':')[0] != 'success':
        logger.error('Send sms error: {}'.format(res.text))
        return False
    logger.debug('Send sms to {} success!'.format(sms_to))
    return True
def deploy(config_file, environment, version):
    """
    The only function you want to call from main(). A nice cozy wrapper for
    the deployment operations, nothing more, nothing less.

    Args:
        config_file (str): path to the configuration file
        environment (str): the environment you're about to deploy
        version (str): version identifier passed to the deployment step

    Raises:
        PipelineError: something went wrong during this deployment, please file a bug
    """
    try:
        # NOTE(review): configuration is always read under the 'deploy' key,
        # then narrowed to the requested environment — confirm this is intended.
        config = read_configuration(config_file=config_file, environment='deploy')
        config = config[environment]
        project_name = os.environ['CI_PROJECT_NAME']
    except KeyError as error:
        msg = "missing key in configuration! {0}".format(error)
        logger.error(msg)
        raise PipelineError(msg)
    kerberos.check(config)
    # Build the docker image for this environment, deploy it, then mark done.
    image_name = docker_image_name(environment)
    create_docker_configuration(config)
    dockerfile = dockerfile_path(config)
    create_docker_image(image_name, dockerfile)
    execute_deployment(config, image_name, version)
    mark_deployment_as_done(config, project_name)
def main():
    """Entry point: run the dork scan (wordlist-driven when available) and log results."""
    if output:
        # Mirror INFO+ log records to the requested output file.
        handler = logging.FileHandler(output)
        handler.setLevel(logging.INFO)
        logger.addHandler(handler)
    logger.info('-----Start-----')
    if target and wordlist:
        if os.path.isfile(wordlist):
            msg = "File exist"
            logger.debug(msg)
            # BUGFIX: added the missing space after 'from' in the log message.
            logger.info('[*] Starting dork scanner from ' + wordlist + ' on ' + target)
            actions.scan_wordlist(target, wordlist)
            logger.info('[*] Scan completed')
        else:
            msg = "[-] ERROR: File not exist."
            logger.error(msg)
            sys.exit(1)
    else:
        logger.info('[*] Starting dork scanner on ' + target)
        actions.scan(target)
        logger.info('[*] Scan completed')
    if output:
        logger.info('[+] File log written: ' + output)
    logger.info('-----End-----\n')
def lib_user_login(username, password):
    """
    Validate a username/password pair against the 'user' hash in Redis.

    Returns a response dict: {'status': 'ok'|'err', 'data': ...}.
    """
    hash_key = 'user'
    response = {}
    try:
        redis_client = redis.StrictRedis.from_url(config['REDIS_LOCAL_URL'])
        redis_data = redis_client.hget(hash_key, username)
    # BUGFIX: narrowed from a bare `except:`, which also swallowed
    # programming errors and KeyboardInterrupt.
    except redis.RedisError:
        logger.error("ERROR! Cannot connect to {}".format(config['REDIS_LOCAL_URL']))
        response['status'] = 'err'
        response['data'] = "连接数据库错误!"
        return response
    if redis_data is None:
        logger.warning("WARNING! User {} not exist".format(username))
        response['status'] = 'err'
        response['data'] = "该用户尚未注册!"
        return response
    user_data = json.loads(redis_data.decode('utf-8'))
    # NOTE(review): passwords are stored/compared in plain text — consider hashing.
    if user_data['password'] != password:
        logger.debug("User {} password is not correct".format(username))
        response['status'] = 'err'
        response['data'] = "密码错误!"
    else:
        logger.debug("User {} login success".format(username))
        response['status'] = 'ok'
        response['data'] = {"username": username}
    return response
def run_command_local(self, command, name="default"):
    """Run *command* locally via the named execution target; False if the target is unknown."""
    log.debug("SshPlugin.run_command_local")
    target = self.targets.get(name)
    if target is None:
        log.error("No Execution target named {}".format(name))
        return False
    return target.run_command_local(command)
def resolve(name):
    """Resolve *name* to an IP address and log the outcome."""
    ip = core.get_ip_by_name(name)
    if ip:
        logger.info('[+] Resolved! IP: ' + ip)
    else:
        logger.error('[-] Error: Impossible to resolve ' + name)
def lib_user_register(username, password):
    """
    Register a new user in the Redis 'user' hash.

    Returns a response dict: {'status': 'ok'|'err', 'data': ...}.
    """
    hash_key = 'user'
    response = {}
    try:
        redis_client = redis.StrictRedis.from_url(config['REDIS_REMOTE_URL'])
        redis_data = redis_client.hget(hash_key, username)
    # BUGFIX: narrowed from a bare `except:` to Redis errors only.
    except redis.RedisError:
        logger.error("ERROR! Cannot connect to {}".format(config['REDIS_REMOTE_URL']))
        response['status'] = 'err'
        response['data'] = "连接数据库错误!"
        return response
    if redis_data:
        logger.warning("WARNING! User {} already exist".format(username))
        response['status'] = 'err'
        response['data'] = "用户已被注册!"
        return response
    save_data = {
        'username': username,
        'password': password,
    }
    # ROBUSTNESS: the write can fail too; report it like the read failure
    # instead of letting the exception escape uncaught.
    try:
        redis_client.hset(hash_key, username, json.dumps(save_data))
    except redis.RedisError:
        logger.error("ERROR! Cannot connect to {}".format(config['REDIS_REMOTE_URL']))
        response['status'] = 'err'
        response['data'] = "连接数据库错误!"
        return response
    logger.debug("Success register user {}".format(username))
    response['status'] = 'ok'
    response['data'] = {"username": username}
    return response
def search_phone(self, phone_num):
    """Scrape 2345.com's blacklist result page for crowd-sourced tags on *phone_num*."""
    # 解析url,获取响应 -> fetch and decode the result page (site is GBK-encoded).
    item = {}
    url = self.temp_url.format(phone_num)
    r = requests.get(url, headers=self.headers)
    html_str = r.content.decode('gbk')
    try:
        html = etree.HTML(html_str)
        # Extract the queried number back out of the result URL.
        number = re.search(
            r'http://tools.2345.com/frame/black/result/(\d*)', url, re.S).group(1)
        item['phone'] = number
        li_list = html.xpath("//ul[@class='ulInforList']/li")
        if len(li_list) > 0:
            for li in li_list:
                # NOTE(review): '//span' is an absolute XPath — it searches the
                # whole document, not just this <li>; './/span' may be intended.
                biaoshi = li.xpath("//span[@class='sStyle']/text()")
                biaoshi = list(set(biaoshi))  # de-duplicate the tag strings
                item['tag_type'] = biaoshi
            logger.info('爬取出来的标识是%s' % item)
        else:
            item = {}
            logger.info('电话号码%s的标识为空' % number)
        return item
    except RequestException as e:
        # On failure, log together with the persisted user id for diagnostics.
        with open(self.path, 'r') as f:
            user_id = f.read()
        logger.error('userid is %s ,request fail %s' % (user_id, e))
def download_ftp(self, host, remote_path, local_path, name="default"):
    """Download a file over FTP via the named execution target; False if the target is unknown."""
    log.debug("SshPlugin.download_ftp")
    target = self.targets.get(name)
    if target is None:
        log.error("No Execution target named {}".format(name))
        return False
    return target.download_ftp(host, remote_path, local_path)
def build_cfs(self):
    """Build CFS on the remote target, then retrieve the build log into the script log dir."""
    log.info("Building Remote CFS")
    # Tee build output into a file so it can be fetched after the build.
    build_out_file = os.path.join(
        "/tmp", "{}_build_cfs_output.txt".format(self.config.name))
    build_command = "{} 2>&1 | tee {}".format(self.config.cfs_build_cmd,
                                              build_out_file)
    build_success = self.execution_controller.run_command(
        build_command, cwd=self.config.cfs_build_dir)
    log.debug("Build process completed")
    Global.time_manager.wait_seconds(1)
    # Copy the remote build log locally (deleting the remote copy) unless it
    # already exists from a previous attempt.
    stdout_final_path = os.path.join(Global.current_script_log_dir,
                                     os.path.basename(build_out_file))
    if not os.path.exists(stdout_final_path):
        if not self.execution_controller.get_file(
                build_out_file, stdout_final_path, {'delete': True}):
            log.warn(
                "Cannot move CFS build output file to script log directory."
            )
            if self.execution_controller.last_result:
                log.debug(
                    self.execution_controller.last_result.stdout.strip())
    if not build_success:
        log.error("Failed to build Remote CFS!")
    return build_success
def on_message(client, userdata, msg):
    """MQTT callback: forward keyboard action codes to the Lidar service (semi-auto mode)."""
    # logger.debug('Received {} from Keyboard'.format(msg.payload))
    stub = userdata['stub']
    vehicle = userdata['vehicle']
    # Payload is a comma-separated list of integer action codes.
    command = map(int, msg.payload.split(','))
    if oa.STOP in command:
        # STOP overrides everything: ack and brake immediately.
        # print 'brake'
        client.publish('ACK', 'ack')
        vehicle.brake()
        return
    # Tag the request with a monotonically increasing id to match the reply.
    userdata['semi_id'] = userdata['semi_id'] + 1
    message = {'id': userdata['semi_id'], 'actions': command}
    logger.debug('Send {} to Lidar'.format(message))
    try:
        id, actions = stub.SemiAuto(message)
        logger.debug('Received id:{} actions:{} from Lidar'.format(
            id, actions))
        if id != userdata['semi_id']:
            # Reply belongs to a different request; ignore it.
            logger.error('ID not match.Note:ExceptID:{} ReceiveID:{}'.format(
                userdata['semi_id'], id))
            return
        exe_actions(vehicle, actions)
    except grpc.RpcError, e:
        logger.critical(e)
def remove_tlm_condition(self, v_id):
    """Deregister the continuous telemetry verification *v_id* and log its pass/fail tallies."""
    # Find the verification in the nested {mid: {v_id: verification}} mapping.
    verification = next(
        (ids[v_id] for ids in self.tlm_verifications_by_mid_and_vid.values()
         if v_id in ids), None)
    if not verification:
        # Unknown id: the wording depends on whether failed checks are
        # auto-removed (it may already have been removed on failure).
        if self.config.remove_continuous_on_fail:
            log.error(
                "Condition with id {} is not registered! It may have failed earlier in the test."
                .format(v_id))
        else:
            log.error(
                "Condition with id {} is not registered! Check your test instructions."
                .format(v_id))
        return False
    log.info("Continuous Telemetry Check {} on {}:".format(
        v_id, self.output_manager.name))
    log.info("Number times Passed: {}".format(
        verification.pass_count))
    log.info("Number times Failed: {}".format(
        verification.fail_count))
    # Remove the entry from its MID bucket.
    self.tlm_verifications_by_mid_and_vid[
        verification.condition.mid['MID']].pop(v_id)
    return True
def run_script(self, status_manager):
    """
    Execute a complete test script, updating the status_manager as needed.
    """
    script_status = StatusDefs.active
    # Set script as active
    status_manager.update_script_status(script_status, "")
    try:
        self.num_tests = len(self.tests)
        self.log_test_header()
        self.start_time = time.time()
        for test in self.tests:
            # Propagate a script-level verification timeout override to each test.
            if self.verify_timeout:
                test.ctf_verification_timeout = self.verify_timeout
            test_status = test.run_test(status_manager)
            if test_status == StatusDefs.aborted:
                log.error("Aborted Test Script: {}".format(
                    self.input_file))
                break
        self.exec_time = time.time() - self.start_time
        self.generate_test_results()
    except Exception as exception:
        # Mark the script errored and re-raise with chained context.
        script_status = StatusDefs.error
        status_manager.update_script_status(script_status, "Error")
        raise CtfTestError("Error in run_script") from exception
def set_credentials():
    """Build and return the unique, ordered list of credentials from conf.user / conf.credsfile."""
    credentials = []
    logger.info('Loading credentials')
    if conf.user is not None:
        logger.debug('Loading credentials from command line')
        credentials.append(
            add_credentials(conf.user, conf.password or '', conf.lmhash or '',
                            conf.nthash or '', conf.domain or ''))
    if conf.credsfile is not None:
        logger.debug('Loading credentials from file %s' % conf.credsfile)
        parsed_credentials = parse_credentials_file(conf.credsfile)
        for credential in parsed_credentials:
            if credential is not None:
                credentials.append(credential)
    # De-duplicate while preserving insertion order.
    unique_credentials = []
    for credential in credentials:
        if credential not in unique_credentials:
            unique_credentials.append(credential)
    if len(unique_credentials) < 1:
        logger.error('No valid credentials loaded')
        sys.exit(1)
    # BUGFIX: report the number of *unique* credentials (what is returned),
    # not the raw count including duplicates.
    logger.info('Loaded %s unique credential%s' % (len(unique_credentials),
                's' if len(unique_credentials) > 1 else ''))
    return unique_credentials
def lib_send_sms_message(message):
    """Send an SMS notification for a chat message via the configured gateway; True on success."""
    parse_message = json.loads(message)
    sms_from = parse_message["fromUser"]
    sms_to = parse_message["toUser"]
    sms_message = parse_message["messageBody"] + config["SMS_SUFFIX"]
    sms_data = {
        "username": config["SMS_USERNAME"],
        # NOTE(review): md5() requires bytes on Python 3 — confirm
        # SMS_PASSWORD is bytes, or that this runs on Python 2.
        "password_md5": hashlib.md5(config["SMS_PASSWORD"]).hexdigest(),
        "apikey": config["SMS_API_KEY"],
        "encode": "UTF-8",
        "mobile": sms_to,
        "content": sms_message + "【Drchat: " + sms_from + "】",
    }
    res = requests.post(config["SMS_API_URL"], data=sms_data)
    if res.status_code != 200:
        logger.error("Cannot connect to sms platform: {}".format(config["SMS_API_URL"]))
        return False
    # Gateway reports success via a "success:..." response body.
    if res.text.split(":")[0] != "success":
        logger.error("Send sms error: {}".format(res.text))
        return False
    logger.debug("Send sms to {} success!".format(sms_to))
    return True
def sanitize_args(self, args):
    """
    Iterates over arguments within test instructions and decodes
    arguments if needed.
    """
    if args is None:
        return None
    try:
        # args may be a dictionary {'expectedCmdCnt': 1, 'expectedErrCnt': 0} or
        # a list [{'expectedCmdCnt': 1, 'expectedErrCnt': 0}]
        if isinstance(args, dict):
            for key in list(args):
                if isinstance(args[key], bytes):
                    args[key] = args[key].decode()
        elif isinstance(args, list):
            for idx, entry in enumerate(args):
                if isinstance(entry, dict):
                    for inner_key, inner_val in entry.items():
                        if isinstance(inner_val, bytes):
                            entry[inner_key] = inner_val.decode()
                elif isinstance(entry, bytes):
                    args[idx] = entry.decode()
        return args
    except (TypeError, UnicodeDecodeError) as exception:
        log.error("Cannot decode arguments in {}. Ensure command arguments are formatted as follows\n"
                  "[1, 2, 3] or \n"
                  "[{{\'arg1\': 1, \'arg2\': 2, \'arg3\': 3}}]".format(self.input_script_path))
        log.debug(exception)
        return None
def __del__(self):
    """Best-effort close of the summary file when the object is garbage-collected."""
    if not self.summary_file:
        return
    try:
        self.summary_file.close()
    except Exception as err:
        log.error("Failed to write CTF results summary file!")
        log.error(err)
def initialize(self):
    """Set up CCSDS header types and the local CFS interface; optionally start CFS."""
    log.debug("Initializing CfsController")
    self.process_ccsds_files()
    ccsds = import_ccsds_header_types()
    # All three header types must be importable to talk to CFS.
    if not (ccsds and ccsds.CcsdsPrimaryHeader and ccsds.CcsdsCommand and
            ccsds.CcsdsTelemetry):
        log.error("Unable to import required CCSDS header types")
        return False
    log.info("Starting Local CFS Interface")
    # Command/telemetry endpoints configured from the target's settings.
    command = CommandInterface(ccsds, self.config.cmd_udp_port,
                               self.config.cfs_target_ip,
                               self.config.endianess_of_target)
    telemetry = TlmListener(self.config.ctf_ip, self.config.tlm_udp_port)
    self.cfs = LocalCfsInterface(self.config, telemetry, command,
                                 self.mid_map, ccsds)
    result = self.cfs.init_passed
    if not result:
        log.error("Failed to initialize LocalCfsInterface")
    else:
        if self.config.start_cfs_on_init and not self.cfs_running:
            result = self.start_cfs("")
        else:
            log.warn(
                "Not starting CFS executable... Expecting \"StartCfs\" in test script..."
            )
    if result:
        log.info("CfsController Initialized")
    return result
def openPipe(self, s, tid, pipe, accessMask):
    # Wait (up to 50 tries, 2s apart) for the named pipe to become available,
    # then open it with the requested access mask and return the file id.
    pipeReady = False
    tries = 50
    while pipeReady is False and tries > 0:
        try:
            self.__smb.waitNamedPipe(tid, pipe)
            pipeReady = True
        except Exception as e:
            # traceback.print_exc()
            logger.error('Named pipe open error: %s' % str(e))
            tries -= 1
            time.sleep(2)
    if tries == 0:
        logger.error('Named pipe not ready, aborting')
        # NOTE(review): bare `raise` here relies on Python 2 semantics
        # (re-raise the last exception); outside an except block on
        # Python 3 this raises RuntimeError instead — confirm intent.
        raise
    fid = self.__smb.openFile(tid, pipe, accessMask, creationOption=0x40,
                              fileAttributes=0x80)
    return fid
def check_output(self, output_contains=None, output_does_not_contain=None, exit_code=0, name="default"):
    """Validate the last command's output/exit code on the named target; False if target unknown."""
    log.debug("SshPlugin.check_output")
    target = self.targets.get(name)
    if target is None:
        log.error("No Execution target named {}".format(name))
        return False
    return target.check_output(output_contains, output_does_not_contain, exit_code)
async def sync_feature(self, continue_task=True):
    """Run one feature-sync pass and, unless configured run-once, schedule the next pass."""
    now = datetime.datetime.now()
    logger.info("start sync feature at: {}".format(now))
    next_time = now + datetime.timedelta(
        seconds=g_conf_parameter.sync_time_interval)
    try:
        await self._feature_processor.start()
    except Exception:
        # Never let one failed pass kill the scheduler; log and continue.
        logger.error(traceback.format_exc())
    if g_conf_parameter.sync_run_once == 1:
        logger.info("only run once so complete")
    else:
        # Sleep only the remainder of the interval (0 if this pass overran it).
        tmp = datetime.datetime.now()
        if tmp > next_time:
            _interval = 0
        else:
            _interval = (next_time - tmp).seconds
        logger.info("sleep {} seconds for next schedule".format(_interval))
        self._event_loop.call_later(_interval, self.start_service)
    end = datetime.datetime.now()
    logger.info("end sync feature at: {}, cost:{}s".format(
        end, round((end - now).total_seconds(), 3)))
def run_script(self, status_manager):
    """Execute a complete test script, updating *status_manager* as it progresses."""
    script_status = StatusDefs.active
    script_details = ""  # NOTE(review): appears unused — confirm before removing
    # Set script as active
    status_manager.update_script_status(script_status, "")
    try:
        self.num_tests = len(self.tests)
        self.logTestProlog()
        self.start_time = time.time()
        for test in self.tests:
            # Propagate a script-level verification timeout override to each test.
            if self.verify_timeout:
                test.ctf_verification_timeout = self.verify_timeout
            test_status = test.run_test(status_manager)
            if test_status == StatusDefs.aborted:
                log.error("Aborted Test Script: {}".format(
                    self.input_file))
                break
        self.time_taken = time.time() - self.start_time
        self.generateTestResults()
    except Exception as e:
        # Mark the script errored, then re-raise the original exception.
        script_status = StatusDefs.error
        status_manager.update_script_status(script_status, "")
        raise
def update_ghdb():
    """Refresh the local dork wordlist from exploit-db, retrying up to 3 times on failure."""
    global retry
    msg = "Starting ghdb update"
    logger.debug(msg)
    msg = "[*] Updating Database"
    logger.info(msg)
    try:
        fname = settings.WORDLISTFILE
        with open(fname, 'r') as f:
            content = f.readlines()
            f.close()
        # Resume numbering after the last entry already in the wordlist.
        num = len(content)+1
        while True:
            dork = source.get_dork_from_exploit_db(num)
            if dork:
                retry = 0
                with codecs.open(fname, 'a', "utf-8") as f:
                    f.write(dork+"\n")
                    f.close()
                msg = "[+] Loaded " + dork
                logger.info(msg)
            else:
                # Gap in exploit-db ids: pad the file with blank lines and skip ahead.
                check = source.check_exploit_db(num)
                if check:
                    cont = 0
                    while(cont < check):
                        with codecs.open(fname, 'a', "utf-8") as f:
                            space = " "
                            f.write(space+"\n")
                            f.close()
                        cont +=1
                    num += check -1
                else:
                    break
            num += 1
        msg = "Database update ok"
        logger.debug(msg)
        msg = "[+] Database is up to date"
        logger.info(msg)
        # NOTE(review): sys.exit here acts as control flow — it is caught by
        # the SystemExit handler below to mark a normal end of the update.
        sys.exit(1)
    except SystemExit:
        msg = "End update"
        logger.debug(msg)
    except:
        # Any other failure: count it and retry the whole update up to 3 times.
        retry +=1
        msg = "Database update error"
        logger.debug(msg)
        msg = "[-] ERROR: Database update error"
        logger.error(msg)
        if (retry<3):
            msg = "[*] Retrying update"
            logger.info(msg)
            update_ghdb()
        else:
            msg = "[-] CRITICAL ERROR: Maybe Exploit-db or network is donwn"
            logger.error(msg)
            sys.exit(1)
def get_html_from_url(url):
    """Fetch *url* with a minimal browser UA and return the raw body, or False on failure."""
    user_agent = {'User-agent': 'Mozilla/5.0'}
    try:
        r = requests.get(url, headers=user_agent)
        return r.content
    # BUGFIX: narrowed from a bare `except:` — RequestException covers
    # connection, timeout, and malformed-URL errors without swallowing
    # programming errors or KeyboardInterrupt.
    except requests.RequestException:
        logger.error('[-] Error: Host responded badly')
        return False
def single_mode(self):
    # TODO: the multithreading here needs rework
    # Resolve the target's DNS first; abort (with confirmation) on failure.
    result = exploit.run('dns_resolve', target=self.target)
    if not result.get('status'):
        logger.error('Something wrong, do you want to continue?[y/N]:')
        # raw_input is Python 2; reads the user's confirmation from stdin.
        if not raw_input().lower() == 'y':
            logger.critical('User abort, quit.')
            return
    if result.get('result').get('is_cdn'):
        # Port-scanning a CDN edge is pointless; skip it.
        logger.warn('Target is using CDN, port scan skipped.')
def eqp_gen(arg):
    """
    Generator of Dlink class instances, one per pingable IP in *arg*.
    Unreachable IPs are logged and skipped.
    """
    for _ip in arg:
        try:
            ping.ping(_ip)
        except ping.PingException as _exc:
            logger.error(_exc)
        else:
            yield dlink.Dlink(_ip, **settings.__dict__)
def lib_send_redis_message(message, save_flag=True):
    """Publish a chat message to the socket.io Redis channel; optionally persist it.

    Returns False on failure, None otherwise (historical behavior preserved).
    """
    try:
        # Write-only manager that pushes events into the socket.io Redis queue.
        # BUGFIX: renamed local from `redis` — it shadowed the redis module.
        redis_manager = socketio.RedisManager(config['REDIS_REMOTE_URL'],
                                              channel=config['SOCKET_IO_CHANNEL'],
                                              write_only=True)
        # emit an event
        redis_manager.emit('msg', data=message,
                           namespace=config['SOCKET_IO_NAMESPACE'])
    # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
    except Exception:
        logger.error("ERROR! Cannot connect to {}".format(config['REDIS_REMOTE_URL']))
        return False
    if save_flag:
        lib_save_message_history(message)
def get_html_from_url(value):
    """Fetch *value* with browser-like headers and return the body, or None on error."""
    req = urllib2.Request(value)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0')
    req.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
    req.add_header('Connection', 'keep-alive')
    try:
        rawdata = urllib2.urlopen(req)
        return rawdata.read()
    # BUGFIX: narrowed from a bare `except:` — URLError covers DNS/connection/
    # HTTP failures and ValueError covers malformed URLs, without swallowing
    # programming errors or KeyboardInterrupt.
    except (urllib2.URLError, ValueError):
        logger.error('[-] Error: Invalid host given')
def get_html_from_url(url, name):
    """POST *name* as the username form field to *url* and return the body, or None on error."""
    values = {'username' : name, 'submit' : ''}
    data = urllib.urlencode(values)
    req = urllib2.Request(url, data)
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.3; WOW64; rv:28.0) Gecko/20100101 Firefox/28.0')
    req.add_header('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8')
    req.add_header('Connection', 'keep-alive')
    try:
        response = urllib2.urlopen(req)
        return response.read()
    # BUGFIX: narrowed from a bare `except:` — URLError covers DNS/connection/
    # HTTP failures and ValueError covers malformed URLs.
    except (urllib2.URLError, ValueError):
        logger.error('[-] Error')
def lib_delete_message_list(from_user, to_user):
    """Remove *to_user* from *from_user*'s message-list hash in Redis."""
    redis_key = 'msglist:{}'.format(from_user)
    response = {}
    try:
        redis_client = redis.StrictRedis.from_url(config['REDIS_REMOTE_URL'])
        redis_client.hdel(redis_key, to_user)
    # BUGFIX: narrowed from a bare `except:` to Redis errors only.
    except redis.RedisError:
        # BUGFIX: the error used to report REDIS_LOCAL_URL while connecting
        # to REDIS_REMOTE_URL; log the URL actually used.
        logger.error("ERROR! Cannot connect to {}".format(config['REDIS_REMOTE_URL']))
        response['status'] = 'err'
        response['data'] = "连接数据库错误!"
        return response
    response['status'] = 'ok'
    return response
def scan(target):
    # Validate the target URL, then run the dork scan using the configured wordlist.
    if core.is_valid_url(target):
        msg = "Host Validation OK"
        logger.debug(msg)
        msg = "[+] Url Accepted"
        logger.info(msg)
        msg = "[*] Performing scan"
        logger.info(msg)
        try:
            core.scan(target, settings.WORDLISTFILE)
        except Exception,e:
            # Python 2 except syntax; failures are printed and logged, not raised.
            print str(e)
            msg = "[-] ERROR"
            logger.error(msg)
def __init__(self):
    # Create the listening TCP server socket using the settings.SERVER config.
    logger.warning('Configuration: ' + str(settings.SERVER))
    self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # SO_REUSEADDR allows quick rebinds after restarts.
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Non-blocking: accept/recv are expected to be polled elsewhere.
        self.socket.setblocking(False)
        self.socket.bind((settings.SERVER.HOST, settings.SERVER.PORT))
        self.socket.listen(settings.SERVER.MAX_CONNECTION)
        logger.warning("Server is ready!")
    except Exception as err:
        # NOTE(review): errors are logged but __init__ still completes,
        # leaving a half-initialized server — confirm callers handle this.
        logger.error(err)
def lib_get_message_history(from_user, to_user):
    """Fetch the recent message history between two users from local redis.

    :return: {'status': 'ok', 'data': [message strings]} on success,
             {'status': 'err', 'data': ...} when redis is unreachable.
    """
    redis_key = 'msghistory:{}:{}'.format(from_user, to_user)
    response = {}
    try:
        redis_client = redis.StrictRedis.from_url(config['REDIS_LOCAL_URL'])
        redis_data = redis_client.lrange(redis_key, 0, config['REDIS_HISTORY_LONG'])
    except Exception:
        # Narrowed from a bare except so Ctrl-C/SystemExit still propagate.
        logger.error("ERROR! Cannot connect to {}".format(config['REDIS_LOCAL_URL']))
        response['status'] = 'err'
        response['data'] = "连接数据库错误!"
        return response
    response['status'] = 'ok'
    response['data'] = [ el.decode('utf-8') for el in redis_data ]
    return response
def lib_get_contact(username):
    """Return *username*'s contact list from the local redis hash.

    :return: {'status': 'ok', 'data': [contact dicts]} on success,
             {'status': 'err', 'data': ...} when redis is unreachable.
    """
    hash_key = 'contact:' + username
    response = {}
    try:
        redis_client = redis.StrictRedis.from_url(config['REDIS_LOCAL_URL'])
        redis_data = redis_client.hvals(hash_key)
    except Exception:
        # Narrowed from a bare except so Ctrl-C/SystemExit still propagate.
        logger.error("ERROR! Cannot connect to {}".format(config['REDIS_LOCAL_URL']))
        response['status'] = 'err'
        response['data'] = "连接数据库错误!"
        return response
    response['status'] = 'ok'
    # Each hash value is a JSON-encoded contact record.
    response['data'] = [ json.loads(el.decode('utf-8')) for el in redis_data ]
    return response
def lib_get_message_list(from_user):
    """Return *from_user*'s conversation list (latest message per contact).

    :return: {'status': 'ok', 'data': [message dicts]} on success,
             {'status': 'err', 'data': ...} when redis is unreachable.
    """
    redis_key = 'msglist:{}'.format(from_user)
    response = {}
    try:
        redis_client = redis.StrictRedis.from_url(config['REDIS_LOCAL_URL'])
        redis_data = redis_client.hvals(redis_key)
    except Exception:
        # Narrowed from a bare except so Ctrl-C/SystemExit still propagate.
        logger.error("ERROR! Cannot connect to {}".format(config['REDIS_LOCAL_URL']))
        response['status'] = 'err'
        response['data'] = "连接数据库错误!"
        return response
    response['status'] = 'ok'
    # Each hash value is a JSON-encoded message record.
    response['data'] = [json.loads(el.decode('utf-8')) for el in redis_data]
    return response
def lib_delete_contact(username, contact_username):
    """Delete *contact_username* from *username*'s contact hash in remote redis.

    :return: {'status': 'ok'} on success, {'status': 'err', 'data': ...} otherwise.
    """
    hash_key = 'contact:' + username
    response = {}
    try:
        redis_client = redis.StrictRedis.from_url(config['REDIS_REMOTE_URL'])
        redis_client.hdel(hash_key, contact_username)
    except Exception:
        # Narrowed from a bare except so Ctrl-C/SystemExit still propagate.
        logger.error("ERROR! Cannot connect to {}".format(config['REDIS_REMOTE_URL']))
        response['status'] = 'err'
        response['data'] = "连接数据库错误!"
        return response
    logger.debug("Success delete contact contact:{} {}".format(contact_username, username))
    response['status'] = 'ok'
    return response
def reverse(target, extensive):
    """Reverse-IP lookup: log every domain hosted on *target*.

    :param target: host/ip to probe.
    :param extensive: forwarded to core.get_reversed_hosts.
    """
    logger.debug("Reverse probing")
    hosts = core.get_reversed_hosts(target, extensive)
    if hosts:
        # De-duplicated: the original repeated the same loop in both the
        # singular and plural branches; only the headline noun differs.
        noun = "Domain" if len(hosts) == 1 else "Domains"
        logger.info("[+] " + str(len(hosts)) + " " + noun + " found")
        for host in hosts:
            logger.info(host)
    else:
        logger.error("[-] No Domains found")
def scan_wordlist(target, wordlist):
    """Run a dictionary scan against *target* with a caller-supplied wordlist.

    Exits the process with status 1 when *target* is not a valid URL.
    """
    if core.is_valid_url(target):
        logger.debug("Host Validation OK")
        logger.info("[+] Url Accepted")
        logger.info("[*] Performing scan")
        try:
            core.scan(target, wordlist)
        except Exception:
            # Narrowed from a bare except so Ctrl-C can still abort the scan.
            logger.error("[-] ERROR")
    else:
        msg = "[-] ERROR: You must provide a valid target. Given: "+ target
        showhelp()
        logger.error(msg)
        sys.exit(1)
def lib_add_contact(username, contact_username, contact_nickname):
    """Store a contact (username + nickname) in *username*'s contact hash.

    :return: {'status': 'ok'} on success, {'status': 'err', 'data': ...} otherwise.
    """
    hash_key = 'contact:' + username
    response = {}
    # The contact record is stored JSON-encoded, keyed by the contact's username.
    params = { 'username' : contact_username, 'nickname' : contact_nickname }
    try:
        redis_client = redis.StrictRedis.from_url(config['REDIS_REMOTE_URL'])
        redis_client.hset(hash_key, contact_username, json.dumps(params))
    except Exception:
        # Narrowed from a bare except so Ctrl-C/SystemExit still propagate.
        logger.error("ERROR! Cannot connect to {}".format(config['REDIS_REMOTE_URL']))
        response['status'] = 'err'
        response['data'] = "连接数据库错误!"
        return response
    logger.debug("Success add contact {} to user {}".format(contact_username, username))
    response['status'] = 'ok'
    return response
def get_from_who_is(value, type):
    """Scrape who.is and log the tabular data it returns for *value*.

    :param value: host/domain to look up.
    :param type: 0 -> whois record, 1 -> website information, 2 -> dns records.
    """
    # NOTE(review): 'type' shadows the builtin, but renaming it could break
    # keyword callers, so it is kept.
    endpoints = {
        0: 'http://who.is/whois/',
        1: 'http://who.is/website-information/',
        2: 'http://who.is/dns/',
    }
    url = endpoints.get(type)
    if url is None:
        # Bug fix: an unrecognised type used to crash with NameError on 'url'.
        logger.error('[-] Error: unknown who.is query type: %s' % type)
        return
    rawdata = core.get_html_from_url(url + value)
    if not rawdata:
        logger.error('[-] Error: Invalid host given for extensive data')
        return
    parser = BeautifulSoup(rawdata)
    for block in parser.find_all('div', 'domain-data'):
        title = block.header.h5.get_text()
        table = block.table
        if not table:
            continue
        logger.info('-----' + title.strip() + '-----')
        for row in table.find_all('tr'):
            # Header cells are joined with '-', data cells with ' ',
            # each group logged as one line when non-empty.
            text = ''
            for description in row.find_all('th'):
                chunk = description.get_text().strip()
                if chunk:
                    text = text + '-' + chunk
            if text:
                logger.info(text)
            text = ''
            for data in row.find_all('td'):
                chunk = data.get_text().strip()
                if chunk:
                    text = text + ' ' + chunk
            if text:
                logger.info(text)
def run(self):
    """Accept-loop: poll the non-blocking socket and hand connections to users.

    Runs until self.keepRunning turns false, Ctrl-C is pressed, or an
    unexpected error occurs; the socket is always closed on exit.
    """
    try:
        while self.keepRunning:
            time.sleep(0.1)
            try:
                conn = self.socket.accept()[0]
            except Exception:
                # Non-blocking socket: accept() raises when no client is waiting.
                continue
            users.create_user(conn)
    except KeyboardInterrupt:
        logger.warning("Press CTRL+C for exit!")
    except Exception as err:
        logger.error(err)
        # Bare raise preserves the original traceback (raise err rewrote it).
        raise
    finally:
        # Bug fix: cleanup used to be skipped when the error path re-raised,
        # leaking the listening socket.
        self.socket.close()
        logger.warning("Server is closed.")
def lib_save_message_history(message):
    """Persist a chat message for both participants in one redis pipeline.

    Pushes and trims two history lists (one per direction of the conversation)
    and updates each user's message-list hash with the latest message, all
    batched through a single MULTI/EXEC pipeline.

    :param message: JSON string containing at least 'fromUser' and 'toUser'.
    :return: False when redis is unreachable; None on success (unchanged contract).
    """
    parse_message = json.loads(message)
    from_user = parse_message['fromUser']
    to_user = parse_message['toUser']
    # Two history copies: one keyed for each user's view of the conversation.
    redis_key_local_history = 'msghistory:{}:{}'.format(from_user, to_user)
    redis_key_remote_history = 'msghistory:{}:{}'.format(to_user, from_user)
    # One message-list entry per participant.
    redis_key_local_msglist = 'msglist:{}'.format(from_user)
    redis_key_remote_msglist = 'msglist:{}'.format(to_user)
    try:
        redis_client = redis.StrictRedis.from_url(config['REDIS_REMOTE_URL'])
        redis_pipeline = redis_client.pipeline()
        # Sender's history, trimmed to the configured length.
        redis_pipeline.lpush(redis_key_local_history, message)
        redis_pipeline.ltrim(redis_key_local_history, 0, config['REDIS_HISTORY_LONG'])
        # Receiver's history, trimmed likewise.
        redis_pipeline.lpush(redis_key_remote_history, message)
        redis_pipeline.ltrim(redis_key_remote_history, 0, config['REDIS_HISTORY_LONG'])
        # Latest message shown in each user's conversation list.
        redis_pipeline.hset(redis_key_local_msglist, to_user, message)
        redis_pipeline.hset(redis_key_remote_msglist, from_user, message)
        redis_pipeline.execute()
    except Exception:
        # Narrowed from a bare except so Ctrl-C/SystemExit still propagate.
        logger.error("ERROR! Cannot connect to {}".format(config['REDIS_REMOTE_URL']))
        return False
def host_inspect(target, extensive):
    """Resolve *target* (ip or hostname), geolocate it, and optionally run
    extensive whois/info/dns gathering.

    Exits the process with status 1 when *target* is neither a valid ip
    nor a valid hostname.
    """
    if core.is_valid_ip(target):
        logger.debug("Ip Validation OK")
        logger.info("[+] Valid ip")
        logger.info("[*] Performing hostname conversion")
        try:
            value = core.get_host_by_ip(target)
            util.list_to_string(value)
        except Exception:
            # Narrowed from a bare except so Ctrl-C still aborts.
            logger.error("[-] ERROR: Cannot resolve hostname")
    elif core.is_valid_hostname(target):
        logger.debug("Host Validation OK")
        logger.info("[+] Valid host")
        logger.info("[*] Performing ip conversion")
        try:
            value = core.get_host_by_name(target)
            util.list_to_string(value)
        except Exception:
            # Narrowed from a bare except so Ctrl-C still aborts.
            logger.error("[-] ERROR: Cannot resolve hostname")
    else:
        msg = "[-] ERROR: You must provide a valid target. Given: "+ target
        showhelp()
        logger.error(msg)
        sys.exit(1)
    geo = core.ip_to_country(core.get_ip(target), GEOIPFILE)
    if geo:
        logger.info("[+] The host is situated in "+geo)
    else:
        logger.warning("[-] Cannot geolocalize the host")
    if extensive:
        logger.debug("Extensive probing")
        logger.info("[*] Starting extensive information gathering")
        # Return values were unused — presumably these log their own output;
        # TODO confirm against core.get_extensive_data.
        core.get_extensive_data(target, 0)  # whois
        core.get_extensive_data(target, 1)  # website information
        core.get_extensive_data(target, 2)  # dns
# (fragment) Tail of a getopt option-dispatch chain; the opening
# `if opt in ...` is outside this view.
elif opt in ('-e', '--extensive'): extensive = True
elif opt in ('-g', '--google'): google = True
elif opt in ('-o', '--output'): output = arg
else:
    # Unknown option: show usage and bail out.
    actions.header()
    actions.showhelp()
    sys.exit(1)

# A target is mandatory for every mode.
if not target:
    actions.header()
    actions.showhelp()
    msg = "[-] ERROR: You must provide a target."
    logger.error(msg)
    sys.exit(1)

# Google search and reverse-ip lookup are mutually exclusive modes.
if google and reverse:
    msg = "[-] Cannot do reverse ip lookup and google search togheter!"
    logger.error(msg)
    sys.exit(1)

def main():
    # Optionally tee INFO-level output to the file chosen with -o/--output.
    if output:
        handler = logging.FileHandler(output)
        handler.setLevel(logging.INFO)
        logger.addHandler(handler)
    logger.info('-----Start-----')
    # NOTE(review): the body of `if target:` continues beyond this chunk.
    if target:
def search(value):
    """Placeholder for the search probe; the feature is not implemented yet."""
    logger.debug("Search probing")
    logger.error("[-] Not Implemented Yet")