def dump_bucket(self, dry_run=False):
    """
    Download the bucket contents.

    Spawn download workers, then fetch the list of objects and queue
    any that do not already exist locally.
    """
    logger.info("Downloading bucket...")
    self._dry_run = dry_run

    _download_workers = [
        multiprocessing.Process(target=self._download_file)
        for _ in range(self._max_workers)
    ]
    for w in _download_workers:
        w.start()

    _objects = self._s3_client.list_objects()
    for _obj in _objects:
        logger.debug(f"Checking if '{_obj.key}' is downloaded")
        if not self._is_file_exists(_obj.key):
            self._download_queue.put(_obj.key)
            self._files_to_be_downloaded.release()

    # Send one sentinel per worker to tell it to stop
    for _ in range(self._max_workers):
        self._download_queue.put(None)
        self._files_to_be_downloaded.release()

    # Wait for workers to finish
    for w in _download_workers:
        w.join()
    logger.info("Bucket downloaded")
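# A minimal sketch of the worker loop that dump_bucket assumes: each
# _download_file process drains the shared queue until it sees the None
# sentinel. The body shown here (semaphore acquire, the download call) is
# an assumption for illustration, not the actual implementation.
def _download_file(self):
    while True:
        self._files_to_be_downloaded.acquire()  # wait for queued work
        key = self._download_queue.get()
        if key is None:  # sentinel: no more files, exit the process
            break
        if self._dry_run:
            logger.info("Dry run: would download '{}'".format(key))
            continue
        self._s3_client.download_file(key)  # hypothetical client call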
def run_rules(conf):
    data = rds.get_scan_data()
    exclusions = rds.get_exclusions()

    if not data:
        return

    for ip, values in data.items():
        rules = rule_manager(role='attacker')
        if 'ports' in values and len(values['ports']) > 0:
            for port in values['ports']:
                logger.info('Attacking Asset: {} on port: {}'.format(ip, port))
                for rule in rules.values():
                    # Skip the rule if the target is in the exclusions list.
                    if rule.rule in exclusions and ip in exclusions[rule.rule]:
                        logger.debug('Skipping rule {} for target {}'.format(
                            rule.rule, ip))
                        continue
                    # Only run rules at or below the allow_aggressive
                    # intensity configured for this scan.
                    if conf['config']['allow_aggressive'] >= rule.intensity:
                        thread = threading.Thread(target=rule.check_rule,
                                                  args=(ip, port, values, conf))
                        thread.start()
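# A minimal sketch of the rule interface that run_rules relies on: each rule
# exposes a name, an intensity level, and a check_rule entry point. This
# class is illustrative only; the real rules are loaded by rule_manager.
class ExampleRule:
    def __init__(self):
        self.rule = 'example_rule'   # key used for exclusion lookups
        self.intensity = 1           # compared against allow_aggressive

    def check_rule(self, ip, port, values, conf):
        # Probe the target and store any findings; body is an assumption.
        pass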
def calc_md5sum(rel_file_path):
    """
    Calculate the md5sum of a file.

    Read chunk by chunk to keep memory usage low on large files.
    """
    logger.debug("Calculating md5sum for `{}`".format(rel_file_path))
    hasher = hashlib.md5()
    with open(rel_file_path, 'rb') as afile:
        buf = afile.read(configs.READ_CHUNK_SIZE)
        while len(buf) > 0:
            hasher.update(buf)
            buf = afile.read(configs.READ_CHUNK_SIZE)
    return hasher.hexdigest()
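# Hypothetical usage: hash a local file before deciding whether to upload it.
# The path below is illustrative; READ_CHUNK_SIZE comes from the project's
# configs module in the real code.
md5sum_local = calc_md5sum('backup/data.bin')
logger.debug('Local md5sum: {}'.format(md5sum_local))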
def socket_banner(self, ip, port):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    socket_banner = None
    sock.settimeout(6)
    try:
        result = sock.connect_ex((ip, port))
        if result == 0:
            # Connection succeeded; grab whatever the service sends first.
            socket_banner = str(sock.recv(1024))
    except Exception as e:
        logger.debug('socket_banner {} {} {}'.format(ip, port, e))
    finally:
        sock.close()
    return socket_banner
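# Hypothetical usage: many plaintext services (SSH, SMTP, FTP) announce
# themselves on connect, so a banner grab on a discovered port often
# identifies the service. The IP and port below are illustrative.
banner = self.socket_banner('192.0.2.10', 22)
if banner:
    logger.info('Banner on 192.0.2.10:22 -> {}'.format(banner))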
def scanner():
    scanner = Scanner()
    logger.info('Scanner process started')

    while True:
        if not rds.is_session_active():
            time.sleep(10)
            continue

        conf = rds.get_scan_config()
        if not conf:
            time.sleep(10)
            continue

        hosts = rds.get_ips_to_scan(
            limit=conf['config']['scan_opts']['parallel_scan'])

        if hosts:
            conf = rds.get_scan_config()
            scan_data = scanner.scan(
                hosts,
                max_ports=conf['config']['scan_opts']['max_ports'],
                custom_ports=conf['config']['scan_opts']['custom_ports'],
                interface=conf['config']['scan_opts']['interface'])

            if scan_data:
                for host, values in scan_data.items():
                    if 'ports' in values and values['ports']:
                        logger.info('Discovered Asset: {}'.format(host))
                        logger.debug('Host: {}, Open Ports: {}'.format(
                            host, values['ports']))
                        rds.store_topology(host)
                        rds.store_sca(host, values)
                        rds.store_inv(host, values)
                    else:
                        if values['status_reason'] == 'echo-reply':
                            logger.info('Discovered Asset: {}'.format(host))
                            rds.store_topology(host)
def scanner():
    scanner = Scanner()
    logger.info('Scanner process started')

    while True:
        if not rds.is_session_active():
            time.sleep(10)
            continue

        conf = rds.get_scan_config()
        if not conf:
            time.sleep(10)
            continue

        c = ConfParser(conf)
        hosts = rds.get_ips_to_scan(limit=c.get_cfg_scan_threads())

        if hosts:
            conf = rds.get_scan_config()
            scan_data = scanner.scan(hosts,
                                     max_ports=c.get_cfg_max_ports(),
                                     custom_ports=c.get_cfg_custom_ports(),
                                     interface=c.get_cfg_netinterface())

            if scan_data:
                for host, values in scan_data.items():
                    if 'ports' in values and values['ports']:
                        logger.info('Discovered Asset: {}'.format(host))
                        logger.debug('Host: {}, Open Ports: {}'.format(
                            host, values['ports']))
                        rds.store_topology(host)
                        rds.store_sca(host, values)
                        rds.store_inv(host, values)
                    else:
                        if values['status_reason'] == 'echo-reply':
                            logger.info('Discovered Asset: {}'.format(host))
                            rds.store_topology(host)
def get_scan_data(self):
    kv = {}
    ip_key = None

    # Only take one key per call so results are consumed one at a time
    for k in self.r.scan_iter(match="sca_*"):
        ip_key = k.decode('utf-8')
        break

    if ip_key:
        data = self.r.get(ip_key)
        if data:
            try:
                result = pickle.loads(data)
                if result:
                    ip = ip_key.split('_')[1]
                    kv[ip] = result
                    self.r.delete(ip_key)
            except pickle.UnpicklingError as e:
                logger.error('Error unpickling %s' % e)
                logger.debug('IP Key: %s' % ip_key)

    return kv
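# A minimal sketch of the producer side that get_scan_data assumes: scan
# results are pickled under a "sca_<ip>" key so the attacker process can pop
# them one at a time. store_sca is assumed to look roughly like this.
def store_sca(self, ip, values):
    self.r.set('sca_{}'.format(ip), pickle.dumps(values))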
def attacker():
    count = 0
    logger.info('Attacker process started')

    while True:
        conf = rds.get_scan_config()
        if not conf:
            time.sleep(10)
            continue

        run_rules(conf)
        count += 1

        if count == conf['config']['scan_opts']['parallel_attack']:
            time.sleep(30)
            count = 0

        # Back off when rule threads pile up.
        if threading.active_count() > 50:
            logger.debug('Sleeping for 30 seconds to control threads '
                         '(Threads: {})'.format(threading.active_count()))
            time.sleep(30)
def http_request(self,
                 ip,
                 port,
                 method="GET",
                 params=None,
                 data=None,
                 json=None,
                 headers=None,
                 follow_redirects=True,
                 timeout=None,
                 uri='/'):
    resp = None

    if headers:
        self.headers = {**headers, **self.headers}

    if method not in ('GET', 'POST', 'OPTIONS', 'PUT', 'DELETE', 'HEAD'):
        logger.error('HTTP Method is not supported.')
        return

    if not timeout:
        timeout = self.global_timeout

    url = 'http://{}:{}{}'.format(ip, port, uri)
    if port == 443 or port == 8443 or '443' in str(port):
        url = 'https://{}:{}{}'.format(ip, port, uri)

    try:
        if method == 'GET':
            resp = requests.get(url, verify=False, timeout=timeout,
                                params=params,
                                allow_redirects=follow_redirects,
                                headers=self.headers)
        elif method == 'PUT':
            resp = requests.put(url, verify=False, timeout=timeout,
                                params=params, data=data, json=json,
                                allow_redirects=follow_redirects,
                                headers=self.headers)
        elif method == 'POST':
            resp = requests.post(url, verify=False, timeout=timeout,
                                 params=params, data=data, json=json,
                                 allow_redirects=follow_redirects,
                                 headers=self.headers)
        elif method == 'OPTIONS':
            resp = requests.options(url, verify=False, timeout=timeout,
                                    params=params,
                                    allow_redirects=follow_redirects,
                                    headers=self.headers)
        elif method == 'DELETE':
            resp = requests.delete(url, verify=False, timeout=timeout,
                                   params=params, data=data, json=json,
                                   allow_redirects=follow_redirects,
                                   headers=self.headers)
        elif method == 'HEAD':
            resp = requests.head(url, verify=False, timeout=timeout,
                                 params=params,
                                 allow_redirects=follow_redirects,
                                 headers=self.headers)
    except requests.exceptions.ConnectTimeout:
        logger.debug('http_request {} {} (Timeout)'.format(ip, port))
    except urllib3.exceptions.MaxRetryError:
        logger.debug('http_request {} {} (MaxRetryError)'.format(ip, port))
    except requests.exceptions.SSLError:
        logger.debug('http_request {} {} (SSL Error)'.format(ip, port))
    except requests.exceptions.ConnectionError as e:
        logger.debug('http_request {} {} (Connection Error: {})'.format(
            ip, port, e))
    # ReadTimeout must come before Timeout: it is a Timeout subclass and
    # would otherwise never be reached.
    except requests.exceptions.ReadTimeout:
        logger.debug('http_request {} {} (Read Timeout)'.format(ip, port))
    except requests.exceptions.Timeout:
        logger.debug('http_request {} {} {} (Timeout)'.format(ip, port, url))
    except ProtocolError:
        logger.debug('http_request {} {} (Protocol Error)'.format(ip, port))
    except RemoteDisconnected:
        logger.debug('http_request {} {} (Remote Disconnected)'.format(
            ip, port))
    except Exception as e:
        logger.debug('http_request {} {} (Unknown Error: {})'.format(
            ip, port, e))

    return resp
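# Hypothetical usage: probe a discovered service for an admin panel. The
# host, port, and URI are illustrative; http_request returns None on
# failure, so the response must be checked before use.
resp = self.http_request('192.0.2.10', 8443, method='GET', uri='/admin')
if resp and resp.status_code == 200:
    logger.info('Admin panel exposed at {}'.format(resp.url))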
def _reinitialize_client(self):
    logger.debug("Reinitializing S3Client, probably due to new bucket_name")
    self._client = S3Client(self._bucket_name)
def set_bucket_name(self, bucket_name):
    logger.debug("Setting bucket name to {}".format(bucket_name))
    self._bucket_name = bucket_name
    self._reinitialize_client()
def sync(self, dry_run=False, recursive=True, file_pattern="**"):
    """
    Scan files under `path` and upload any whose md5sum does not match
    the copy in the bucket.
    """
    self._dry_run = dry_run
    logger.info("Dry run is {}".format("on" if dry_run else "off"))

    # Navigate to target directory
    target_directory = os.path.abspath(self._target_path)
    logger.info(f"Target directory: {target_directory}")
    os.chdir(target_directory)

    # Create upload workers
    _upload_workers = [
        multiprocessing.Process(target=self._upload_file_task)
        for _ in range(self._max_workers)
    ]
    for p in _upload_workers:
        p.start()

    # Scan directories
    files_iter = glob.iglob(file_pattern, recursive=recursive)
    for rel_file_path in files_iter:
        if self._is_metastore(rel_file_path):
            # Ignore metastore files
            pass
        elif isfile(rel_file_path):
            # 1. File is not in the bucket -> queue for upload
            if not self._is_object_exists(rel_file_path):
                logger.debug(
                    "File doesn't exist, queuing file.. ({})".format(
                        rel_file_path))
                md5sum_local = calc_md5sum(rel_file_path)
                self._upload_queue.put((rel_file_path, md5sum_local))
                self._files_to_be_uploaded.release()
            # 2. File unchanged since last sync -> do nothing
            elif self._get_last_synced(rel_file_path) >= \
                    get_last_modified(
                        abs_file_path=os.path.abspath(rel_file_path)):
                pass
            # 3. File is in the bucket -> compare md5sums
            else:
                md5sum_local = calc_md5sum(rel_file_path)
                metadata_remote = self._get_object_metadata(rel_file_path)
                md5sum_remote = metadata_remote.get('md5sum', None)
                if md5sum_local != md5sum_remote:
                    # md5sums mismatched -> queue for upload
                    logger.debug(
                        "md5sums mismatched. File is being queued ({})".
                        format(rel_file_path))
                    self._upload_queue.put((rel_file_path, md5sum_local))
                    self._files_to_be_uploaded.release()
                else:
                    self._set_last_synced(rel_file_path=rel_file_path)

    # Send one sentinel per worker, then wait for workers to finish
    for _ in range(self._max_workers):
        self._upload_queue.put(None)
        self._files_to_be_uploaded.release()
    for p in _upload_workers:
        p.join()
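# A minimal sketch of the consumer side that sync assumes: each
# _upload_file_task process takes (rel_file_path, md5sum) tuples off the
# queue until the None sentinel arrives. The upload call and the metadata
# layout are assumptions, not the actual implementation.
def _upload_file_task(self):
    while True:
        self._files_to_be_uploaded.acquire()  # wait for queued work
        item = self._upload_queue.get()
        if item is None:  # sentinel: no more files, exit the process
            break
        rel_file_path, md5sum_local = item
        if self._dry_run:
            logger.info("Dry run: would upload '{}'".format(rel_file_path))
            continue
        # Hypothetical client call; the md5sum is stored as metadata so a
        # later sync can compare it against the local checksum.
        self._client.upload_file(rel_file_path,
                                 metadata={'md5sum': md5sum_local})
        self._set_last_synced(rel_file_path=rel_file_path)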
def http_request(self, ip, port, headers=None, follow_redirects=True, uri='/'):
    resp = None

    if headers:
        self.headers = {**headers, **self.headers}

    url = 'http://{}:{}{}'.format(ip, port, uri)
    if port == 443 or port == 8443 or '443' in str(port):
        url = 'https://{}:{}{}'.format(ip, port, uri)

    try:
        resp = requests.get(url, verify=False, timeout=8,
                            allow_redirects=follow_redirects,
                            headers=self.headers)
    except requests.exceptions.ConnectTimeout:
        logger.debug('http_request {} {} (Timeout)'.format(ip, port))
    except urllib3.exceptions.MaxRetryError:
        logger.debug('http_request {} {} (MaxRetryError)'.format(ip, port))
    except requests.exceptions.SSLError:
        logger.debug('http_request {} {} (SSL Error)'.format(ip, port))
    except requests.exceptions.ConnectionError as e:
        logger.debug('http_request {} {} (Connection Error: {})'.format(
            ip, port, e))
    # ReadTimeout must come before Timeout: it is a Timeout subclass and
    # would otherwise never be reached.
    except requests.exceptions.ReadTimeout:
        logger.debug('http_request {} {} (Read Timeout)'.format(ip, port))
    except requests.exceptions.Timeout:
        logger.debug('http_request {} {} {} (Timeout)'.format(ip, port, url))
    except ProtocolError:
        logger.debug('http_request {} {} (Protocol Error)'.format(ip, port))
    except RemoteDisconnected:
        logger.debug('http_request {} {} (Remote Disconnected)'.format(
            ip, port))
    except Exception as e:
        logger.debug('http_request {} {} (Unknown Error: {})'.format(
            ip, port, e))

    return resp