def get_https_rule_by_hostname(self):
    """Build egress/ingress firewall rules for HTTPS access to the stored URI host.

    Resolves ``self._uri.hostname`` and emits one TCP rule per resolved
    address, using the URI's port (default 443 when none is given).

    :return: list of rules, or None when the hostname does not resolve.
    """
    port = self._uri.port or 443
    addresses = self.get_ip_from_hostname(self._uri.hostname, None)
    if not addresses:
        _logger.error('{0}: no ip addresses found from lookup, this is unexpected.'.format(self.get_name()))
        return None
    rules = []
    for address in addresses:
        _logger.debug(
            '{0}: adding rules for: hostname: {1}, ip addr: {2}, port: {3}'.format(
                self.get_name(), self._uri.hostname, address, port))
        rules.append(
            create_iptables_egress_ingress_rule(
                address, port, 'tcp', self._slot_config_access,
                transport=ipt.TRANSPORT_AUTO))
    return rules
def get_ftp_rule_by_hostname(self):
    """Build firewall rules for FTP access to the stored URI host.

    Emits control-channel rules on the URI port (default 21) plus
    data-channel rules (port 20 and ephemeral ports) for each resolved
    address.

    :return: list of rules, or None when the hostname does not resolve.
    """
    port = self._uri.port or 21
    addresses = self.get_ip_from_hostname(self._uri.hostname, None)
    if not addresses:
        _logger.error('{0}: no ip addresses found from lookup, this is unexpected.'.format(self.get_name()))
        return None
    rules = []
    for address in addresses:
        _logger.debug(
            '{0}: adding rules for: hostname: {1}, ip addr: {2}, ftp ports: {3}, 20, etc'.format(
                self.get_name(), self._uri.hostname, address, port))
        # FTP control channel
        rules.append(create_iptables_egress_ingress_rule(
            address, port, u'tcp', self._slot_config_access,
            transport=ipt.TRANSPORT_AUTO))
        # FTP data transfer: fixed port 20 in both directions...
        rules.append(create_iptables_egress_rule_dest(
            address, 20, u'tcp', self._slot_config_access, u'ESTABLISHED',
            transport=ipt.TRANSPORT_AUTO))
        rules.append(create_iptables_ingress_rule_source(
            address, 20, u'tcp', self._slot_config_access, u'ESTABLISHED,RELATED',
            transport=ipt.TRANSPORT_AUTO))
        # ...plus established/related traffic on unspecified (ephemeral) ports.
        rules.append(create_iptables_egress_rule_dest(
            address, None, u'tcp', self._slot_config_access, u'ESTABLISHED,RELATED',
            transport=ipt.TRANSPORT_AUTO))
        rules.append(create_iptables_ingress_rule_source(
            address, None, u'tcp', self._slot_config_access, u'ESTABLISHED',
            transport=ipt.TRANSPORT_AUTO))
    return rules
def add_rule_by_url(self, url):
    """Create firewall rules granting access to the host named in *url*.

    FTP urls are delegated to add_ftp_rule_by_url(); http/https/rsync urls
    get one egress/ingress TCP rule per resolved address, defaulting to the
    scheme's well-known port when the url carries none.

    :param url: Complete url string
    :return: list of rules (possibly empty), or None when the url is empty,
             unparsable, uses an unsupported scheme, or was already seen.
    """
    if not url:
        _logger.debug('{0}: empty url given, unable to add rule'.format(self.get_name()))
        return None

    try:
        uri = parse.urlparse(url)
    except ValueError:
        # urlparse raises ValueError on malformed urls; the previous bare
        # "except:" also swallowed SystemExit/KeyboardInterrupt.
        _logger.error('{0}: error parsing url ({1})'.format(self.get_name(), url))
        return None

    if not uri.scheme or not uri.hostname or uri.scheme not in ('http', 'https', 'ftp', 'rsync'):
        return None

    # FTP needs extra data-channel rules; handled separately.
    if uri.scheme == 'ftp':
        return self.add_ftp_rule_by_url(uri)

    # Fall back to the scheme's well-known port when the url has none.
    port = uri.port
    if not port:
        port = {'http': 80, 'https': 443, 'rsync': 873}[uri.scheme]

    key = uri.hostname + ':' + str(port)

    # Avoid duplicate urls.
    if key in self._hostnames:
        return None
    self._hostnames.append(key)

    rules = list()
    ipaddrs = self.resolve_hostname(uri.hostname, port)
    if ipaddrs:
        for ipaddr in ipaddrs:
            rules.append(create_iptables_egress_ingress_rule(
                ipaddr, port, 'tcp', self._slot, transport=ipt.TRANSPORT_AUTO))
            _logger.debug('{0}: host: {1} ip: {2}:{3}'.format(
                self.get_name(), uri.hostname, ipaddr, port))
    return rules
def _add_server_access_rule(self):
    """
    Add a management server connection rule so we can connect
    :return:
    """
    access_rule = create_iptables_egress_ingress_rule(
        self._server, self._port, 'tcp', Slots.silentdune_server,
        transport=ipt.TRANSPORT_AUTO)
    # Ask the firewall module (via the parent queue) to insert the rule.
    self.send_parent_task(QueueTask(
        TASK_FIREWALL_INSERT_RULES,
        src_module=self.get_name(),
        dest_module=SilentDuneClientFirewallModule().get_name(),
        data=access_rule))
def _add_server_access_rule(self):
    """
    Add a management server connection rule so we can connect
    :return:
    """
    server_rule = create_iptables_egress_ingress_rule(
        self._server, self._port, 'tcp', Slots.silentdune_server,
        transport=ipt.TRANSPORT_AUTO)
    # Hand the rule to the firewall module through the parent task queue.
    insert_task = QueueTask(
        TASK_FIREWALL_INSERT_RULES,
        src_module=self.get_name(),
        dest_module=SilentDuneClientFirewallModule().get_name(),
        data=server_rule)
    self.send_parent_task(insert_task)
def get_https_rule_by_hostname(self):
    """Return egress/ingress rules allowing HTTPS access to the URI's host.

    :return: list of rules, or None if hostname lookup yields no addresses.
    """
    port = 443 if not self._uri.port else self._uri.port
    ip_list = self.get_ip_from_hostname(self._uri.hostname, None)
    if not ip_list:
        _logger.error('{0}: no ip addresses found from lookup, this is unexpected.'.format(self.get_name()))
        return None
    rules = []
    for ip in ip_list:
        _logger.debug('{0}: adding rules for: hostname: {1}, ip addr: {2}, port: {3}'.format(
            self.get_name(), self._uri.hostname, ip, port))
        rules.append(create_iptables_egress_ingress_rule(
            ip, port, 'tcp', self._slot_config_access, transport=ipt.TRANSPORT_AUTO))
    return rules
def add_ftp_rule_by_url(self, uri):
    """
    Add rules to allow FTP access based on uri value
    :param uri: urlparse uri value
    :return: rules
    """
    # FTP access may be administratively disabled; add nothing in that case.
    if self._disable_auto_updates_ftp:
        return None

    rules = []
    addresses = self.resolve_hostname(uri.hostname, 21)
    if not addresses:
        return rules

    for address in addresses:
        _logger.debug('{0}: adding ip: {1} from hostname: {2}'.format(
            self.get_name(), uri.scheme + '://' + address, uri.hostname))
        # FTP control channel on port 21.
        rules.append(create_iptables_egress_ingress_rule(
            address, 21, 'tcp', self._slot, transport=ipt.TRANSPORT_AUTO))
        # FTP data transfer: port 20 both directions...
        rules.append(create_iptables_egress_rule_dest(
            address, 20, 'tcp', self._slot, 'ESTABLISHED',
            transport=ipt.TRANSPORT_AUTO))
        rules.append(create_iptables_ingress_rule_source(
            address, 20, 'tcp', self._slot, 'ESTABLISHED,RELATED',
            transport=ipt.TRANSPORT_AUTO))
        # ...plus established/related traffic on unspecified ports.
        rules.append(create_iptables_egress_rule_dest(
            address, None, 'tcp', self._slot, 'ESTABLISHED,RELATED',
            transport=ipt.TRANSPORT_AUTO))
        rules.append(create_iptables_ingress_rule_source(
            address, None, 'tcp', self._slot, 'ESTABLISHED',
            transport=ipt.TRANSPORT_AUTO))
    return rules
def add_ftp_rule_by_url(self, uri):
    """
    Add rules to allow FTP access based on uri value
    :param uri: urlparse uri value
    :return: rules
    """
    # Honor the configuration switch disabling FTP auto-update rules.
    if self._disable_auto_updates_ftp:
        return None

    ftp_rules = []
    for ipaddr in (self.resolve_hostname(uri.hostname, 21) or []):
        _logger.debug('{0}: adding ip: {1} from hostname: {2}'.format(
            self.get_name(), uri.scheme + '://' + ipaddr, uri.hostname))
        # Control channel (port 21).
        ftp_rules.append(create_iptables_egress_ingress_rule(
            ipaddr, 21, 'tcp', self._slot, transport=ipt.TRANSPORT_AUTO))
        # Data channel rules: fixed port 20 and unspecified ports for
        # established/related connections.
        ftp_rules.append(create_iptables_egress_rule_dest(
            ipaddr, 20, 'tcp', self._slot, 'ESTABLISHED',
            transport=ipt.TRANSPORT_AUTO))
        ftp_rules.append(create_iptables_ingress_rule_source(
            ipaddr, 20, 'tcp', self._slot, 'ESTABLISHED,RELATED',
            transport=ipt.TRANSPORT_AUTO))
        ftp_rules.append(create_iptables_egress_rule_dest(
            ipaddr, None, 'tcp', self._slot, 'ESTABLISHED,RELATED',
            transport=ipt.TRANSPORT_AUTO))
        ftp_rules.append(create_iptables_ingress_rule_source(
            ipaddr, None, 'tcp', self._slot, 'ESTABLISHED',
            transport=ipt.TRANSPORT_AUTO))
    return ftp_rules
def add_rule_by_url(self, url):
    """Create firewall rules granting access to the host named in *url*.

    FTP urls are delegated to add_ftp_rule_by_url(); http/https/rsync urls
    get one egress/ingress TCP rule per resolved address, defaulting to the
    scheme's well-known port when the url carries none.

    :param url: Complete url string
    :return: list of rules (possibly empty), or None when the url is empty,
             unparsable, uses an unsupported scheme, or was already seen.
    """
    if not url:
        _logger.debug('{0}: empty url given, unable to add rule'.format(
            self.get_name()))
        return None

    try:
        uri = parse.urlparse(url)
    except ValueError:
        # urlparse raises ValueError on malformed urls; the previous bare
        # "except:" also swallowed SystemExit/KeyboardInterrupt.
        _logger.error('{0}: error parsing url ({1})'.format(
            self.get_name(), url))
        return None

    if not uri.scheme or not uri.hostname or uri.scheme not in (
            'http', 'https', 'ftp', 'rsync'):
        return None

    # FTP needs extra data-channel rules; handled separately.
    if uri.scheme == 'ftp':
        return self.add_ftp_rule_by_url(uri)

    # Fall back to the scheme's well-known port when the url has none.
    port = uri.port
    if not port:
        port = {'http': 80, 'https': 443, 'rsync': 873}[uri.scheme]

    key = uri.hostname + ':' + str(port)

    # Avoid duplicate urls.
    if key in self._hostnames:
        return None
    self._hostnames.append(key)

    rules = list()
    ipaddrs = self.resolve_hostname(uri.hostname, port)
    if ipaddrs:
        for ipaddr in ipaddrs:
            rules.append(create_iptables_egress_ingress_rule(
                ipaddr, port, 'tcp', self._slot, transport=ipt.TRANSPORT_AUTO))
            _logger.debug('{0}: host: {1} ip: {2}:{3}'.format(
                self.get_name(), uri.hostname, ipaddr, port))
    return rules
def add_repository_rules(self):
    """
    Add repository rules for rpm based systems.

    Scans the repo (and zypper service) config files for enabled
    repositories and creates access rules for their urls. When a
    metalink/mirrorlist repo is found, temporarily opens ports 80/443,
    rebuilds the package manager cache, then parses the cached mirror
    files and adds rules for each mirror url.

    :return: list whose entries are the results of add_rule_by_url()
             calls (each a list of rules or None).
    """
    # If it is not time to rebuild the package manager cache, just return the rules we have.
    if not self._rebuild_rules:
        return self._saved_rules

    self._rebuild_cache = False

    # reset hostnames list
    self._hostnames = list()

    rules = list()
    base_urls = list()
    mirror_urls = list()

    _logger.debug('{0}: adding rules for {1} repositories'.format(
        self.get_name(), self._dist))

    # Loop through all the repo files and gather url information
    repofiles = glob.glob(self._repo_config_base)

    # Add in any zypper service files.
    # NOTE(review): was "self._dist in 'suse'" — a substring test that also
    # matched '', 's', 'su', etc. Equality is the apparent intent.
    if self._dist == 'suse':
        repofiles += glob.glob(self._repo_service_base)

    for repofile in repofiles:
        config = ConfigParser.ConfigParser()
        if config.read(repofile):
            sections = config.sections()
            # Loop through sections looking for enabled repositories.
            for section in sections:
                if config.has_option(section, 'enabled'):
                    enabled = config.getint(section, 'enabled')
                else:
                    # Sections with no 'enabled' option default to enabled.
                    enabled = 1
                if not enabled:
                    continue

                _logger.debug('{0}: adding urls for section: {1}'.format(
                    self.get_name(), section))

                url = None
                if config.has_option(section, 'metalink'):
                    url = config.get(section, 'metalink')
                    # Mirror files must be fetched to learn mirror hosts.
                    self._rebuild_cache = True
                    if url:
                        mirror_urls.append([section, url])
                elif config.has_option(section, 'mirrorlist'):
                    url = config.get(section, 'mirrorlist')
                    self._rebuild_cache = True
                    if url:
                        mirror_urls.append([section, url])
                elif config.has_option(section, 'baseurl'):
                    url = config.get(section, 'baseurl')
                    if url:
                        base_urls.append([section, url])
                # Handle zypper service files.
                elif config.has_option(section, 'url'):
                    url = config.get(section, 'url')
                    if url:
                        base_urls.append([section, url])

                if not url:
                    _logger.debug(
                        '{0}: could not find repo section ({1}) url?'.format(
                            self.get_name(), section))

    # Loop through all the urls and add rules for them.
    for section, url in base_urls:
        # TODO: Add support for mirrorbrain style mirrorlists.
        # NOTE(review): add_rule_by_url() returns a list (or None), so
        # "rules" is a list of lists — confirm downstream consumers expect
        # that shape before flattening.
        rules.append(self.add_rule_by_url(url))

    # If we don't need to rebuild the package manager cache, just return the rules we have.
    if not self._rebuild_cache:
        return rules

    for section, url in mirror_urls:
        rules.append(self.add_rule_by_url(url))

    # Rebuild the package manager cache
    # Open up all port 80 and port 443 connections so cache rebuild succeeds.
    all_access = list()
    for open_port in (80, 443):
        for transport in (ipt.TRANSPORT_IPV4, ipt.TRANSPORT_IPV6):
            all_access.append(create_iptables_egress_ingress_rule(
                '', open_port, 'tcp', self._slot, transport=transport))

    # our parent will take care of clearing these rules once we return the real rules.
    self.add_url_rule_to_firewall(all_access)

    time.sleep(2)  # Give the firewall manager time to add the rules.

    if not self.rebuild_package_manager_cache():
        return rules

    # Check to see if we know where the package manage cache data is.
    if not self._repo_cache_base:
        return rules

    # loop through the mirror list and parse the mirrorlist or metalink file.
    for section, url in mirror_urls:
        # Renamed from "file" to avoid shadowing the builtin.
        cache_file, file_type = self.get_cache_file(section)
        if cache_file:
            if file_type == 'mirrorlist':
                urls = self.get_mirrorlist_urls_from_file(cache_file, section)
            else:
                urls = self.get_metalink_urls_from_file(cache_file, section)
            if urls:
                for mirror_url in urls:
                    if mirror_url:
                        rules.append(self.add_rule_by_url(mirror_url))

    self._cache_last_rebuilt = datetime.now()
    self._saved_rules = rules
    return rules
def get_ftp_rule_by_hostname(self):
    """Return firewall rules allowing FTP access to the stored URI's host.

    Adds control-channel rules on the URI port (default 21) and
    data-channel rules for each resolved address.

    :return: list of rules, or None if hostname lookup yields no addresses.
    """
    port = 21 if not self._uri.port else self._uri.port
    ip_list = self.get_ip_from_hostname(self._uri.hostname, None)
    if not ip_list:
        _logger.error(
            '{0}: no ip addresses found from lookup, this is unexpected.'.format(
                self.get_name()))
        return None
    rules = []
    for ip in ip_list:
        _logger.debug(
            '{0}: adding rules for: hostname: {1}, ip addr: {2}, ftp ports: {3}, 20, etc'.format(
                self.get_name(), self._uri.hostname, ip, port))
        # FTP control channel.
        rules.append(create_iptables_egress_ingress_rule(
            ip, port, u'tcp', self._slot_config_access,
            transport=ipt.TRANSPORT_AUTO))
        # FTP data transfer: fixed port 20 in both directions, plus
        # established/related traffic on unspecified ports.
        rules.append(create_iptables_egress_rule_dest(
            ip, 20, u'tcp', self._slot_config_access, u'ESTABLISHED',
            transport=ipt.TRANSPORT_AUTO))
        rules.append(create_iptables_ingress_rule_source(
            ip, 20, u'tcp', self._slot_config_access, u'ESTABLISHED,RELATED',
            transport=ipt.TRANSPORT_AUTO))
        rules.append(create_iptables_egress_rule_dest(
            ip, None, u'tcp', self._slot_config_access, u'ESTABLISHED,RELATED',
            transport=ipt.TRANSPORT_AUTO))
        rules.append(create_iptables_ingress_rule_source(
            ip, None, u'tcp', self._slot_config_access, u'ESTABLISHED',
            transport=ipt.TRANSPORT_AUTO))
    return rules
def add_repository_rules(self):
    """
    Add repository rules for rpm based systems.

    Scans the repo (and zypper service) config files for enabled
    repositories and creates access rules for their urls. When a
    metalink/mirrorlist repo is found, temporarily opens ports 80/443,
    rebuilds the package manager cache, then parses the cached mirror
    files and adds rules for each mirror url.

    :return: list whose entries are the results of add_rule_by_url()
             calls (each a list of rules or None).
    """
    # If it is not time to rebuild the package manager cache, just return the rules we have.
    if not self._rebuild_rules:
        return self._saved_rules

    self._rebuild_cache = False

    # reset hostnames list
    self._hostnames = list()

    rules = list()
    base_urls = list()
    mirror_urls = list()

    _logger.debug('{0}: adding rules for {1} repositories'.format(
        self.get_name(), self._dist))

    # Loop through all the repo files and gather url information
    repofiles = glob.glob(self._repo_config_base)

    # Add in any zypper service files.
    # NOTE(review): was "self._dist in 'suse'" — a substring test that also
    # matched '', 's', 'su', etc. Equality is the apparent intent.
    if self._dist == 'suse':
        repofiles += glob.glob(self._repo_service_base)

    for repofile in repofiles:
        config = ConfigParser.ConfigParser()
        if config.read(repofile):
            sections = config.sections()
            # Loop through sections looking for enabled repositories.
            for section in sections:
                if config.has_option(section, 'enabled'):
                    enabled = config.getint(section, 'enabled')
                else:
                    # Sections with no 'enabled' option default to enabled.
                    enabled = 1
                if not enabled:
                    continue

                _logger.debug('{0}: adding urls for section: {1}'.format(
                    self.get_name(), section))

                url = None
                if config.has_option(section, 'metalink'):
                    url = config.get(section, 'metalink')
                    # Mirror files must be fetched to learn mirror hosts.
                    self._rebuild_cache = True
                    if url:
                        mirror_urls.append([section, url])
                elif config.has_option(section, 'mirrorlist'):
                    url = config.get(section, 'mirrorlist')
                    self._rebuild_cache = True
                    if url:
                        mirror_urls.append([section, url])
                elif config.has_option(section, 'baseurl'):
                    url = config.get(section, 'baseurl')
                    if url:
                        base_urls.append([section, url])
                # Handle zypper service files.
                elif config.has_option(section, 'url'):
                    url = config.get(section, 'url')
                    if url:
                        base_urls.append([section, url])

                if not url:
                    _logger.debug(
                        '{0}: could not find repo section ({1}) url?'.format(
                            self.get_name(), section))

    # Loop through all the urls and add rules for them.
    for section, url in base_urls:
        # TODO: Add support for mirrorbrain style mirrorlists.
        # NOTE(review): add_rule_by_url() returns a list (or None), so
        # "rules" is a list of lists — confirm downstream consumers expect
        # that shape before flattening.
        rules.append(self.add_rule_by_url(url))

    # If we don't need to rebuild the package manager cache, just return the rules we have.
    if not self._rebuild_cache:
        return rules

    for section, url in mirror_urls:
        rules.append(self.add_rule_by_url(url))

    # Rebuild the package manager cache
    # Open up all port 80 and port 443 connections so cache rebuild succeeds.
    all_access = list()
    for open_port in (80, 443):
        for transport in (ipt.TRANSPORT_IPV4, ipt.TRANSPORT_IPV6):
            all_access.append(create_iptables_egress_ingress_rule(
                '', open_port, 'tcp', self._slot, transport=transport))

    # our parent will take care of clearing these rules once we return the real rules.
    self.add_url_rule_to_firewall(all_access)

    time.sleep(2)  # Give the firewall manager time to add the rules.

    if not self.rebuild_package_manager_cache():
        return rules

    # Check to see if we know where the package manage cache data is.
    if not self._repo_cache_base:
        return rules

    # loop through the mirror list and parse the mirrorlist or metalink file.
    for section, url in mirror_urls:
        # Renamed from "file" to avoid shadowing the builtin.
        cache_file, file_type = self.get_cache_file(section)
        if cache_file:
            if file_type == 'mirrorlist':
                urls = self.get_mirrorlist_urls_from_file(cache_file, section)
            else:
                urls = self.get_metalink_urls_from_file(cache_file, section)
            if urls:
                for mirror_url in urls:
                    if mirror_url:
                        rules.append(self.add_rule_by_url(mirror_url))

    self._cache_last_rebuilt = datetime.now()
    self._saved_rules = rules
    return rules