Code Example #1
def save_model(self, request, obj, form, change):
    remote_addr = request.META['REMOTE_ADDR']
    ip_lan = form.cleaned_data['ip_lan']
    mask_lan = form.cleaned_data['mask_lan']
    dhcp = form.cleaned_data['dhcp']
    ip_start = form.cleaned_data['ip_start']
    ip_end = form.cleaned_data['ip_end']
    # The LAN interface network; the DHCP range must fall inside it.
    iface_range = iptools.IpRange(ip_lan + '/' + mask_lan)
    if dhcp:
        if ip_start == ip_lan or ip_end == ip_lan or ip_start == ip_end:
            messages.set_level(request, messages.ERROR)
            messages.error(request, "L'IP dal range non puo' essere uguale dal IP della rete associata")
        elif ip_start in iface_range and ip_end in iface_range:
            if remote_addr not in iface_range:
                messages.set_level(request, messages.WARNING)
                messages.warning(request,
                                 "Impostare un IP diverso da dove e' connesso puo' causare perdida di connessione. "
                                 "Ricordi cambiare dopo l'IP per accedere di nuovo.")
            super(LANAdmin, self).save_model(request, obj, form, change)
        else:
            messages.set_level(request, messages.ERROR)
            messages.error(request, "Questa network non appartiene all'interfaccia LAN.")
    else:
        # DHCP disabled: zero out the stored range before saving.
        obj.ip_start = '0.0.0.0'
        obj.ip_end = '0.0.0.0'
        if remote_addr not in iface_range:
            messages.set_level(request, messages.WARNING)
            messages.warning(request,
                             "Impostare un IP diverso da dove e' connesso puo' causare perdida di connessione. "
                             "Ricordi cambiare dopo l'IP per accedere di nuovo.")
        super(LANAdmin, self).save_model(request, obj, form, change)
Code Example #2
def cidr_comparison(test, resource, key, value):
    # A /N prefix leaves (32 - N) host bits, i.e. 2**(32 - N) addresses.
    ip_range = iptools.IpRange(resource[key])
    host_bits = 32 - int(value)
    return function_dict[test](len(ip_range), 2 ** host_bits)
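function_dict is defined outside this snippet. A minimal sketch of how it might be wired up, plus a sanity check of the host-bit arithmetic (a /24 leaves 32 - 24 = 8 host bits, i.e. 2**8 = 256 addresses); the function_dict contents here are an assumption, not part of the original:

import operator

import iptools

# Hypothetical stand-in for the external function_dict used above.
function_dict = {'eq': operator.eq, 'ge': operator.ge, 'le': operator.le}

assert len(iptools.IpRange('10.0.0.0/24')) == 2 ** 8  # 256 addresses
assert cidr_comparison('eq', {'cidr': '10.0.0.0/24'}, 'cidr', '24')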
Code Example #3
def network_address_validator(address, obj_range):
    """validate that the given IP address and netmask really form the network address of the range;
    e.g. 10.11.12.0/24 is valid.
    Args:
        address (unicode): a given address (e.g. "127.0.0.1")
        obj_range (iptools.IpRange): an object instance, e.g. created by IpRange("::1/128")

    Returns:
        True, or raises ValidationError

    Examples:
        >>> network_address_validator(u'10.11.12.0', iptools.IpRange('10.11.12.0/24'))
        True

        >>> network_address_validator(u'10.11.12.5', iptools.IpRange('10.11.12.0/24'))
        Traceback (most recent call last):
          ...
        ValidationError: [u'Wrong network address for given mask, should be 10.11.12.0']

    """

    if iptools.IpRange(address).startIp != obj_range.startIp:
        raise ValidationError(
            _("Wrong network address for given mask, should be " +
              unicode(obj_range[0])))
    return True
Code Example #4
def ip_range(self):
    try:
        return iptools.IpRange(self.cidr)
    except ValueError:
        # 2015-11-23 (RH): TODO when and how can parsing the IpRange fail?!
        logger.warning("unable to create an ip_range from input: %r", self.cidr)
        return None
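Regarding the TODO above: parsing fails when self.cidr is not a valid address or CIDR block. A small sketch of inputs that get rejected; the exact exception type can vary by iptools version, so both TypeError and ValueError are caught here as a precaution:

import iptools

for bad in ('10.0.0.0/33', '999.1.1.1', 'not-an-ip'):
    try:
        iptools.IpRange(bad)
    except (TypeError, ValueError) as e:
        print('rejected %r: %s' % (bad, e))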
Code Example #5
    def add_ip_range(self, ip_range, value):
        """Adds an entry to this map.

        ip_range can be in the following forms:

            "1.2.3.4"
            "1.2.3.0/8"
            ("1.2.3.4", "1.2.3.44")
        """
        # Convert ranges in CIDR format into (start, end) tuple
        if isinstance(ip_range, six.string_types) and "/" in ip_range:
            # ignore bad value
            if not iptools.validate_cidr(ip_range):
                return
            ip_range = iptools.cidr2block(ip_range)

        # Find the integer representation of first 2 parts of the start and end IPs
        if isinstance(ip_range, tuple):
            # ignore bad ips
            if not iptools.validate_ip(ip_range[0]) or not iptools.validate_ip(
                    ip_range[1]):
                return

            # Take the first 2 parts of the begin and end ip as integer
            start = iptools.ip2long(ip_range[0]) >> 16
            end = iptools.ip2long(ip_range[1]) >> 16
        else:
            start = iptools.ip2long(ip_range) >> 16
            end = start

        # for each integer in the range add an entry.
        for i in range(start, end + 1):
            self.ip_ranges.setdefault(i, {})[iptools.IpRange(ip_range)] = value
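Because ranges are bucketed by the integer value of their first two octets (and a range spanning several /16 blocks is stored under each bucket it touches), a lookup only has to scan the ranges sharing the query address's /16 prefix. A minimal lookup sketch over the same self.ip_ranges structure; the lookup_ip name and the standalone-function form are assumptions, not part of the original class:

import iptools

def lookup_ip(ip_ranges, ip):
    """Return the value of the first stored range containing ip, or None."""
    bucket = ip_ranges.get(iptools.ip2long(ip) >> 16, {})
    for ip_range, value in bucket.items():
        if ip in ip_range:
            return value
    return None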
Code Example #6
def generate_ips(ip_range):
    # Accept CIDR / single-IP syntax first; fall back to "start-end" notation.
    try:
        return iptools.IpRangeList(ip_range)
    except Exception:
        ip_start, ip_end = ip_range.split("-")
        return iptools.IpRange(ip_start, ip_end)
Code Example #7
def ip_cidr_filter(log, cidr):
    # No CIDR given means no filtering.
    if cidr is None:
        return True
    return log[0] in iptools.IpRange(cidr)
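Example usage, assuming log records are sequences whose first element is the client IP (the sample values below are illustrative only):

log = ('192.168.1.50', 'GET', '/index.html')
print(ip_cidr_filter(log, '192.168.1.0/24'))  # True: IP inside the block
print(ip_cidr_filter(log, '10.0.0.0/8'))      # False: IP outside the block
print(ip_cidr_filter(log, None))              # True: no filter configured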
Code Example #8
    def matches(self, value):
        # is this CIDR notation?
        if '/' in value:
            try:
                return self.value in iptools.IpRange(value)
            except (TypeError, ValueError):
                # not a parsable CIDR; fall back to the exact comparison
                pass

        # otherwise it has to match exactly
        return self.value == value
Code Example #9
def save_model(self, request, obj, form, change):
    ip_check = form.cleaned_data['ip']
    # Query the configured LAN settings once instead of twice.
    lan = LAN.objects.values('ip_lan', 'mask_lan')[0]
    dhcp_range = iptools.IpRange(lan['ip_lan'] + '/' + lan['mask_lan'])
    if str(ip_check) == str(lan['ip_lan']):
        messages.set_level(request, messages.ERROR)
        messages.error(request, "L'indirizzo IP e' gia' in utilizzo nella impostazione rete. Prova un'altro.")
    elif ip_check in dhcp_range:
        super(dhcptableAdmin, self).save_model(request, obj, form, change)
    else:
        messages.set_level(request, messages.ERROR)
        messages.error(request, "L'indirizzo IP non appartiene alla rete impostata nella voce Networking. Verifiche e provi di nuovo")
Code Example #10
    def testV4MappedAddressInIPv6Range(self):
        """
        Given that the user has configured an IPv4 range
        When the server receives a connection from a host in that range
         And the network stack presents that address in v4 mapped format
        Then the address should be recognized as being in the IPv4 range.
        """
        fixture = iptools.IpRange('192.168.0.1/24')

        self.assertTrue('192.168.0.12' in fixture)
        self.assertFalse('192.168.1.12' in fixture)

        self.assertTrue('::ffff:192.168.0.12' in fixture)
        self.assertFalse('::ffff:192.168.1.12' in fixture)
Code Example #11
File: PacketFromTest.py  Project: netcdl/netcdl
    def run(self, packets):
        self.success = not self.should_pass
        for p in packets:
            try:
                src_ip = p['payload']['fields'].get('src', None)
                if self.type == "network" and src_ip in iptools.IpRange(self.target):
                    self.success = self.should_pass
                    break
                elif src_ip == self.target:
                    self.success = self.should_pass
                    break
            except (KeyError, TypeError, ValueError):
                # packet without the expected fields, or an unparsable target
                continue

        return self.create_result()
Code Example #12
File: asset.py  Project: code4days/ACE
    def __init__(self, *args, **kwargs):
        super(NetworkIdentifier, self).__init__(*args, **kwargs)
        self._networks = [] # list of _NetworkDefinition
        
        # load the network definitions from the CSV file
        with open(os.path.join(saq.SAQ_HOME, saq.CONFIG.get(self.config_section, 'csv_file')), 'r') as fp:
            reader = csv.reader(fp)
            # these are pulled from splunk; these are the expected header names
            header = next(reader)
            assert header[0] == 'Indicator'
            assert header[1] == 'Indicator_Type'
            for row in reader:
                #logging.debug("loading {0} = {1}".format(row[0], row[1]))
                self._networks.append(_NetworkDefinition(iptools.IpRange(row[0]), row[1]))

        logging.debug("loaded {0} network definitions".format(len(self._networks)))
Code Example #13
    def clean(self):
        """validate fields
        https://docs.djangoproject.com/en/1.8/ref/models/instances/#django.db.models.Model.clean
        This method should be used to provide custom model validation,
        and to modify attributes on your model if desired.
        Note that a model's clean() method is not invoked when you call the
        model's save() method, but it is invoked (via full_clean()) when a
        ModelForm's is_valid() runs.

        """

        # make sure db field address_integer is set to the correct value
        """
        if not self.address_integer == self.address_int:
            self.address_integer = self.address_int
        """
        if not self.address_binary == self.address_bin:
            self.address_binary = self.address_bin

        # make sure that given address is really the startIp of the range (with given mask)
        if self.ip_range:
            network_address_validator(self.address,
                                      iptools.IpRange(self.ip_range))

        # check for duplicates
        # TODO (RH) 2015-11-29: duplicate handling?!
        self.check_is_duplicate()

        # check whether new RangeV4 is really a child of selected parent
        # TODO (RH) 2015-11-29: should the user really be allowed / required to set parent?!
        if self.parent:
            if self.address not in self.parent.ip_range:
                raise ValidationError(
                    _(self.cidr + " is not a subnet of " + self.parent.cidr))

        # insert into tree at correct position (no matter what user selected as "parent")
        try:
            self.insert_into_tree()
        except IntegrityError:
            logger.debug(
                "Integrity because object is not new.. so that's fine.")

        # 2015-11-29 (RH): TODO does this hurt? Shouldn't really!
        # play it safe.. rebuild on every save

        self.__class__.objects.rebuild()

        return self
Code Example #14
File: xipescan.py  Project: ch0ks/xipescan
def discovery(file_name, output_file):
    """
    This function performs a discovery scan using predefined parameters; once finished it returns a list of the active hosts.

    :param file_name: file containing the hosts to scan, one host per line.
    :type file_name: str
    :param output_file: file where to write the active hosts found.
    :type output_file: str
    :return: a list of the active hosts.
    """
    nm = nmap.PortScanner()
    tcp_ports = "21-23,25,53,80,88,110-111,135,139,443,445,515,1433,1521,2222,8080,8443"
    udp_ports = "53,111,135,137,161,500"
    nm_arguments = "-sP -Pn -PU" + udp_ports + " -PS" + tcp_ports

    active_ips = list()
    iplist = list()
    host_list = filetolist(file_name)
    for host in host_list:
        if "/" in host:
            iplist += iptools.IpRange(host)
        else:
            iplist.append(host)
    iplist = remove_duplicates_in_list(iplist)
    iplist.sort()

    printMessage("Scanning a total of %i hosts." % len(iplist))
    for host in iplist:
        printMessage("Scanning %s" % host)
        nm.scan(hosts=host, arguments=nm_arguments)
        active_ips += nm.all_hosts()

    if not active_ips:
        printError("No active hosts found.")
        sys.exit(1)

    printSuccess("Scan completed.")
    active_ips = remove_duplicates_in_list(active_ips)
    printSuccess("Found %i active hosts." % len(active_ips))
    with open(output_file, "w") as f:
        f.write("\n".join(active_ips))
    printSuccess("File %s created." % output_file)
    return active_ips
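The iplist += iptools.IpRange(host) line above works because IpRange behaves as a sequence of address strings; a quick illustration with placeholder addresses:

import iptools

hosts = ['203.0.113.10']
hosts += iptools.IpRange('192.0.2.0/30')  # expands the block to four addresses
print(hosts)
# ['203.0.113.10', '192.0.2.0', '192.0.2.1', '192.0.2.2', '192.0.2.3']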
Code Example #15
def save_model(self, request, obj, form, change):
    ip_wan = form.cleaned_data['ip_wan']
    mask_wan = form.cleaned_data['mask_wan']
    gateway = form.cleaned_data['gateway']
    dns1 = form.cleaned_data['dns1']
    dns2 = form.cleaned_data['dns2']
    if dns1 == dns2:
        messages.set_level(request, messages.ERROR)
        messages.error(request, "Gli IP dal DNS non devono essere uguali")
    elif ip_wan == gateway:
        messages.set_level(request, messages.ERROR)
        messages.error(request, "L'IP dal gateway non puo' essere uguale dal IP della WAN")
    else:
        # The gateway must fall inside the WAN interface network.
        wan_range = iptools.IpRange(ip_wan + '/' + mask_wan)
        if gateway in wan_range:
            super(WANAdmin, self).save_model(request, obj, form, change)
        else:
            messages.set_level(request, messages.ERROR)
            messages.error(request, "Questo gateway non appartiene all'interfaccia WAN.")
Code Example #16
def calculate_min_max(ip):
    # IpRange is an indexable sequence: [0] is the first address, [-1] the last.
    ip_range = iptools.IpRange(ip)
    return inflate_ip(ip_range[0]), inflate_ip(ip_range[-1])
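inflate_ip is defined elsewhere; a hypothetical zero-padding version is assumed below just to make the sketch runnable:

import iptools

def inflate_ip(ip):
    # Hypothetical stand-in: zero-pad each octet ('10.1.2.0' -> '010.001.002.000').
    return '.'.join('%03d' % int(part) for part in ip.split('.'))

print(calculate_min_max('10.1.2.0/24'))
# ('010.001.002.000', '010.001.002.255')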
Code Example #17
File: enumXFF.py  Project: b6938236/enumXFF
def generate_ips(ip_range):
    # Expand a "start-end" string into an inclusive IpRange.
    ip_start, ip_end = ip_range.split("-")
    return iptools.IpRange(ip_start, ip_end)
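Example usage; the returned IpRange includes both endpoints:

for ip in generate_ips('192.168.0.1-192.168.0.3'):
    print(ip)
# 192.168.0.1
# 192.168.0.2
# 192.168.0.3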
Code Example #18
from collections import defaultdict
from csv import DictReader
from decimal import Decimal

import iptools

import heatmap

# The DB update is a replace, so all the old data should be removed first.
heatmap.Location.query.delete()

# Using decimal.Decimal because IPv6 address space is huge, way bigger than
# sys.maxint.
counts = defaultdict(Decimal)

# Read in each line and update the IP address count for each location. Store
# them in the dict without updating the DB yet, so we can get the totals up
# front and avoid duplicates.
with open('GeoLite2-City-Blocks-IPv6.csv') as f:
    reader = DictReader(f)
    for row in reader:
        location = (float(row['latitude']), float(row['longitude']))
        counts[location] += Decimal(len(iptools.IpRange(row['network'])))

# Get the total for all locations so we can calculate the percent of each.
total_count = sum(counts.values())

# Now we calculate the percentage for each location and add them all to the DB.
for (latitude, longitude), count in counts.items():
    percent = count / total_count
    location = heatmap.Location(latitude, longitude, percent)
    heatmap.db.session.add(location)

# It'd be a shame to waste all that time by throwing everything away now...
heatmap.db.session.commit()
Code Example #19
File: __init__.py  Project: IntegralDefense/ACE
def initialize(saq_home=None,
               config_paths=None,
               logging_config_path=None,
               args=None,
               relative_dir=None,
               unittest=False):

    from saq.database import initialize_database, initialize_node

    global API_PREFIX
    global CA_CHAIN_PATH
    global COMPANY_ID
    global COMPANY_NAME
    global CONFIG
    global CONFIG_PATHS
    global DAEMON_MODE
    global DATA_DIR
    global DEFAULT_ENCODING
    global DUMP_TRACEBACKS
    global ENCRYPTION_PASSWORD
    global EXCLUDED_SLA_ALERT_TYPES
    global EXECUTION_THREAD_LONG_TIMEOUT
    global FORCED_ALERTS
    global GLOBAL_SLA_SETTINGS
    global GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES
    global INSTANCE_TYPE
    global LOCK_TIMEOUT_SECONDS
    global LOG_DIRECTORY
    global LOG_LEVEL
    global MANAGED_NETWORKS
    global MODULE_STATS_DIR
    global OTHER_PROXIES
    global OTHER_SLA_SETTINGS
    global PROXIES
    global SAQ_HOME
    global SAQ_NODE
    global SAQ_NODE_ID
    global SAQ_RELATIVE_DIR
    global SEMAPHORES_ENABLED
    global STATS_DIR
    global TEMP_DIR
    global TOR_PROXY
    global YSS_BASE_DIR
    global YSS_SOCKET_DIR

    SAQ_HOME = None
    SAQ_NODE = None
    SAQ_NODE_ID = None
    API_PREFIX = None
    SAQ_RELATIVE_DIR = None
    CONFIG = None
    CONFIG_PATHS = []
    DATA_DIR = None
    TEMP_DIR = None
    DEFAULT_ENCODING = None
    SEMAPHORES_ENABLED = False
    PROXIES = {}
    OTHER_PROXIES = {}
    TOR_PROXY = None
    # list of iptools.IpRange objects defined in [network_configuration]
    MANAGED_NETWORKS = None
    # set this to True to force all analysis to result in an alert being generated
    FORCED_ALERTS = False
    # the gpg private key password for encrypting/decrypting archive files
    # this can be provided on the command line so that these files can also be analyzed
    ENCRYPTION_PASSWORD = None

    # the global log level setting
    LOG_LEVEL = logging.INFO
    # global logging directory (relative to DATA_DIR)
    LOG_DIRECTORY = None

    # directory containing statistical runtime info
    STATS_DIR = None
    MODULE_STATS_DIR = None

    # are we running as a daemon in the background?
    DAEMON_MODE = False

    # path to the certificate chain used by all SSL certs
    CA_CHAIN_PATH = None

    # what type of instance is this?
    INSTANCE_TYPE = INSTANCE_TYPE_PRODUCTION

    # SLA settings
    GLOBAL_SLA_SETTINGS = None
    OTHER_SLA_SETTINGS = []
    EXCLUDED_SLA_ALERT_TYPES = []

    # Yara Scanner Server base directory
    YSS_BASE_DIR = None
    YSS_SOCKET_DIR = None

    # set to True to cause tracebacks to be dumped to standard output
    # useful when debugging or testing
    DUMP_TRACEBACKS = False

    # the amount of time (in seconds) that a lock in the locks table is valid
    LOCK_TIMEOUT_SECONDS = None

    # amount of time (in seconds) before a process blows up because a threaded module won't stop
    EXECUTION_THREAD_LONG_TIMEOUT = None

    # the company/customer this node belongs to
    COMPANY_NAME = None
    COMPANY_ID = None

    # go ahead and try to figure out what text encoding we're using
    DEFAULT_ENCODING = locale.getpreferredencoding()

    # list of observable types we want to exclude from whitelisting (via the GUI)
    GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES = []

    # do we want to force alerts?
    if args:
        FORCED_ALERTS = args.force_alerts

    # what is the root directory of the entire system?
    if saq_home is not None:
        SAQ_HOME = saq_home
    elif 'SAQ_HOME' in os.environ:
        SAQ_HOME = os.environ['SAQ_HOME']
    else:
        SAQ_HOME = '.'

    if not os.path.isdir(SAQ_HOME):
        sys.stderr.write("invalid root SAQ directory {0}\n".format(SAQ_HOME))
        sys.exit(1)

    # XXX not sure we need this SAQ_RELATIVE_DIR anymore -- check it out
    # this system was originally designed to run out of /opt/saq
    # later we modified to run out of anywhere for command line correlation
    # when running the GUI in apache you have no control over the current working directory
    # so we specify what directory we'd *want* to be running out of here (even if we're not actually)
    # this only matters when loading alerts
    # this defaults to the current working directory
    SAQ_RELATIVE_DIR = os.path.relpath(os.getcwd(), start=SAQ_HOME)
    if relative_dir:
        SAQ_RELATIVE_DIR = relative_dir

    # load configuration file
    # defaults to $SAQ_HOME/etc/saq.ini
    if args:
        if args.config_paths:
            config_paths = args.config_paths

    if config_paths is None:
        config_paths = []

    # make each relative config path absolute to SAQ_HOME
    CONFIG_PATHS = [
        os.path.join(SAQ_HOME, p) if not os.path.isabs(p) else p
        for p in config_paths
    ]

    # add any config files specified in SAQ_CONFIG_PATHS env var (comma-separated)
    #sys.stderr.write("SAQ_CONFIG_PATHS = {}\n".format(os.environ['SAQ_CONFIG_PATHS']))
    if 'SAQ_CONFIG_PATHS' in os.environ:
        for config_path in os.environ['SAQ_CONFIG_PATHS'].split(','):
            config_path = config_path.strip()
            if not os.path.isabs(config_path):
                config_path = os.path.join(SAQ_HOME, config_path)
            if not os.path.exists(config_path):
                sys.stderr.write(
                    "WARNING: config path {} specified in SAQ_CONFIG_PATHS env var does not exist\n"
                    .format(config_path))
            else:
                if config_path not in CONFIG_PATHS:
                    CONFIG_PATHS.append(config_path)

    # if $SAQ_HOME/etc/saq.ini exists then we use that as the last config if it's not already specified
    default_config_path = os.path.join(SAQ_HOME, 'etc', 'saq.ini')

    # use unit test config if we are running a unit test
    if unittest:
        default_config_path = os.path.join(SAQ_HOME, 'etc', 'saq.unittest.ini')

    if os.path.exists(default_config_path):
        if default_config_path not in CONFIG_PATHS:
            CONFIG_PATHS.append(default_config_path)

    try:
        load_configuration()
    except Exception as e:
        sys.stderr.write("ERROR: unable to load configuration: {0}".format(
            str(e)))
        sys.exit(1)

    DATA_DIR = os.path.join(SAQ_HOME, CONFIG['global']['data_dir'])
    TEMP_DIR = os.path.join(DATA_DIR, CONFIG['global']['tmp_dir'])
    COMPANY_NAME = CONFIG['global']['company_name']
    COMPANY_ID = CONFIG['global'].getint('company_id')

    minutes, seconds = map(int, CONFIG['global']['lock_timeout'].split(':'))
    LOCK_TIMEOUT_SECONDS = (minutes * 60) + seconds
    EXECUTION_THREAD_LONG_TIMEOUT = CONFIG['global'].getint(
        'execution_thread_long_timeout')

    # user specified log level
    LOG_LEVEL = logging.INFO
    if args:
        if args.log_level:
            LOG_LEVEL = args.log_level

    # make sure the logs directory exists
    LOG_DIRECTORY = os.path.join(DATA_DIR, 'logs')
    if not os.path.exists(LOG_DIRECTORY):
        try:
            os.mkdir(LOG_DIRECTORY)
        except Exception as e:
            sys.stderr.write("unable to mkdir {}: {}\n".format(
                LOG_DIRECTORY, e))
            sys.exit(1)

    # by default we log to the console
    if logging_config_path is None:
        logging_config_path = os.path.join(SAQ_HOME, 'etc',
                                           'console_logging.ini')

    # we can override this on the command line
    # this is what we use for production engine settings
    if args:
        if args.logging_config_path:
            logging_config_path = args.logging_config_path

    # we can re-initialize later if we have to
    try:
        initialize_logging(
            logging_config_path
        )  # this log file just gets some startup information
    except Exception as e:
        sys.exit(1)

    GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES = [
        _.strip() for _ in CONFIG['gui']
        ['whitelist_excluded_observable_types'].split(',')
    ]

    for o_type in GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES:
        if o_type not in VALID_OBSERVABLE_TYPES:
            logging.error(
                f"invalid observable type {o_type} specified in [gui] whitelist_excluded_observable_types"
            )

    # make this a faster lookup
    GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES = set(
        GUI_WHITELIST_EXCLUDED_OBSERVABLE_TYPES)

    # load global SLA settings
    GLOBAL_SLA_SETTINGS = SLA(None, CONFIG['SLA'].getboolean('enabled'),
                              CONFIG['SLA'].getint('time_to_dispo'),
                              CONFIG['SLA'].getint('approaching_warn'), None,
                              None)

    EXCLUDED_SLA_ALERT_TYPES = [
        x.strip() for x in CONFIG['SLA']['excluded_alert_types'].split(',')
    ]

    # load all the other SLA settings
    for section in [s for s in CONFIG.keys() if s.startswith('SLA_')]:
        logging.debug("loading {}".format(section))
        OTHER_SLA_SETTINGS.append(
            SLA(section[len('SLA_'):], CONFIG[section].getboolean('enabled'),
                CONFIG[section].getint('time_to_dispo'),
                CONFIG[section].getint('approaching_warn'),
                CONFIG[section]['property'], CONFIG[section]['value']))

    # what node is this?
    try:
        SAQ_NODE = CONFIG['global']['node']
        if SAQ_NODE == 'AUTO':
            SAQ_NODE = socket.getfqdn()
    except Exception as e:
        sys.stderr.write("unable to get hostname: {}\n".format(e))
        sys.exit(1)

    # what prefix do other systems use to communicate to the API server for this node?
    try:
        API_PREFIX = CONFIG['api']['prefix']
        if API_PREFIX == 'AUTO':
            API_PREFIX = socket.getfqdn()
        logging.debug("node {} has api prefix {}".format(SAQ_NODE, API_PREFIX))
    except Exception as e:
        sys.stderr.write("unable to get hostname: {}\n".format(e))
        sys.exit(1)

    # what type of instance is this?
    if 'instance_type' in CONFIG['global']:
        INSTANCE_TYPE = CONFIG['global']['instance_type']
        if INSTANCE_TYPE not in [
                INSTANCE_TYPE_PRODUCTION, INSTANCE_TYPE_QA, INSTANCE_TYPE_DEV
        ]:
            logging.warning(
                "invalid instance type {}: defaulting to {}".format(
                    INSTANCE_TYPE, INSTANCE_TYPE_PRODUCTION))
            INSTANCE_TYPE = INSTANCE_TYPE_PRODUCTION
    else:
        logging.warning(
            "missing configuration instance_type in global section (defaulting to instance type {})"
            .format(INSTANCE_TYPE_PRODUCTION))

    if FORCED_ALERTS:  # lol
        logging.warning(
            " ****************************************************************** "
        )
        logging.warning(
            " ****************************************************************** "
        )
        logging.warning(
            " **** WARNING **** ALL ANALYSIS RESULTS IN ALERTS **** WARNING **** "
        )
        logging.warning(
            " ****************************************************************** "
        )
        logging.warning(
            " ****************************************************************** "
        )

    # warn if timezone is not UTC
    #if time.strftime("%z") != "+0000":
    #logging.warning("Timezone is not UTC. All ACE systems in a cluster should be in UTC.")

    # we can globally disable semaphores with this flag
    SEMAPHORES_ENABLED = CONFIG.getboolean('global', 'enable_semaphores')

    # some settings can be set to PROMPT
    for section in CONFIG.sections():
        for (name, value) in CONFIG.items(section):
            if value == 'PROMPT':
                CONFIG.set(
                    section, name,
                    getpass("Enter the value for {0}:{1}: ".format(
                        section, name)))

    # make sure we've got the ca chain for SSL certs
    CA_CHAIN_PATH = os.path.join(SAQ_HOME, CONFIG['SSL']['ca_chain_path'])
    ace_api.set_default_ssl_ca_path(CA_CHAIN_PATH)

    # XXX this should probably move to the yara scanning module
    # set the location we'll be running yss out of
    YSS_BASE_DIR = os.path.join(SAQ_HOME, CONFIG['yara']['yss_base_dir'])
    if not os.path.exists(YSS_BASE_DIR):
        logging.critical(
            "[yara][yss_base_dir] is set to {} but does not exist".format(
                YSS_BASE_DIR))

    YSS_SOCKET_DIR = os.path.join(YSS_BASE_DIR,
                                  CONFIG['yara']['yss_socket_dir'])

    # initialize the database connection
    initialize_database()

    # initialize fallback semaphores
    initialize_fallback_semaphores()

    # XXX get rid of this
    try:
        maliciousdir = CONFIG.get("global", "malicious")
    except Exception:
        maliciousdir = "malicious"

    STATS_DIR = os.path.join(DATA_DIR, 'stats')
    MODULE_STATS_DIR = os.path.join(STATS_DIR, 'modules')

    # make sure some key directories exists
    for dir_path in [
            # analysis data
            os.path.join(DATA_DIR, CONFIG['global']['node']),
            #os.path.join(SAQ_HOME, 'var', 'locks'), # XXX remove
            os.path.join(DATA_DIR, 'review', 'rfc822'),
            os.path.join(DATA_DIR, 'review', 'misc'),
            os.path.join(DATA_DIR, CONFIG['global']['error_reporting_dir']),
            STATS_DIR,
            MODULE_STATS_DIR,
            os.path.join(STATS_DIR, 'brocess'),  # get rid of this
            os.path.join(STATS_DIR, 'metrics'),
            os.path.join(DATA_DIR, CONFIG['splunk_logging']['splunk_log_dir']),
            os.path.join(DATA_DIR, CONFIG['elk_logging']['elk_log_dir']),
            TEMP_DIR,
            os.path.join(SAQ_HOME, CONFIG['yara']['yss_base_dir'], 'logs'),
    ]:  # XXX this should be in YSS
        #os.path.join(SAQ_HOME, maliciousdir) ]: # XXX remove
        try:
            if not os.path.isdir(dir_path):
                os.makedirs(dir_path)
        except Exception as e:
            logging.error("unable to create required directory {}: {}".format(
                dir_path, str(e)))
            sys.exit(1)

    # clear out any proxy environment variables if they exist
    for proxy_key in ['http_proxy', 'https_proxy', 'ftp_proxy']:
        if proxy_key in os.environ:
            logging.debug(
                "removing proxy environment variable for {}".format(proxy_key))
            del os.environ[proxy_key]

    # set up the PROXY global dict (to be used with the requests library)
    for proxy_key in ['http', 'https']:
        if CONFIG['proxy']['host'] and CONFIG['proxy']['port'] and CONFIG[
                'proxy']['transport']:
            if CONFIG['proxy']['user'] and CONFIG['proxy']['password']:
                PROXIES[proxy_key] = '{}://{}:{}@{}:{}'.format(
                    CONFIG['proxy']['transport'], CONFIG['proxy']['user'],
                    CONFIG['proxy']['password'], CONFIG['proxy']['host'],
                    CONFIG['proxy']['port'])
            else:
                PROXIES[proxy_key] = '{}://{}:{}'.format(
                    CONFIG['proxy']['transport'], CONFIG['proxy']['host'],
                    CONFIG['proxy']['port'])
            logging.debug("proxy for {} set to {}".format(
                proxy_key, PROXIES[proxy_key]))

    # load any additional proxies specified in the config sections proxy_*
    for section in CONFIG.keys():
        if section.startswith('proxy_'):
            proxy_name = section[len('proxy_'):]
            OTHER_PROXIES[proxy_name] = {}
            for proxy_key in ['http', 'https']:
                if CONFIG[section]['host'] and CONFIG[section][
                        'port'] and CONFIG[section]['transport']:
                    if 'user' in CONFIG[section] and 'password' in CONFIG[section] \
                    and CONFIG[section]['user'] and CONFIG[section]['password']:
                        OTHER_PROXIES[proxy_name][
                            proxy_key] = '{}://{}:{}@{}:{}'.format(
                                CONFIG[section]['transport'],
                                CONFIG[section]['user'],
                                CONFIG[section]['password'],
                                CONFIG[section]['host'],
                                CONFIG[section]['port'])
                    else:
                        OTHER_PROXIES[proxy_name][
                            proxy_key] = '{}://{}:{}'.format(
                                CONFIG[section]['transport'],
                                CONFIG[section]['host'],
                                CONFIG[section]['port'])

    # load global constants
    import iptools

    MANAGED_NETWORKS = []
    for cidr in CONFIG['network_configuration']['managed_networks'].split(','):
        try:
            if cidr:
                MANAGED_NETWORKS.append(iptools.IpRange(cidr.strip()))
        except Exception as e:
            logging.error("invalid managed network {}: {}".format(
                cidr, str(e)))

    # are we running as a daemon?
    if args:
        DAEMON_MODE = args.daemon

    # initialize other systems
    initialize_remediation_system_manager()
    initialize_message_system()

    logging.debug("SAQ initialized")
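After initialize() runs, MANAGED_NETWORKS holds one iptools.IpRange per configured CIDR. A minimal sketch of how a caller might test an address against them; the is_managed helper is hypothetical, not part of ACE:

import saq

def is_managed(ip):
    """Return True if ip falls inside any configured managed network."""
    return any(ip in network for network in saq.MANAGED_NETWORKS)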
Code Example #20
import iptools
import sys

inputfile = sys.argv[1]
outputfile = sys.argv[2]
with open(inputfile, "r") as inputcidr, open(outputfile, "a") as outputips:
    for line in inputcidr:
        # strip the trailing newline before handing the CIDR to iptools
        cidr = line.strip()
        if not cidr:
            continue
        iplist = iptools.IpRange(cidr)
        for ip in iplist:
            print(ip)
            outputips.write(ip + "\n")
inputcount = len(open(inputfile).readlines())
print("number of CIDR = " + str(inputcount))
outputcount = len(open(outputfile).readlines())
print("number of output IPS = " + str(outputcount))
Code Example #21
def address_bin(self):
    # startIp is the integer (long) form of the range's first address.
    address_int = iptools.IpRange(self.address).startIp
    return bin(address_int)
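A quick illustration: constructing an IpRange from a single address yields a one-element range whose startIp attribute is that address as a 32-bit integer, which bin() then renders in binary:

import iptools

r = iptools.IpRange('10.0.0.1')
print(r.startIp)       # 167772161, the integer form of 10.0.0.1
print(bin(r.startIp))  # that integer in binary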
Code Example #22
File: snippet.py  Project: szabo92/gistable
def close_stream(host, stream):
    """Bookkeeping: delete closed hosts from the dict"""
    lock.acquire()
    try:
        del hosts[host]
    except:
        pass
    lock.release()
    countinc()


# IpRanges to check if we generated a valid IP
# Tip: Use 3to2-3.x to convert the iptools module

classA = iptools.IpRange('1.0.0.0', '126.0.0.0')
classB = iptools.IpRange('128.0.0.0', '191.0.0.0')
classC = iptools.IpRange('192.0.0.0', '223.0.0.0')
privA = iptools.IpRange('10.0.0.0', '10.255.255.255')
privB = iptools.IpRange('172.16.0.0', '172.31.255.255')
privC = iptools.IpRange('192.168.0.0', '192.168.255.255')
max_ip = 2**32


# In python3 this is so much nicer to write!
def int_to_ip(ip):
    """Convert 32-bit integer to an IP"""
    return socket.inet_ntoa(ip.to_bytes(4, 'big'))


def ip_ranges():
Code Example #23
File: main.py  Project: robinlennox/geo-ip
#!/usr/bin/python

## -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
## Import Python Libs
## -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
import os
import geo
import json
import iptools

## Get IP Range
IPRANGE = os.environ.get('IPRANGE', '52.0.0.0/30')

## Private IP Addresses
private = iptools.IpRangeList(
    '0.0.0.0/8', '10.0.0.0/8', '100.64.0.0/10', '127.0.0.0/8',
    '169.254.0.0/16', '172.16.0.0/12', '192.0.0.0/24', '192.0.2.0/24',
    '192.88.99.0/24', '192.168.0.0/16', '198.18.0.0/15', '198.51.100.0/24',
    '203.0.113.0/24', '224.0.0.0/4', '240.0.0.0/4', '255.255.255.255/32')

## Print Found Valid Public IPs
for ip in iptools.IpRange(IPRANGE):
    if ip not in private:
        print json.dumps(geo.lookup(ip))
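IpRangeList accepts any number of CIDR blocks and tests membership across all of them, which is what makes the private-address check above a single membership test; a quick sketch:

import iptools

rfc1918 = iptools.IpRangeList('10.0.0.0/8', '172.16.0.0/12', '192.168.0.0/16')
print('10.1.2.3' in rfc1918)   # True
print('52.0.0.1' in rfc1918)   # False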
Code Example #24
File: cloudium.py  Project: june5079/cloudium
    def ipExtract(self, ipClass):
        # Extract specific ip addrs from IP Class
        self.IPV4 = ipClass
        self.tryipList = iptools.IpRange(self.IPV4)

        return self.tryipList
Code Example #25
def main():
    print('\nFor finding SSL/TLS certificates in a IP range.\n')
    PARSER = argparse.ArgumentParser(
        description="""This program scans a range of IPs and attempts
    to read hostname information from the SSL/TLS certificates""")
    PARSER.add_argument('-r',
                        '--range',
                        help='IP range(ex:1.1/16,1.1.1/24)',
                        required=False)
    PARSER.add_argument('-s',
                        '--start',
                        help='IP or start of IP range',
                        required=False)
    PARSER.add_argument('-e', '--end', help='End of IP range', required=False)
    PARSER.add_argument('-t',
                        '--timeout',
                        help='Socket timeout (default:2)',
                        required=False)
    PARSER.add_argument('-p',
                        '--port',
                        help='SSL/TLS port (default:443)',
                        required=False)
    PARSER.add_argument('-m',
                        '--maxscanprocess',
                        help='Maximum number of scan processes (default:200)',
                        required=False)
    ARGS = PARSER.parse_args()

    # if ARGS.range is None:
    #     if ARGS.start is None:
    #         Usage()
    #         sys.exit(2)
    if ARGS.range is not None and ARGS.start is not None:
        print(
            'The IP range and the start IP address cannot be used concurrently.'
        )
        print('')
        Usage()
        sys.exit(2)

    # if ARGS.range is not None:
    #     IPLIST = iptools.IpRange(ARGS.range)
    # # else:
    # #     if ARGS.end is None:
    # #         ARGS.end = ARGS.start
    # #     IPLIST = iptools.IpRange(ARGS.start, ARGS.end)
    # Amazon_IPs_List = prefixes()
    # RemoveDup_Amazon_IPs_List = list(set(Amazon_IPs_List))
    # RemoveDup_Amazon_IPs_List.sort()

    if ARGS.maxscanprocess is None:
        Max_Do_Process_Cnt = 200
        Max_Log_Process_Cnt = 2
    else:
        if (int(ARGS.maxscanprocess) > 1024):
            Max_Do_Process_Cnt = 1024
        else:
            Max_Do_Process_Cnt = int(ARGS.maxscanprocess)

        if (Max_Do_Process_Cnt / 50 <= 4):
            Max_Log_Process_Cnt = 4
        else:
            Max_Log_Process_Cnt = Max_Do_Process_Cnt // 50

    if ARGS.timeout is None:
        ARGS.timeout = 1
    if ARGS.port is None:
        ARGS.port = 443

    sys.stdout = Unbuffered(sys.stdout)

    queueA = Queue(
        2048)  #needs to be a bit bigger than actual. 3x works well for me
    queueB = Queue(2048)

    procsA = start_procs(Max_Do_Process_Cnt, processA,
                         (queueA, queueB, ARGS.timeout, ARGS.port))
    procsB = start_procs(Max_Log_Process_Cnt, processB, (queueB, None))

    IPV4 = "8.35.192.0/21"
    IPLIST = iptools.IpRange(IPV4)
    for ip_addr in IPLIST:
        queueA.put(ip_addr)
        print(ip_addr)

    # for IPv4_Class in RemoveDup_Amazon_IPs_List:
    #  	IPLIST = iptools.IpRange(IPv4_Class)
    #     print (IPv4_Class)
    #     [queueA.put(ip_addrs) for ip_addrs in IPLIST]
    #     # print ("[+] NEXT" + ip_addrs)

    # scan_start = IPLIST.__getitem__(0)
    # scan_end = IPLIST.__getitem__(IPLIST.__len__()-1)
    # print "\nScanning %s IPs from %s to %s...\n" % (len(IPLIST), scan_start, scan_end)
    # print 'Scan Process Nums : ' + str(Max_Do_Process_Cnt) + ', Log Proc Nums : ' + str(Max_Log_Process_Cnt)
    # print ''

    shutdown_process(procsA, queueA)
    shutdown_process(procsB, queueB)
Code Example #26
File: __init__.py  Project: code4days/ACE
def initialize(saq_home=None, config_paths=None, logging_config_path=None, args=None, relative_dir=None):

    from saq.database import initialize_database

    global SAQ_HOME
    global SAQ_NODE
    global SAQ_RELATIVE_DIR
    global CONFIG
    global CONFIG_PATHS
    global SINGLE_THREADED
    global DEFAULT_ENCODING
    global SEMAPHORES_ENABLED
    global MANAGED_NETWORKS
    global FORCED_ALERTS
    global LOG_LEVEL
    global DAEMON_MODE
    global CA_CHAIN_PATH
    global INSTANCE_TYPE
    global GLOBAL_SLA_SETTINGS
    global EXCLUDED_SLA_ALERT_TYPES
    global STATS_DIR
    global MODULE_STATS_DIR
    global YSS_BASE_DIR
    global YSS_SOCKET_DIR

    # go ahead and try to figure out what text encoding we're using
    DEFAULT_ENCODING = locale.getpreferredencoding()

    # do we want to force alerts?
    if args:
        FORCED_ALERTS = args.force_alerts

    # do we want to run in single threaded mode?
    if args:
        SINGLE_THREADED = args.single_threaded

    # what is the root directory of the entire system?
    if saq_home is not None:
        SAQ_HOME = saq_home
    elif 'SAQ_HOME' in os.environ:
        SAQ_HOME = os.environ['SAQ_HOME']
    else:
        SAQ_HOME = '.'

    if not os.path.isdir(SAQ_HOME):
        sys.stderr.write("invalid root SAQ directory {0}\n".format(SAQ_HOME)) 
        sys.exit(1)

    # XXX not sure we need this SAQ_RELATIVE_DIR anymore -- check it out
    # this system was originally designed to run out of /opt/saq
    # later we modified to run out of anywhere for command line correlation
    # when running the GUI in apache you have no control over the current working directory
    # so we specify what directory we'd *want* to be running out of here (even if we're not actually)
    # this only matters when loading alerts
    # this defaults to the current working directory
    SAQ_RELATIVE_DIR = os.getcwd()
    if relative_dir:
        SAQ_RELATIVE_DIR = relative_dir

    # load configuration file
    # defaults to $SAQ_HOME/etc/saq.ini
    if args:
        if args.config_paths:
            config_paths = args.config_paths

    if config_paths is None:
        config_paths = []
    
    # make each relative config path absolute to SAQ_HOME
    CONFIG_PATHS = [os.path.join(SAQ_HOME, p) if not os.path.isabs(p) else p for p in config_paths]

    # add any config files specified in SAQ_CONFIG_PATHS env var (comma-separated)
    #sys.stderr.write("SAQ_CONFIG_PATHS = {}\n".format(os.environ['SAQ_CONFIG_PATHS']))
    if 'SAQ_CONFIG_PATHS' in os.environ:
        for config_path in os.environ['SAQ_CONFIG_PATHS'].split(','):
            config_path = config_path.strip()
            if not os.path.isabs(config_path):
                config_path = os.path.join(SAQ_HOME, config_path)
            if not os.path.exists(config_path):
                sys.stderr.write("WARNING: config path {} specified in SAQ_CONFIG_PATHS env var does not exist\n".format(config_path))
            else:
                if config_path not in CONFIG_PATHS:
                    CONFIG_PATHS.append(config_path)

    # if $SAQ_HOME/etc/saq.ini exists then we use that as the last config if it's not already specified
    default_config_path = os.path.join(SAQ_HOME, 'etc', 'saq.ini')
    if os.path.exists(default_config_path):
        if default_config_path not in CONFIG_PATHS:
            CONFIG_PATHS.append(default_config_path)

    try:
        load_configuration()
    except Exception as e:
        sys.stderr.write("ERROR: unable to load configuration: {0}".format(str(e)))
        sys.exit(1)

    # user specified log level
    LOG_LEVEL = logging.INFO
    if args:
        if args.log_level:
            LOG_LEVEL = args.log_level

    # make sure the logs directory exists
    logs_dir = os.path.join(SAQ_HOME, LOG_DIRECTORY)
    if not os.path.exists(logs_dir):
        try:
            os.mkdir(logs_dir)
        except Exception as e:
            sys.stderr.write("unable to mkdir {}: {}\n".format(logs_dir, e))
            sys.exit(1)

    # by default we log to the console
    if logging_config_path is None:
        logging_config_path = os.path.join(SAQ_HOME, 'etc', 'console_logging.ini')

    # we can override this on the command line
    # this is what we use for production engine settings
    if args:
        if args.logging_config_path:
            logging_config_path = args.logging_config_path
    
    # we can re-initialize later if we have to
    try:
        initialize_logging(logging_config_path) # this log file just gets some startup information
    except Exception as e:
        sys.exit(1)

    # load global SLA settings
    GLOBAL_SLA_SETTINGS = SLA(None, 
                              CONFIG['SLA'].getboolean('enabled'),
                              CONFIG['SLA'].getint('time_to_dispo'),
                              CONFIG['SLA'].getint('approaching_warn'),
                              None, None)

    EXCLUDED_SLA_ALERT_TYPES = [x.strip() for x in CONFIG['SLA']['excluded_alert_types'].split(',')]

    # load all the other SLA settings
    for section in [s for s in CONFIG.keys() if s.startswith('SLA_')]:
        logging.debug("loading {}".format(section))
        OTHER_SLA_SETTINGS.append(SLA(section[len('SLA_'):],
                                      CONFIG[section].getboolean('enabled'),
                                      CONFIG[section].getint('time_to_dispo'),
                                      CONFIG[section].getint('approaching_warn'),
                                      CONFIG[section]['property'],
                                      CONFIG[section]['value']))

    # what node is this?
    try:
        SAQ_NODE = CONFIG['global']['node']
        logging.debug("node {}".format(SAQ_NODE))
    except Exception as e:
        sys.stderr.write("unable to get hostname: {}\n".format(e))
        sys.exit(1)

    # what type of instance is this?
    if 'instance_type' in CONFIG['global']:
        INSTANCE_TYPE = CONFIG['global']['instance_type']
        if INSTANCE_TYPE not in [ INSTANCE_TYPE_PRODUCTION, INSTANCE_TYPE_QA, INSTANCE_TYPE_DEV ]:
            logging.warning("invalid instance type {}: defaulting to {}".format(INSTANCE_TYPE, INSTANCE_TYPE_PRODUCTION))
            INSTANCE_TYPE = INSTANCE_TYPE_PRODUCTION
    else:
        logging.warning("missing configuration instance_type in global section (defaulting to instance type {})".format(INSTANCE_TYPE_PRODUCTION))

    if FORCED_ALERTS: # lol
        logging.warning(" ****************************************************************** ")
        logging.warning(" ****************************************************************** ")
        logging.warning(" **** WARNING **** ALL ANALYSIS RESULTS IN ALERTS **** WARNING **** ")
        logging.warning(" ****************************************************************** ")
        logging.warning(" ****************************************************************** ")

    # we can globally disable semaphores with this flag
    SEMAPHORES_ENABLED = CONFIG.getboolean('global', 'enable_semaphores')

    # log all SQL commands if we are running in debug mode
    if CONFIG['global'].getboolean('log_sql'):
        logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)

    # some settings can be set to PROMPT
    for section in CONFIG.sections():
        for (name, value) in CONFIG.items(section):
            if value == 'PROMPT':
                CONFIG.set(section, name, getpass("Enter the value for {0}:{1}: ".format(section, name)))

    # make sure we've got the ca chain for SSL certs
    CA_CHAIN_PATH = os.path.join(SAQ_HOME, CONFIG['SSL']['ca_chain_path'])

    # set the location we'll be running yss out of
    YSS_BASE_DIR = os.path.join(SAQ_HOME, CONFIG['yara']['yss_base_dir'])
    if not os.path.exists(YSS_BASE_DIR):
        logging.critical("[yara][yss_base_dir] is set to {} but does not exist".format(YSS_BASE_DIR))

    YSS_SOCKET_DIR = os.path.join(YSS_BASE_DIR, CONFIG['yara']['yss_socket_dir'])

    # initialize the database connection
    initialize_database()

    # initialize fallback semaphores
    initialize_fallback_semaphores()

    try:
        maliciousdir = CONFIG.get("global", "malicious")
    except Exception:
        maliciousdir = "malicious"

    STATS_DIR = os.path.join(SAQ_HOME, 'stats')
    MODULE_STATS_DIR = os.path.join(STATS_DIR, 'modules')

    # make sure some key directories exists
    for dir_path in [ 
        os.path.join(SAQ_HOME, CONFIG['global']['data_dir'], CONFIG['global']['node']),
        os.path.join(SAQ_HOME, 'var', 'locks'),
        os.path.join(SAQ_HOME, 'var', 'incoming'),
        os.path.join(SAQ_HOME, 'review', 'rfc822'),
        os.path.join(SAQ_HOME, 'review', 'misc'),
        STATS_DIR,
        MODULE_STATS_DIR,
        os.path.join(SAQ_HOME, 'stats', 'brocess'),
        os.path.join(SAQ_HOME, 'stats', 'metrics'),
        os.path.join(SAQ_HOME, CONFIG['splunk_logging']['splunk_log_dir']),
        os.path.join(SAQ_HOME, CONFIG['elk_logging']['elk_log_dir']),
        os.path.join(SAQ_HOME, CONFIG['global']['tmp_dir']),
        os.path.join(SAQ_HOME, CONFIG['yara']['yss_base_dir'], 'logs'),
        os.path.join(SAQ_HOME, maliciousdir) ]:
        try:
            if not os.path.isdir(dir_path):
                os.makedirs(dir_path)
        except Exception as e:
            logging.error("unable to create required directory {}: {}".format(dir_path, str(e)))
            sys.exit(1)

    # make sure the collection directory for each enabled engine exists
    for section in CONFIG.keys():
        if section.startswith('engine_'):
            engine_config = CONFIG[section]
            if 'collection_dir' in engine_config:
                collection_dir = os.path.join(SAQ_HOME, engine_config['collection_dir'])
                if not os.path.isdir(collection_dir):
                    logging.info("creating collection directory {} for {}".format(collection_dir, section))
                    try:
                        os.makedirs(collection_dir)
                    except Exception as e:
                        logging.error("unable to create directory {}: {}".format(collection_dir, e))
                        sys.exit(1)

    # clear out any proxy environment variables if they exist
    for proxy_key in [ 'http_proxy', 'https_proxy', 'ftp_proxy' ]:
        if proxy_key in os.environ:
            logging.debug("removing proxy environment variable for {}".format(proxy_key))
            del os.environ[proxy_key]

    # set up the PROXY global dict (to be used with the requests library)
    for proxy_key in [ 'http', 'https' ]:
        if CONFIG['proxy']['host'] and CONFIG['proxy']['port'] and CONFIG['proxy']['transport']:
            if CONFIG['proxy']['user'] and CONFIG['proxy']['password']:
                PROXIES[proxy_key] = '{}://{}:{}@{}:{}'.format(CONFIG['proxy']['transport'], CONFIG['proxy']['user'], 
                CONFIG['proxy']['password'], CONFIG['proxy']['host'], CONFIG['proxy']['port'])
            else:
                PROXIES[proxy_key] = '{}://{}:{}'.format(CONFIG['proxy']['transport'], CONFIG['proxy']['host'], CONFIG['proxy']['port'])
            logging.debug("proxy for {} set to {}".format(proxy_key, PROXIES[proxy_key]))

    # load any additional proxies specified in the config sections proxy_*
    for section in CONFIG.keys():
        if section.startswith('proxy_'):
            proxy_name = section[len('proxy_'):]
            OTHER_PROXIES[proxy_name] = {}
            for proxy_key in [ 'http', 'https' ]:
                if CONFIG[section]['host'] and CONFIG[section]['port'] and CONFIG[section]['transport']:
                    if 'user' in CONFIG[section] and 'password' in CONFIG[section] \
                    and CONFIG[section]['user'] and CONFIG[section]['password']:
                        OTHER_PROXIES[proxy_name][proxy_key] = '{}://{}:{}@{}:{}'.format(
                        CONFIG[section]['transport'], CONFIG[section]['user'], CONFIG[section]['password'], 
                        CONFIG[section]['host'], CONFIG[section]['port'])
                    else:
                        OTHER_PROXIES[proxy_name][proxy_key] = '{}://{}:{}'.format(
                        CONFIG[section]['transport'], CONFIG[section]['host'], CONFIG[section]['port'])

    # load global constants
    import iptools
    
    MANAGED_NETWORKS = []
    for cidr in CONFIG['network_configuration']['managed_networks'].split(','):
        try:
            if cidr:
                MANAGED_NETWORKS.append(iptools.IpRange(cidr.strip()))
        except Exception as e:
            logging.error("invalid managed network {}: {}".format(cidr, str(e)))

    # are we running as a daemon?
    if args:
        DAEMON_MODE = args.daemon

    logging.debug("SAQ initialized")
Code Example #27
def Malicious(pop_size, scan_rate, susc_size, inf, code, count):
    """
    Function Malicious takes population size, scanning rate, susceptible proportion, number of initial infected nodes, attack type, 
    and number of infections per iteration as input to generate netflows in .csv and .txt format. 
    
    Args : 
        pop_size (float)  : population size of scanning worm
        scan_rate (float) : scanning rate of computer worm
        susc_size (float) : susceptible proportion of computer worm
        inf (float)       : number of initial infected hosts
        code (int)        : attack type
        count (int)       : time variable
        
    """
    ### Variable : Initialization ###
    time = int(count) * 5  # Duration of Malicious Traffic #

    ### Filename ###
    if (code == 1):
        name = "DDoS"
    elif (code == 2):
        name = "Horiz"
    elif (code == 3):
        name = "Vert"
    elif (code == 4):
        name = "FIN"

    ### .csv ###
    Malicious_filename_1 = "[" + str(int(pop_size)) + "_" + str(
        int(scan_rate)) + "_" + str(int(susc_size)) + "_" + str(
            int(code)) + name + "_" + str(time) + "]" + ".csv"
    open(Malicious_filename_1, 'w').close()

    ### .txt ###
    Malicious_filename_2 = "[" + str(int(pop_size)) + "_" + str(
        int(scan_rate)) + "_" + str(int(susc_size)) + "_" + str(
            int(code)) + name + "_" + str(time) + "]" + ".txt"
    open(Malicious_filename_2, 'w').close()

    ### Header ###
    Header(Malicious_filename_1, 1)
    Header(Malicious_filename_2, 2)

    ### File Operation : Open ###
    f0 = open(Time_filename, 'r')  # Time File      #
    f1 = open(Malicious_filename_1, 'a')  # Malicious File #
    f2 = open(Malicious_filename_2, 'a')  # Malicious File #

    ### Variable : Initialization ###
    port = 0  # Port Counter                  #
    ik_t = 0  # Temp Variable (Unusual Flags) #
    st_t = 0  # Temp Variable (Port Scan)     #
    n = int(code)  # Code                          #

    ### IP Address Range ###
    ip_internal = iter(iptools.IpRange(
        '10.0.0.0', '10.0.255.255'))  # Internal : IP Address Space #
    ip_internal2 = iter(iptools.IpRange(
        '10.0.0.0', '10.0.255.255'))  # Internal : IP Address Space #

    ### Display ###
    print(Malicious_filename_1)
    print(Malicious_filename_2)

    ### Loop : For line in f0 ###
    for line in f0:

        ### Data Extraction ###
        ip, time, unix_t = line.split(",")  # IP Address and Time #
        time = time.strip()  # Time Strip function #
        Unix_time = unix_t.strip()  # Unix Strip Function #
        #fmt               = '%Y-%m-%d %H:%M:%S.%f'
        #time              = datetime.datetime.strptime ( time , fmt )

        ### Protocols ###
        Src_ip = ip.strip()  # Source IP Address      #
        TCP_protocol = '6'  # TCP Protocol           #
        Dest_ip = '10.0.100.1'  # Destination IP Address #
        Packets = int(1)  # Number of Packets      #
        Bytes = random.randint(41, 43)  # Number of Bytes        #
        Dt = time  # Time in format         #
        Src_port = random.randint(50000, 65000)  # Source Port            #
        Lk = [8080, 80]  # List                   #
        Dest_port = random.choice(Lk)  # Destination Port       #
        Remaining_flags = '      '  # Remaining Flags        #
        Duration = random.randint(300, 305)  # Duration               #

        ### Random Number Generator ###
        r = random.randint(0, 9)  # Random Integer         #

        ### Conditional Statements : if ###
        if (n == 1):
            f1.write(
                str(Unix_time) + "," + str(Duration) + "," + str(Packets) +
                "," + str(Src_ip) + "," + str(Dest_ip) + "," + str(Src_port) +
                "," + str(Dest_port) + "," + str(TCP_protocol) + "," +
                "    S " + "\n")
            f2.write(
                str("in") + str("|") + str(TCP_protocol) + str("|") +
                str(Src_ip) + str("|") + str(Dest_ip) + str("|") +
                str(Src_port) + str("|") + str(Dest_port) + str("|") +
                str(Packets) + str("|") + str(Bytes) + str("|") + str(Dt) +
                str("|") + str("0.002") + str("|") + str("    S ") + str("|") +
                str(Remaining_flags) + "\n")
            if (r == 9):
                f1.write(
                    str(Unix_time) + "," + str(Duration) + "," + str(Packets) +
                    "," + str(Dest_ip) + "," + str(Src_ip) + "," +
                    str(Dest_port) + "," + str(Src_port) + "," +
                    str(TCP_protocol) + "," + " A  S " + "\n")
                f2.write(
                    str("out") + str("|") + str(TCP_protocol) + str("|") +
                    str(Dest_ip) + str("|") + str(Src_ip) + str("|") +
                    str(Dest_port) + str("|") + str(Src_port) + str("|") +
                    str(Packets) + str("|") + str(Bytes) + str("|") + str(Dt) +
                    str("|") + str("0.002") + str("|") + str(" A  S ") +
                    str("|") + str(Remaining_flags) + "\n")

        elif n == 2:
            # Vertical scan: sweep every port on a single internal host.
            if port < 65536:

                ### Variable : Initialization ###
                Dest_ip = '10.0.100.9'  # Scanned host
                Dest_port = port        # Current port in the sweep

                ### File Operation : Write ###
                f1.write("in,{},{},{},{},{},{},{},{},0.002,    S {}\n".format(
                    TCP_protocol, Src_ip, Dest_ip, Src_port, Dest_port,
                    Packets, Bytes, Dt, Remaining_flags))
                if r == 9:
                    # Occasionally the port answers (SYN-ACK) and the scanner resets.
                    f1.write("out,{},{},{},{},{},{},{},{},0.002, A    {}\n".format(
                        TCP_protocol, Dest_ip, Src_ip, Dest_port, Src_port,
                        Packets, Bytes, Dt, Remaining_flags))
                    f1.write("in,{},{},{},{},{},{},{},{},0.002,   R  {}\n".format(
                        TCP_protocol, Src_ip, Dest_ip, Src_port, Dest_port,
                        Packets, Bytes, Dt, Remaining_flags))

                ### Increment : port ###
                port += 1  # Next port in the sweep

        ### Try Statement ###
        elif n == 3:
            # Horizontal scan: sweep one port across the internal address space.
            try:
                # Take the next internal address from the iterator (one read,
                # so no addresses are skipped) and strip the trailing newline.
                Dest_ip_1 = next(ip_internal).strip()

                ### If Statement ###
                if Dest_ip_1 != '10.0.255.255':

                    ### File Operation : Write ###
                    f1.write("in,{},{},{},{},{},{},{},{},0.002,    S \n".format(
                        TCP_protocol, Src_ip, Dest_ip_1, Src_port, Dest_port,
                        Packets, Bytes, Dt))
                    if r == 9:
                        f1.write("out,{},{},{},{},{},{},{},{},0.002, A    \n".format(
                            TCP_protocol, Dest_ip_1, Src_ip, Dest_port, Src_port,
                            Packets, Bytes, Dt))
                        f1.write("in,{},{},{},{},{},{},{},{},0.002,   R  \n".format(
                            TCP_protocol, Src_ip, Dest_ip_1, Src_port, Dest_port,
                            Packets, Bytes, Dt))

            ### Exception Handling ###
            except StopIteration:
                st_t += 1  # Address space exhausted (horizontal scan finished)

        elif n == 4:
            # FIN scan: probe internal hosts with bare FIN packets.
            try:
                # Take the next internal address from the iterator (one read,
                # so no addresses are skipped) and strip the trailing newline.
                Dest_ip_2 = next(ip_internal2).strip()

                ### If Statement ###
                if Dest_ip_2 != '10.0.255.255':

                    ### File Operation : Write ###
                    f1.write("in,{},{},{},{},{},{},{},{},0.002,     F\n".format(
                        TCP_protocol, Src_ip, Dest_ip_2, Src_port, Dest_port,
                        Packets, Bytes, Dt))
                    if r != 9:
                        # Closed ports usually answer a FIN probe with a reset.
                        f1.write("out,{},{},{},{},{},{},{},{},0.002,   R  \n".format(
                            TCP_protocol, Dest_ip_2, Src_ip, Dest_port, Src_port,
                            Packets, Bytes, Dt))

            ### Exception Handling ###
            except StopIteration:
                st_t += 1  # Address space exhausted (FIN scan finished)

        elif n == 5:
            # Unusual-flag traffic: emit flows carrying random flag combinations.
            if ik_t < 1000:

                ### Variable : Initialization ###
                flag = random.choice(perms)  # Random combination from the precomputed flag permutations

                ### File Operation : Write ###
                # Dest_ip_2 is reused from the FIN-scan branch (n == 4), which
                # therefore has to run at least once before this branch.
                f1.write("in,{},{},{},{},{},{},{},{},0.002,{}\n".format(
                    TCP_protocol, Src_ip, Dest_ip_2, Src_port, Dest_port,
                    Packets, Bytes, Dt, flag))

                ### Increment : ik_t ###
                ik_t += 1  # Count of unusual-flag flows emitted
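
For reference, a minimal standalone sketch of the pipe-separated layout the f2 writes above produce; every field value below is an illustrative assumption, not taken from the generator:

# One f2-style record: direction|proto|src|dst|sport|dport|pkts|bytes|time|dur|flags|padding
fields = ["in", "6", "10.0.0.5", "10.0.100.1", "51234", "80",
          "1", "42", "2016-01-01 00:00:00.000000", "0.002", "    S ", "      "]
print("|".join(fields))
# -> in|6|10.0.0.5|10.0.100.1|51234|80|1|42|2016-01-01 00:00:00.000000|0.002|    S |...
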
Code example #28
    parser.add_argument("-t",
                        "--target",
                        help="Restricted URL (target)",
                        required=True)
    parser.add_argument("-cl",
                        "--badcl",
                        help="Restricted URL Content Length",
                        required=True)
    parser.add_argument("-r",
                        "--range",
                        help="IP range i.e. 0.0.0.0-255.255.255.255",
                        required=True)
    args = parser.parse_args()

    ip_start, ip_end = args.range.split("-")
    ip_range = iptools.IpRange(ip_start, ip_end)

    for ip in tqdm(ip_range, leave=True):
        try:
            ip_list = ("{0}, ".format(ip) * 50)[:-2]
            x_forwarded_for_header = {"X-Forwarded-For": ip_list}
            response = requests.get(args.target,
                                    headers=x_forwarded_for_header)
            # Content-Length arrives as a string; compare the sizes numerically.
            if int(response.headers['content-length']) > int(args.badcl):
                print("")
                info("Access granted with header: \n{0}".format(
                    x_forwarded_for_header))
                break
        except KeyError:
            error(
                "No Content-Length header contained in request to {0}".format(
                    args.target))
Code example #29
def parse(args, pname, pdescription, rootpath, parser=None,
          ips=None, nthreads=None, timeout=None,
          thosts=None, port=None, users=None, passwords=None):
    if parser is None:
        parser = argparse.ArgumentParser(
            prog=pname,
            description=pdescription)
    if ips is not None:
        parser.add_argument("-i",
                            dest='fn_ip',
                            help="File with a newline seperated list of IPs," +
                            " e.g. -i ips.txt")
        parser.add_argument("-I",
                            dest='ip',
                            help="IP address to resolve" +
                            ", e.g. -I 8.8.8.8")
        parser.add_argument("-r",
                            dest='ip_range',
                            help="Range of IPs to resolve" +
                            ", e.g. -r 82.165.197.0/24")
    if nthreads is not None:
        parser.add_argument("-t",
                            dest='nthreads',
                            help="amount of threads to scan with (default 400)",
                            type=int,
                            default=400)
    if timeout is not None:
        parser.add_argument('-to',
                            dest='timeout',
                            help='timeout (default 3s)',
                            type=int,
                            default=3
                            )
    if thosts is not None:
        parser.add_argument('-s',
                            help='file containing ip[:port] entries',
                            dest='fn_thost'
                            )
        parser.add_argument('-S',
                            help='target ip[:port]',
                            dest='thost',
                            )
    if port is not None:
        parser.add_argument('-P',
                            help='target port',
                            dest='port',
                            default=21,
                            type=int
                            )
    if users is not None:
        parser.add_argument('-u',
                            help='file containing username per line',
                            dest='fn_users',
                            )
    if passwords is not None:
        parser.add_argument('-p',
                            help='file containing password per line',
                            dest='fn_passwords',
                            )

    opts = parser.parse_args(args)
    if ips is not None:
        if opts.fn_ip:
            fn_ip = os.path.join(rootpath, opts.fn_ip)
            with open(fn_ip, 'r') as f:
                ips.extend(f.read().splitlines())
        if opts.ip:
            ips.append(opts.ip)
        if opts.ip_range:
            for ip in iptools.IpRange(opts.ip_range):
                ips.append(ip)
    if nthreads is not None:
        nthreads.append(opts.nthreads)
    if timeout is not None:
        timeout.append(opts.timeout)
    if port is not None:
        port.append(opts.port)
    # TODO: ports parsed from ip[:port] entries below are still strings
    if thosts is not None:
        if opts.fn_thost:
            fn_thost = os.path.join(rootpath, opts.fn_thost)
            with open(fn_thost, 'r') as f:
                for th in f.read().splitlines():
                    if th.find(':') > 0:
                        thosts.append(tuple(th.split(':')))
                    else:
                        thosts.append((th, port[0]))
        if opts.thost:
            if opts.thost.find(':') > 0:
                thosts.append(tuple(opts.thost.split(':')))
            else:
                thosts.append((opts.thost, port[0]))
    if users is not None and opts.fn_users:
        fn_users = os.path.join(rootpath, opts.fn_users)
        with open(fn_users, 'r') as f:
            users.extend(f.read().splitlines())
    if passwords is not None and opts.fn_passwords:
        fn_passwords = os.path.join(rootpath, opts.fn_passwords)
        with open(fn_passwords, 'r') as f:
            passwords.extend(f.read().splitlines())

    return opts
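
Because parse fills the caller-supplied lists in place, a minimal, hedged usage sketch (run from the same module, with an illustrative program name and description) looks like:

import sys

ips, nthreads, timeout = [], [], []
opts = parse(sys.argv[1:], "scanner", "a demo scanner", ".",
             ips=ips, nthreads=nthreads, timeout=timeout)
# After the call, ips holds every address from -i/-I/-r, while
# nthreads[0] and timeout[0] carry the parsed option values.
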
Code example #30
def Generator(pop_size, scan_rate, susc_size, inf, code):
    """
    Generates the malicious datasets by first calling Curve and then Malicious.

    Args:
        pop_size (float): Population size of the scanning worm
        scan_rate (float): Scanning rate of the computer worm
        susc_size (float): Susceptible size of the computer worm
        inf (int): Number of initially infected hosts
        code (int): Attack type
    """

    ### Input ###
    pop_size = float(pop_size)    # Population size
    scan_rate = float(scan_rate)  # Scanning rate
    susc_size = float(susc_size)  # Susceptible size
    inf = float(inf)              # Number of initially infected hosts
    code = int(code)              # Attack type code

    ### Data Generation ###
    inf, res, count = Curve(pop_size, scan_rate, susc_size, inf)  # Infection curve
    r = iptools.IpRange('10.0/16')  # Internal (NIST) address space

    ### Data Generation : Random Infected IP Addresses (Outside NIST) ###
    z = []
    i = 0

    ### Loop : collect susc_size external addresses ###
    while i < susc_size:

        ### Random IP Address Generation ###
        # Draw a random 32-bit address and keep it only if it falls outside
        # the internal 10.0/16 space (rejection sampling).
        p = socket.inet_ntoa(struct.pack('>I', random.randint(1, 0xffffffff)))

        ### Insert into List ###
        if p not in r:
            z.append(p)

            ### Increment : i ###
            i += 1

    ### Loop : derive inter-arrival times from the curve ###
    global time_t  # Accumulated inter-arrival times, in microseconds
    i = 0
    while i < count:

        ### Conversion to Microseconds ###
        # 300 seconds per step, split evenly across the step's res[i] flows.
        temp = 300000000 / res[i]
        time_t = time_t + [temp]

        ### Increment : i ###
        i += 1

    ### Variable : Initialization ###
    b_time = datetime.datetime.now()            # Base timestamp
    unixtime = time.mktime(b_time.timetuple())  # Unix time of the base timestamp

    ### File Operations : Open ###
    open(Time_filename, 'w').close()  # Truncate any previous output
    f = open(Time_filename, 'a')

    ### Loop : one record per flow, res[i] flows per time step ###
    i = 0
    k = 0
    while i < count:

        j = 0
        while j < res[i]:

            ### Data : Format ###
            Dt = b_time.strftime('%Y/%m/%dT%H:%M:%S')

            ### File Operation : Write ###
            f.write(str(z[k]) + "," + Dt + "," + str(unixtime))
            f.write("\n")

            ### Update : time ###
            b_time = b_time + datetime.timedelta(microseconds=time_t[i])
            unixtime = time.mktime(b_time.timetuple())

            ### Increment : k & j ###
            k += 1
            j += 1

        ### Increment : i ###
        i += 1

    print("Done!!")
    ### Malicious Traffic ###
    Malicious(pop_size, scan_rate, susc_size, inf, code, count)
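
A minimal, hedged invocation of Generator; the parameter values below are illustrative only and assume Curve, Malicious, Time_filename, and time_t are defined elsewhere in the same module:

if __name__ == '__main__':
    # Illustrative values: 360k-host population, 358 scans/s, 10k susceptible
    # hosts, one initial infection, attack type 3 (not taken from the source).
    Generator(360000, 358, 10000, 1, 3)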