Example #1
def discover_ipv6_enum(_, interval, queue):
    """enqueues ranged derived from storage registered ipv6 addresses"""

    if not should_run('discover_ipv6_enum', timeparse(interval)):
        return

    queue = Queue.query.filter(Queue.name == queue).one()
    targets = set()
    query = Host.query.filter(func.family(Host.address) == 6).order_by(Host.address)
    for host in query.all():
        exploded = IPv6Address(host.address).exploded
        # do not enumerate EUI-64 hosts/nets; in the exploded form the 'ff:fe'
        # marker of an EUI-64 interface identifier sits at character positions 27:32
        if exploded[27:32] == 'ff:fe':
            continue

        exploded = exploded.split(':')
        # replace the last hextet with a 0-ffff range, covering the host's enclosing /112
        exploded[-1] = '0-ffff'
        target = ':'.join(exploded)

        targets.add(target)

    targets = filter_already_queued(queue, targets)
    queue_enqueue(queue, targets)

    if targets:
        current_app.logger.info(f'discover_ipv6_enum, queued {len(targets)} items')
    update_lastrun('discover_ipv6_enum')
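
The EUI-64 filter above relies on the fixed layout of the exploded form: when an interface identifier is derived from a MAC address, the 'ff:fe' marker lands exactly at character positions 27:32. A small standard-library check (the sample address is purely illustrative) confirms the slice:

from ipaddress import IPv6Address

# address with an EUI-64 style interface identifier (illustrative value)
exploded = IPv6Address('2001:db8::202:b3ff:fe1e:8329').exploded
print(exploded)         # 2001:0db8:0000:0000:0202:b3ff:fe1e:8329
print(exploded[27:32])  # ff:fe -> such a host would be skipped by discover_ipv6_enum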
Example #2
def rescan_services(_, interval, queue):
    """rescan services from storage; update known services info"""

    qref = Queue.query.filter(Queue.name == queue).one()

    now = datetime.utcnow()
    rescan_horizont = now - timedelta(seconds=timeparse(interval))
    query = Service.query.filter(or_(Service.rescan_time < rescan_horizont, Service.rescan_time == None))  # noqa: E501,E711  pylint: disable=singleton-comparison

    rescan, ids = [], []
    for service in windowed_query(query, Service.id):
        item = f'{service.proto}://{format_host_address(service.host.address)}:{service.port}'
        rescan.append(item)
        ids.append(service.id)
    # ORM is bypassed for performance reasons in case of large rescans
    update_statement = Service.__table__.update().where(Service.id.in_(ids)).values(rescan_time=now)
    db.session.execute(update_statement)
    db.session.commit()
    # expire cached instances so the bulk update is visible to subsequent ORM reads
    db.session.expire_all()

    rescan = filter_already_queued(qref, rescan)
    queue_enqueue(qref, rescan)

    if rescan:
        current_app.logger.info(f'rescan_services, rescan {len(rescan)} items')
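
windowed_query is a project helper that is not shown in these examples; it batches the iteration so large result sets are not loaded at once (SQLAlchemy's "windowed range query" recipe serves the same purpose). A minimal keyset-pagination stand-in, an assumption rather than the project's actual implementation, could look like:

def windowed_query_sketch(query, column, windowsize=5000):
    """assumed stand-in for windowed_query: iterate a large query in keyset-paginated chunks"""

    last_value = None
    while True:
        window = query
        if last_value is not None:
            window = window.filter(column > last_value)
        chunk = window.order_by(column).limit(windowsize).all()
        if not chunk:
            break
        yield from chunk
        last_value = getattr(chunk[-1], column.key)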
Example #3
def enqueue(ctx, queue):
    """enqueue to queue from context data"""

    if ctx.data:
        current_app.logger.info(f'enqueue {len(ctx.data)} targets to "{queue}"')
        queue = Queue.query.filter(Queue.name == queue).one()
        queue_enqueue(queue, filter_already_queued(queue, ctx.data))

    return ctx
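
The only requirement enqueue places on the context is a data attribute holding the targets. A hypothetical stand-in object, purely for illustration (the real planner context type is not shown in these examples):

from dataclasses import dataclass, field

@dataclass
class PipelineContextStub:
    """hypothetical stand-in for the planner context; only the data attribute is used by enqueue"""
    data: list = field(default_factory=list)

ctx = PipelineContextStub(data=['192.0.2.1', '192.0.2.2'])
# enqueue(ctx, 'example queue') would push both targets into that queue (the queue name is illustrative)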
Example #4
def discover_ipv6_dns(_, interval, netranges, queue):
    """enqueues all netranges into dns discovery queue"""

    if not should_run('discover_ipv6_dns', timeparse(interval)):
        return

    queue = Queue.query.filter(Queue.name == queue).one()
    count = 0
    for netrange in netranges:
        targets = filter_already_queued(queue, enumerate_network(netrange))
        count += len(targets)
        queue_enqueue(queue, targets)

    if count:
        current_app.logger.info(f'discover_ipv6_dns, queued {count} items')
    update_lastrun('discover_ipv6_dns')
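
enumerate_network is another project helper not shown here. Assuming it expands a netrange into individual host addresses, a minimal standard-library stand-in (sensible only for small prefixes; a /64 would yield 2**64 addresses) might be:

from ipaddress import ip_network

def enumerate_network_sketch(netrange):
    """assumed behaviour: expand a CIDR netrange into a list of host addresses"""
    return [str(addr) for addr in ip_network(netrange, strict=False).hosts()]

print(enumerate_network_sketch('2001:db8::/126'))  # ['2001:db8::1', '2001:db8::2', '2001:db8::3']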
Example #5
def rescan_hosts(_, interval, queue):
    """rescan hosts from storage; discovers new services on hosts"""

    qref = Queue.query.filter(Queue.name == queue).one()

    now = datetime.utcnow()
    rescan_horizont = now - timedelta(seconds=timeparse(interval))
    query = Host.query.filter(or_(Host.rescan_time < rescan_horizont, Host.rescan_time == None))  # noqa: E711  pylint: disable=singleton-comparison

    rescan, ids = [], []
    for host in windowed_query(query, Host.id):
        rescan.append(host.address)
        ids.append(host.id)
    # ORM is bypassed for performance reasons in case of large rescans
    update_statement = Host.__table__.update().where(Host.id.in_(ids)).values(rescan_time=now)
    db.session.execute(update_statement)
    db.session.commit()
    db.session.expire_all()

    rescan = filter_already_queued(qref, rescan)
    queue_enqueue(qref, rescan)

    if rescan:
        current_app.logger.info(f'rescan_hosts, rescan {len(rescan)} items')
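
The filter selects hosts that were never rescanned (rescan_time is NULL) or whose last rescan predates the horizon. The same predicate in plain Python, with an illustrative one-hour interval:

from datetime import datetime, timedelta

now = datetime.utcnow()
rescan_horizont = now - timedelta(seconds=3600)  # illustrative one-hour interval

def due_for_rescan(rescan_time):
    """mirror of the SQL filter: never scanned, or last scanned before the horizon"""
    return rescan_time is None or rescan_time < rescan_horizont

print(due_for_rescan(None))                         # True  (never scanned)
print(due_for_rescan(now - timedelta(minutes=10)))  # False (scanned recently)
print(due_for_rescan(now - timedelta(hours=2)))     # True  (older than the horizon)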