Example no. 1
    def dump_bucket(self, dry_run=False):
        """
        Create download workers
        Fetch and organize a list of Objects to be downloaded
        pass to workers
        """
        logger.info("Downloading bucket...")

        self._dry_run = dry_run
        _download_workers = [
            multiprocessing.Process(target=self._download_file)
            for _ in range(self._max_workers)
        ]
        for w in _download_workers:
            w.start()

        _objects = self._s3_client.list_objects()
        for _obj in _objects:
            logger.debug(f"Checking if '{_obj.key}' is downloaded")
            if not self._is_file_exists(_obj.key):
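                # Each queued key is paired with a semaphore release so a
                # worker blocked in _download_file can wake up and consume it.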
                self._download_queue.put(_obj.key)
                self._files_to_be_downloaded.release()

        # Telling workers to stop
        for _ in range(0, self._max_workers):
            self._download_queue.put(None)
            self._files_to_be_downloaded.release()

        # Joining workers
        for w in _download_workers:
            w.join()
        logger.info("Bucket downloaded")
Example no. 2
 def reset_configs(self):
     logger.warning(f"Deleting config file {self._CONFIG_FILE}")
     if os.path.exists(self._CONFIG_FILE):
         os.remove(self._CONFIG_FILE)
         logger.info("Config file successfully removed")
     self._config = self._default_config
     return self._config
Example no. 3
def run_rules(conf):
    data = rds.get_scan_data()
    exclusions = rds.get_exclusions()

    if not data:
        return

    for ip, values in data.items():
        rules = rule_manager(role='attacker')
        if 'ports' in values and len(values['ports']) > 0:
            for port in values['ports']:
                logger.info('Attacking Asset: {} on port: {}'.format(ip, port))
                for rule in rules.values():
                    """
            Check if the target is in exclusions list, if it is, skip.
          """
                    if rule.rule in exclusions and ip in exclusions[rule.rule]:
                        logger.debug('Skipping rule {} for target {}'.format(
                            rule.rule, ip))
                        continue
                    """
            Only run rules that are in the allowed_aggressive config level.
          """
                    if conf['config']['allow_aggressive'] >= rule.intensity:
                        thread = threading.Thread(target=rule.check_rule,
                                                  args=(ip, port, values,
                                                        conf))
                        thread.start()
Example no. 4
 def validate_unique(self):
     """
     By default, the form will complain if a user already exists with that
     username. But that message is confusing for Site admins who don't have a
     User with that username on their particular site. This function changes
     the error message so it's non-confusing, but also vague enough not to
     suggest that other Sites exist.
     """
     # Ensure our username is not taken in LDAP; we have to un-namespace it before doing the LDAP lookup.
     if self.cleaned_data.get('username').startswith(self.request.site.hostname + "-"):
         username = self.cleaned_data.get('username')[len(self.request.site.hostname)+1:]
         try:
             ldap_user = search_ldap_for_user(username)
         except ldap.LDAPError:
             # Not taken in LDAP
             ldap_user = None
             logger.exception('usr.local.ldap-check.exception', target_user=username)
         if ldap_user:
             logger.info('usr.local.ldap.taken', target_user=username)
             self.add_error('username', forms.ValidationError(self.error_messages['duplicate_username_in_ldap']))
     try:
         get_user_model().objects.get(username=self.cleaned_data.get('username'))
     except get_user_model().DoesNotExist:
         pass
     else:
         self.add_error('username', forms.ValidationError(self.error_messages['duplicate_username_local']))
Example no. 5
File: main.py Project: ninhhv/nerve
def quickstart():
  if request.method == 'POST':
    # In Quickstart, we only take the network provided by the user as input
    # The rest is as defined in config.py
    network = request.values.get('network')  
    
    if network:
      scan = copy.deepcopy(config.DEFAULT_SCAN)
      scan['targets']['networks'].append(network)
      schema = SchemaParser(scan, request)
      vfd, msg, scan = schema.verify()
      
      if vfd:
        res, code, msg = register.scan(scan)
        if res:
          logger.info('A scan was initiated')
          flash('Assessment started.', 'success')
          return redirect('/qs')
        else:
          flash(msg, 'error')
    
      else:
        flash(msg, 'error')
      
  return render_template('quickstart.html')
Example no. 6
def email(email, subject, body, html=None):
    """ Sends email """
    logger.info(f'Sending email to {email}... '
                f'Subject: {subject}  '
                f'Body: {body}')
    msg = Message(subject, to='%s <%s>' % (email, email), text=body, html=html)

    gmail.send(msg)
Example no. 7
 def get(self, request, format=None):
     data = {
         "status": "up",
         "build_version": settings.BUILD_VERSION,
         "build_date": settings.BUILD_DATE,
     }
     logger.info("Health check")
     return Response(data)
Example no. 8
 def get(self, request, format=None):
     data = {
         'status': 'up',
         'build_version': settings.BUILD_VERSION,
         'build_date': settings.BUILD_DATE
     }
     logger.info('Health check')
     return Response(data)
Example no. 9
    def store_vuln(self, value):
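        # Deduplicate findings: the Redis key is a SHA-1 of ip/port/rule_id/
        # rule_details, so an identical vulnerability is only stored once.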
        logger.info('Vulnerability detected')
        key = '{}{}{}{}'.format(value['ip'], value['port'], value['rule_id'],
                                value['rule_details'])
        key_hash = 'vuln_' + self.utils.hash_sha1(key)
        if self.r.exists(key_hash):
            return False

        self.store_json(key_hash, value)
Example no. 10
def parse_args_f():
    logger.info(f"sys.argv: {sys.argv}")
    parser = argparse.ArgumentParser(description='description here')
    parser.add_argument('-v',
                        '--version',
                        action='version',
                        version='%(prog)s 0.1')
    parser.add_argument('-a', '--arg1', required=True, help='this is for arg1')
    parser.add_argument('-b', '--arg2', required=True, help='this is for arg2')
    return parser.parse_args()
Example no. 11
    def get_sync_status(self, kwargs):
        files = self.syncer.scan(recursive=True)
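        # An entry whose third field is NOT_UPLOADED or NOT_SYNCED still needs
        # uploading; the target counts as synced only when no such entries remain.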
        synced = len(
            list(
                filter(lambda f: len(f) > 2 and f[2] in ("NOT_UPLOADED", "NOT_SYNCED"), files)
            )
        ) == 0
        logger.info(f"Target path scanned. Sync status: {'all synced' if synced else 'not synced'}")

        return {"synced": synced}
Example no. 12
    def _upload_file(self, rel_file_path, md5sum):
        if self._dry_run:
            return

        logger.info(f"Uploading file... ({rel_file_path})")
        self._client.put_object(
            object_key=rel_file_path,
            file_path=self._get_abs_file_path(rel_file_path),
            metadata={'md5sum': md5sum})
        self._set_last_synced(rel_file_path=rel_file_path)
        logger.info(f"File is uploaded! ({rel_file_path})")
Example no. 13
    def scan(self, kwargs):
        if not self.syncer.has_bucket_name():
            self.syncer.set_bucket_name(
                self.config_manager.get("DEFAULT_BUCKET_NAME")
            )

        path = self.config_manager.get("TARGET_PATH") \
            if kwargs.get("path") is None \
            else kwargs["path"]
        logger.info(f"Scanning directory: {path}")

        return self.syncer.scan(path, recursive=False)
Example no. 14
 def submit_webhook(self, webhook, cfg, data={}):
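   # POST the findings as JSON to the configured webhook URL; TLS verification
   # is disabled and failures are only logged, not raised.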
   logger.info('Sending the webhook...')
   try:
     data = {'status':'done', 'vulnerabilities':data, 'scan_config':cfg}
     requests.post(webhook, 
                   json=data, 
                   headers={'User-Agent':USER_AGENT, 
                           'Content-Type':'application/json'},
                   verify=False)
     return True
   except Exception as e:
     logger.error('Could not submit webhook: {}'.format(e))
   
   return
Example no. 15
 def redis_attack(self, ip, port, password):
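     # Try to authenticate against the Redis service with the candidate
     # password; a reply containing "OK" means the credential was accepted.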
     try:
         s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
         s.connect((ip, port))
         payload = 'AUTH {}\n'.format(password)
         s.sendall(payload.encode())
         data = s.recv(1024)
         logger.info(data)
         if 'OK' in data.decode('utf-8'):
             return True
     except Exception as e:
         logger.error(e)
         return False
     finally:
         s.close()
     return False
Example no. 16
    def save(self, commit=True):
        """
        If a Django User with this username already exists, pull it from the DB and add the specified Groups to it,
        instead of creating a new User object.
        """
        user = super(LDAPUserCreateForm, self).save(commit=False)
        # Users can access django-admin iff they are a superuser.
        user.is_staff = user.is_superuser

        username = self.cleaned_data['username']
        groups = Group.objects.filter(pk__in=self.cleaned_data['groups']).all()
        logger_extras = {
            'target_user': username,
            'target_user_superuser': user.is_superuser,
            'groups': ", ".join(str(g) for g in groups)
        }
        try:
            existing_user = get_user_model().objects.get(username=username)
        except get_user_model().DoesNotExist:
            existing_user = False
        else:
            user = existing_user
            for group in groups:
                user.groups.add(group)
        if existing_user:
            logger_extras['target_user_id'] = user.id

        populate_user_from_ldap(user)

        if commit:
            user.save()
            if not existing_user:
                # Set a random password for the user, otherwise they end up with
                # '' as their password.  If the LDAP user later gets removed
                # from LDAP (because they left, for instance), this
                # account reverts to being a local user and we don't want anyone
                # to be able to login as them.
                password = get_user_model().objects.make_random_password()
                user.set_password(password)

                # Only call save_m2m() if we're not updating an existing User. It'll try to overwrite the updated
                # user.groups list, AND it'll crash for some reason I haven't figured out.
                self.save_m2m()
                logger.info('user.ldap.create', **logger_extras)
            else:
                logger.info('user.ldap.update', **logger_extras)
        return user
Example no. 17
def login(request):
    # Since we're not logged in yet, get_logger()'s logger will not be bound
    # with the username, so we add that here.
    if request.user.is_authenticated and request.user.has_perm(
            'wagtailadmin.access_admin'):
        # User is already logged in. Just redirect them to wagtail home.
        return redirect('wagtailadmin_home')
    else:
        # This code is adapted from django.contrib.auth.login(), to allow us to log login successes and failures.
        username = request.POST.get(get_user_model().USERNAME_FIELD)
        redirect_to = request.POST.get(
            REDIRECT_FIELD_NAME, request.GET.get(REDIRECT_FIELD_NAME, ''))

        if request.method == 'POST':
            form = MultitenantLoginForm(request, data=request.POST)
            if form.is_valid():
                # Ensure the user-originating redirection url is safe.
                if not is_safe_url(url=redirect_to,
                                   allowed_hosts=[request.get_host()]):
                    redirect_to = resolve_url(settings.LOGIN_REDIRECT_URL)

                # Okay, security check complete. Log the user in.
                auth_login(request, form.get_user())
                # Normally we wouldn't need to override the username here, because request.user is now a real User, but
                # because a LocalUser's username is prefixed with their Site's hostname, we log the username string
                # they actually logged in with.
                logger.info('auth.login.success', username=username)
                return HttpResponseRedirect(redirect_to)
            else:
                logger.warning('auth.login.failed', username=username)
        else:
            form = MultitenantLoginForm(request)

        current_site = get_current_site(request)

        context = {
            'form': form,
            REDIRECT_FIELD_NAME: redirect_to,
            'site': current_site,
            'site_name': current_site.name,
            'show_password_reset': password_reset_enabled(),
            'username_field': get_user_model().USERNAME_FIELD,
        }

        return TemplateResponse(request, 'wagtailadmin/login.html', context)
Example no. 18
def remove_ldap_user(request, user_id):
    if hasattr(request, 'site'):
        if request.method == 'GET':
            user = get_object_or_404(get_user_model(), pk=user_id)
            site_hostname = request.site.hostname
            for group_name in ['Admins', 'Editors']:
                g = Group.objects.get(
                    name="{} {}".format(site_hostname, group_name))
                g.user_set.remove(user)
            msg = "{} {} ({}) no longer has admin rights on {}.".format(
                user.first_name, user.last_name, user.username, site_hostname)
            messages.success(request, mark_safe(msg))
            logger.info('user.ldap.disabled-for-site',
                        target_user=user.username)
    else:
        msg = "You must be on a particular site to remove an LDAP user."
        messages.error(request, mark_safe(msg))
    return redirect(reverse('wagtailusers_users:index'))
Example no. 19
    def save(self, commit=True):
        user = super(LocalUserCreateForm, self).save(commit=False)
        # Users can access django-admin iff they are a superuser.
        user.is_staff = user.is_superuser
        user.set_password(self.cleaned_data['password1'])

        if commit:
            user.save()

            # List the quoted group names for logging.
            group_names = [str(g) for g in Group.objects.filter(pk__in=self.cleaned_data['groups']).all()]
            if user.is_superuser:
                group_names.append('Superusers')
            self.save_m2m()
            logger.info(
                'user.local.create',
                target_user=user.username, target_user_superuser=user.is_superuser, groups=", ".join(group_names)
            )
        return user
Example no. 20
def run_rules(conf):
    data = rds.get_scan_data()

    if not data:
        return

    for ip, values in data.items():
        rules = rule_manager(role='attacker')
        if 'ports' in values and len(values['ports']) > 0:
            for port in values['ports']:
                logger.info('Attacking Asset: {} on port: {}'.format(ip, port))
                for rule in rules.values():
                    if conf['config']['allow_aggressive'] >= rule.intensity:
                        thread = threading.Thread(target=rule.check_rule,
                                                  args=(ip, port, values,
                                                        conf))
                        thread.name = 'rule_{}_{}_{}'.format(
                            rule.rule, ip, port)
                        thread.start()
Example no. 21
    def wrapper(*args, **kwargs):
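        # Guard the wrapped task with a non-blocking Redis lock so only one
        # worker runs it at a time; other workers just log and skip.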
        lock_name = kwargs.pop('lock_name',
                               os.environ.get('SERVER_DOMAIN',
                                              'oursites.com')) + f.__name__
        timeout = kwargs.pop('timeout', 60 * 5)
        have_lock = False

        client = redis.Redis(host=os.environ.get('CACHE'), port=6379, db=2)
        lock = client.lock(lock_name, timeout=timeout)

        try:
            have_lock = lock.acquire(blocking=False)
            if have_lock:
                f(*args, **kwargs)
            else:
                logger.info('celery.task.lock.already_locked', task=f.__name__)
        finally:
            if have_lock:
                lock.release()
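The snippet above shows only the inner wrapper; a minimal sketch of the enclosing decorator it would normally live in (the name single_instance_task, the functools.wraps usage, and the sample task are assumptions, not taken from the source):

import functools

def single_instance_task(f):
    # Hypothetical outer decorator: builds and returns the `wrapper` closure
    # shown above, so every call to the decorated task is guarded by the
    # non-blocking Redis lock.
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        ...  # body exactly as in the example above
    return wrapper

@single_instance_task
def refresh_search_index():
    ...  # hypothetical Celery task body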
Example no. 22
    def _download_file(self):
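        # Consumer loop: block on the semaphore until dump_bucket signals that
        # a key (or a None stop sentinel) is available on the download queue.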
        while self._files_to_be_downloaded.acquire():
            object_key = self._download_queue.get()
            if object_key is None:
                break  # A sentinel value to quit the loop

            if self._dry_run:
                return

            logger.info(f"Downloading file... ({object_key})")

            _file_path = os.path.abspath(
                os.path.join(self._target_path, object_key))
            _dir_name = os.path.dirname(_file_path)
            os.makedirs(_dir_name, exist_ok=True)

            self._s3_client.download_object(object_key=object_key,
                                            file_path=_file_path)

            self._download_queue.task_done()
Example no. 23
 def save(self, commit=True):
     """
     In case the data in LDAP has changed, or it failed to populate on the previous create/edit, we override save()
     to re-populate this User's personal info from LDAP.
     """
     user = super(LDAPUserEditForm, self).save(commit=False)
     # Users can access django-admin iff they are a superuser.
     user.is_staff = user.is_superuser
     populate_user_from_ldap(user)
     if commit:
         user.save()
         self.save_m2m()
         if self.has_changed():
             logger_extras = {
                 'target_user': user.username,
                 'target_user_superuser': user.is_superuser,
             }
             for field_name in self.changed_data:
                 logger_extras[field_name] = self.cleaned_data[field_name]
             logger.info('user.ldap.update', **logger_extras)
     return user
Example no. 24
def populate_user_from_ldap(user):
    """
    Populate the given User object's personal info from LDAP.
    """
    try:
        _, ldap_attrs = search_ldap_for_user(user.username)
        user.first_name = ldap_attrs['givenName'][0]
        user.last_name = ldap_attrs['sn'][0]
        user.email = ldap_attrs['CAPPrimaryEmail'][0]
    except (IndexError, KeyError, TypeError, ldap.LDAPError) as err:
        # If any of these attrs are missing, or anything goes wrong, fail gracefully instead of crashing.
        # These attrs are not essential, and will be updated if possible the next time this User is saved.
        logger.error('user.update_from_ldap.failed',
                     target_user=user.username,
                     reason="{}: {}".format(err.__class__.__name__, err))
    else:
        logger.info('user.update_from_ldap.success',
                    target_user=user.username,
                    first_name=user.first_name,
                    last_name=user.last_name,
                    email=user.email)
Example no. 25
    def submit_slack(self, hook, data={}):
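        # Build a Slack payload from the findings, skipping entries whose
        # rule_sev is 0, and post it to the incoming-webhook URL.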
        try:
            if not data:
                logger.info(
                    'Did not send slack notification, scan did not yield any result.'
                )
                return

            fields = []

            for _, value in data.items():
                if value['rule_sev'] == 0:
                    continue

                for k, v in value.items():

                    if not v:
                        v = 'N/A'

                    fields.append({'title': k, 'value': v, 'short': False})

            slack_data = {
                "color": '#000000',
                "pretext": "<!channel> NERVE Notification",
                "author_name": ':warning: Notification',
                "title": 'NERVE Report',
                "fields": fields,
            }
            response = requests.post(hook, data=json.dumps(slack_data))

            if response.status_code != 200:
                logger.error('Could not submit slack hook: {}'.format(
                    response.text))
            else:
                logger.info('Submitted slack hook')
        except Exception as e:
            logger.error('Could not submit slack hook: {}'.format(e))

        return
Example no. 26
def logout(request):
    """
    This code is identical to wagtail.wagtailadmin.views.account.logout, except for the post-logout redirect.
    """
    # The next_page argument is here just to make the function return faster. We won't be using this response.
    response = auth_views.LogoutView.as_view()(request, next_page='/')
    logger.info('auth.logout')
    messages.success(request, 'You have logged out.')
    # By default, logging out will generate a fresh sessionid cookie. We want to use the
    # absence of sessionid as an indication that front-end pages are being viewed by a
    # non-logged-in user and are therefore cacheable, so we forcibly delete the cookie here.
    response.delete_cookie(settings.SESSION_COOKIE_NAME,
                           settings.SESSION_COOKIE_PATH,
                           settings.SESSION_COOKIE_DOMAIN)

    # HACK: pretend that the session hasn't been modified, so that SessionMiddleware
    # won't override the above and write a new cookie.
    request.session.modified = False

    # Just in case something is messed up with the Site's settings, avoid a crash.
    try:
        alias_count = request.site.settings.aliases.count()
    except Exception:
        alias_count = 0

    # If we can get the referer, we use it as long as it's not an admin page (which would redirect back to the
    # login prompt). The user's already there, so we know it won't give a cert error.
    referer = request.META.get('HTTP_REFERER')
    if referer and '/admin/' not in referer:
        redirect_url = referer
    elif alias_count == 1:
        # If there is exactly one alias, redirect to the homepage of that domain.
        alias = request.site.settings.aliases.first()
        redirect_url = 'http://{}'.format(alias.domain)
    else:
        # We can't know which to pick among multiple aliases, so we fall back on the homepage of the canonical hostname.
        redirect_url = 'http://{}'.format(request.site.hostname)

    return HttpResponseRedirect(redirect_url)
Example no. 27
def attacker():
    count = 0
    logger.info('Attacker process started')

    while True:
        conf = rds.get_scan_config()

        if not conf:
            time.sleep(10)
            continue

        run_rules(conf)
        count += 1

        if count == conf['config']['scan_opts']['parallel_attack']:
            time.sleep(30)
            count = 0

            if threading.active_count() > 50:
                logger.debug(
                    'Sleeping for 30 seconds to control threads (Threads: {})'.
                    format(threading.active_count()))
                time.sleep(30)
Example no. 28
 def save(self, request):
     user = get_object_or_404(get_user_model(), username=self.cleaned_data['username'])
     kwargs = {
         'uidb64': urlsafe_base64_encode(force_bytes(user.id)),
         'token': default_token_generator.make_token(user)
     }
     reset_password_path = reverse('wagtailadmin_password_reset_confirm', kwargs=kwargs)
     domain = request.site.hostname
     context = {
         'user': user,
         'reset_password_url': 'https://{}{}'.format(domain, reset_password_path),
         'domain': domain,
     }
     self.send_mail(
         'wagtail_patches/users/reset_password_email_subject.txt',
         'wagtail_patches/users/reset_password_email.txt',
         context,
         from_email=settings.SERVER_EMAIL,
         to_email=user.email
     )
     logger.info(
         'user.local.password_reset.admin',
         target_user=user.username, target_user_superuser=user.is_superuser, target_user_email=user.email
     )
Example no. 29
def scanner():
    utils = Utils()
    scanner = Scanner()

    logger.info('Scanner process started')

    while True:
        if not rds.is_session_active():
            time.sleep(10)
            continue

        conf = rds.get_scan_config()

        if not conf:
            time.sleep(10)
            continue

        hosts = rds.get_ips_to_scan(
            limit=conf['config']['scan_opts']['parallel_scan'])

        if hosts:
            conf = rds.get_scan_config()
            scan_data = scanner.scan(
                hosts,
                max_ports=conf['config']['scan_opts']['max_ports'],
                custom_ports=conf['config']['scan_opts']['custom_ports'],
                interface=conf['config']['scan_opts']['interface'])

            if scan_data:
                for host, values in scan_data.items():
                    if 'ports' in values and values['ports']:
                        logger.info('Discovered Asset: {}'.format(host))
                        logger.debug('Host: {}, Open Ports: {}'.format(
                            host, values['ports']))
                        rds.store_topology(host)
                        rds.store_sca(host, values)
                        rds.store_inv(host, values)
                    else:
                        if values['status_reason'] == 'echo-reply':
                            logger.info('Discovered Asset: {}'.format(host))
                            rds.store_topology(host)
Example no. 30
def scanner():
    scanner = Scanner()

    logger.info('Scanner process started')

    while True:
        if not rds.is_session_active():
            time.sleep(10)
            continue

        conf = rds.get_scan_config()

        if not conf:
            time.sleep(10)
            continue

        c = ConfParser(conf)

        hosts = rds.get_ips_to_scan(limit=c.get_cfg_scan_threads())

        if hosts:
            conf = rds.get_scan_config()
            scan_data = scanner.scan(hosts,
                                     max_ports=c.get_cfg_max_ports(),
                                     custom_ports=c.get_cfg_custom_ports(),
                                     interface=c.get_cfg_netinterface())

            if scan_data:
                for host, values in scan_data.items():
                    if 'ports' in values and values['ports']:
                        logger.info('Discovered Asset: {}'.format(host))
                        logger.debug('Host: {}, Open Ports: {}'.format(
                            host, values['ports']))
                        rds.store_topology(host)
                        rds.store_sca(host, values)
                        rds.store_inv(host, values)
                    else:
                        if values['status_reason'] == 'echo-reply':
                            logger.info('Discovered Asset: {}'.format(host))
                            rds.store_topology(host)