Example #1
    def process_mail_creation(self, session, tid, data):
        user_id = data['user']['id']

        # Do not spool emails if the receiver has opted out of ntfns for this tip.
        if not data['tip']['enable_notifications']:
            log.debug("Discarding emails for %s due to receiver's preference.",
                      user_id)
            return

        # https://github.com/globaleaks/GlobaLeaks/issues/798
        # TODO: the current solution is global and configurable only by the admin
        sent_emails = self.state.get_mail_counter(user_id)
        if sent_emails >= self.state.tenant_cache[
                tid].notification.notification_threshold_per_hour:
            log.debug(
                "Discarding emails for receiver %s due to threshold already exceeded for the current hour",
                user_id)
            return

        self.state.increment_mail_counter(user_id)
        if sent_emails >= self.state.tenant_cache[
                tid].notification.notification_threshold_per_hour:
            log.info(
                "Reached threshold of %d emails with limit of %d for receiver %s",
                sent_emails,
                self.state.tenant_cache[tid].notification.
                notification_threshold_per_hour,
                user_id,
                tid=tid)

            # simply changing the type of the notification causes
            # the notification_limit_reached template to be sent
            data['type'] = u'receiver_notification_limit_reached'

        data['notification'] = self.serialize_config(session, 'notification',
                                                     tid,
                                                     data['user']['language'])
        data['node'] = self.serialize_config(session, 'node', tid,
                                             data['user']['language'])

        if not data['node']['allow_unencrypted'] and len(
                data['user']['pgp_key_public']) == 0:
            return

        subject, body = Templating().get_mail_subject_and_body(data)

        # If the receiver has encryption enabled encrypt the mail body
        if data['user']['pgp_key_public']:
            pgpctx = PGPContext(self.state.settings.tmp_path)
            fingerprint = pgpctx.load_key(
                data['user']['pgp_key_public'])['fingerprint']
            body = pgpctx.encrypt_message(fingerprint, body)

        session.add(
            models.Mail({
                'address': data['user']['mail_address'],
                'subject': subject,
                'body': body,
                'tid': tid,
            }))
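The snippet combines a per-tip opt-out with a per-receiver hourly rate limit kept in application state. A minimal self-contained sketch of that rate-limit pattern (the class and threshold below are illustrative, not part of the GlobaLeaks API):

import time
from collections import defaultdict

# Illustrative stand-in for state.get_mail_counter/increment_mail_counter:
# counts emails per receiver within the current clock hour.
class HourlyMailCounter:
    def __init__(self, threshold_per_hour=20):
        self.threshold = threshold_per_hour
        self.counts = defaultdict(int)
        self.hour = int(time.time() // 3600)

    def _roll(self):
        now_hour = int(time.time() // 3600)
        if now_hour != self.hour:  # a new hour started: reset all counters
            self.hour = now_hour
            self.counts.clear()

    def allow(self, user_id):
        self._roll()
        if self.counts[user_id] >= self.threshold:
            return False  # over threshold: discard the email
        self.counts[user_id] += 1
        return True

counter = HourlyMailCounter(threshold_per_hour=20)
print(counter.allow('receiver-1'))  # True until the 21st call in one hour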
Example #2
 def validate_admin_opt(self, pushed_af):
     fields = ['server', 'port', 'username', 'password']
     if all(field in pushed_af for field in fields):
         return True
     else:
         log.info('invalid mail settings for admin')
         return False
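Note that the check above only verifies that the keys are present, not that their values are sane. The same membership test in isolation:

# Standalone illustration of the all(...) membership test used above.
fields = ['server', 'port', 'username', 'password']
complete = {'server': 'mail.example.org', 'port': 587,
            'username': 'admin', 'password': 'secret'}
partial = {'server': 'mail.example.org'}
print(all(field in complete for field in fields))  # True
print(all(field in partial for field in fields))   # False: keys missing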
Example #3
    def report_disk_usage(self, free_bytes):
        """
        Here in Alarm is written the threshold to say if we're in disk alarm
        or not. Therefore the function "report" the amount of free space and
        the evaluation + alarm shift is performed here.
        """

        # Medium alarm threshold
        mat = Alarm._MEDIUM_DISK_ALARM * GLSetting.memory_copy.maximum_filesize
        hat = Alarm._HIGH_DISK_ALARM * GLSetting.memory_copy.maximum_filesize

        Alarm.latest_measured_freespace = free_bytes

        free_megabytes = free_bytes / (1000 * 1000)

        free_memory_str = bytes_to_pretty_str(free_bytes)

        if free_megabytes < hat:
            log.err("Warning: free space alarm (HIGH): only %s" %
                    free_memory_str)
            Alarm.stress_levels['disk_space'] = 2
        elif free_megabytes < mat:
            log.info("Warning: free space alarm (MEDIUM): %s" %
                     free_memory_str)
            Alarm.stress_levels['disk_space'] = 1
        else:
            Alarm.stress_levels['disk_space'] = 0
Example #4
def update_receiver_settings(store, receiver_id, request, language=GLSetting.memory_copy.default_language):
    receiver = store.find(Receiver, Receiver.id == unicode(receiver_id)).one()

    if not receiver:
        raise errors.ReceiverIdNotFound

    receiver.description[language] = request['description']

    new_password = request['password']
    old_password = request['old_password']

    if len(new_password) and len(old_password):
        receiver.user.password = change_password(receiver.user.password,
                                                 old_password,
                                                 new_password,
                                                 receiver.user.salt)

    mail_address = request['mail_address']

    if mail_address != receiver.mail_address:
        log.info("Email change %s => %s" % (receiver.mail_address, mail_address))
        receiver.mail_address = mail_address

    receiver.tip_notification = acquire_bool(request['tip_notification'])
    receiver.message_notification = acquire_bool(request['message_notification'])
    receiver.comment_notification = acquire_bool(request['comment_notification'])
    receiver.file_notification = acquire_bool(request['file_notification'])

    gpg_options_parse(receiver, request)

    return receiver_serialize_receiver(receiver, language)
Example #5
 def reactivate_receiver_mails(self):
     # The receiver becomes usable again
     log.info("Expiring email suspension for %s" % self.receiver_id)
     if self.receiver_id not in LastHourMailQueue.receivers_in_threshold:
         log.err("Error while reactivating mails for a receiver")
     else:
         LastHourMailQueue.receivers_in_threshold.remove(self.receiver_id)
Example #6
    def process_mail_creation(self, store, data):
        # https://github.com/globaleaks/GlobaLeaks/issues/798
        # TODO: the current solution is global and configurable only by the admin
        receiver_id = data['receiver']['id']
        sent_emails = GLSettings.get_mail_counter(receiver_id)
        if sent_emails >= GLSettings.memory_copy.notification_threshold_per_hour:
            log.debug(
                "Discarding emails for receiver %s due to threshold already exceeded for the current hour"
                % receiver_id)
            return

        GLSettings.increment_mail_counter(receiver_id)
        if sent_emails >= GLSettings.memory_copy.notification_threshold_per_hour:
            log.info(
                "Reached threshold of %d emails with limit of %d for receiver %s"
                % (sent_emails,
                   GLSettings.memory_copy.notification_threshold_per_hour,
                   receiver_id))

            # simply changing the type of the notification causes
            # the notification_limit_reached template to be sent
            data['type'] = u'receiver_notification_limit_reached'

        data['notification'] = db_get_notification(
            store, data['receiver']['language'])
        data['node'] = db_admin_serialize_node(store,
                                               data['receiver']['language'])

        if not data['node']['allow_unencrypted'] and data['receiver'][
                'pgp_key_status'] != u'enabled':
            return

        subject, body = Templating().get_mail_subject_and_body(data)

        # If the receiver has encryption enabled encrypt the mail body
        if data['receiver']['pgp_key_status'] == u'enabled':
            gpob = GLBPGP()

            try:
                gpob.load_key(data['receiver']['pgp_key_public'])
                body = gpob.encrypt_message(
                    data['receiver']['pgp_key_fingerprint'], body)
            except Exception as excep:
                log.err(
                    "Error in PGP interface object (for %s: %s)! (notification+encryption)"
                    % (data['receiver']['username'], str(excep)))

                return
            finally:
                # the finally block is always executed, even if the
                # except block contains a return or a raise
                gpob.destroy_environment()

        mail = models.Mail({
            'address': data['receiver']['mail_address'],
            'subject': subject,
            'body': body
        })

        store.add(mail)
Example #8
def update_db():
    """
    This function handles update of an existing database
    """
    db_version, _ = get_db_file(Settings.db_path)
    if db_version == 0:
        return 0

    log.err('Found an already initialized database version: %d', db_version)
    if db_version == DATABASE_VERSION:
        return DATABASE_VERSION

    log.err('Performing schema migration from version %d to version %d', db_version, DATABASE_VERSION)

    try:
        from globaleaks.db import migration
        migration.perform_migration(db_version)
    except Exception as exception:
        log.err('Migration failure: %s', exception)
        log.err('Verbose exception traceback:')
        etype, value, tback = sys.exc_info()
        log.info('\n'.join(traceback.format_exception(etype, value, tback)))
        return -1

    log.err('Migration completed successfully!')

    return DATABASE_VERSION
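The return contract here is: 0 when no database exists yet, -1 on migration failure, and DATABASE_VERSION on success. A hypothetical caller could branch on it like this (update_db and init_db are stubbed so the sketch runs standalone):

DATABASE_VERSION = 99  # stand-in for the real constant

def update_db():  # stub standing in for the function above
    return DATABASE_VERSION

def init_db():  # assumed initializer, stubbed for the sketch
    print("creating a fresh database")

def boot():
    ret = update_db()
    if ret == -1:
        raise SystemExit("database migration failed, refusing to start")
    if ret == 0:
        init_db()  # no database found: create one from scratch

boot()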
Example #9
    def process_mail_creation(self, store, data):
        receiver_id = data['receiver']['id']

        # Do not spool emails if the receiver has opted out of ntfns for this tip.
        if not data['tip']['enable_notifications']:
          log.debug("Discarding emails for %s due to receiver's preference." % receiver_id)
          return

        # https://github.com/globaleaks/GlobaLeaks/issues/798
        # TODO: the current solution is global and configurable only by the admin
        sent_emails = GLSettings.get_mail_counter(receiver_id)
        if sent_emails >= GLSettings.memory_copy.notification_threshold_per_hour:
            log.debug("Discarding emails for receiver %s due to threshold already exceeded for the current hour" %
                      receiver_id)
            return

        GLSettings.increment_mail_counter(receiver_id)
        if sent_emails >= GLSettings.memory_copy.notification_threshold_per_hour:
            log.info("Reached threshold of %d emails with limit of %d for receiver %s" % (
                     sent_emails,
                     GLSettings.memory_copy.notification_threshold_per_hour,
                     receiver_id)
            )

            # simply changing the type of the notification causes
            # the notification_limit_reached template to be sent
            data['type'] = u'receiver_notification_limit_reached'

        data['notification'] = db_get_notification(store, data['receiver']['language'])
        data['node'] = db_admin_serialize_node(store, data['receiver']['language'])

        if not data['node']['allow_unencrypted'] and data['receiver']['pgp_key_status'] != u'enabled':
            return

        subject, body = Templating().get_mail_subject_and_body(data)

        # If the receiver has encryption enabled encrypt the mail body
        if data['receiver']['pgp_key_status'] == u'enabled':
            gpob = GLBPGP()

            try:
                gpob.load_key(data['receiver']['pgp_key_public'])
                body = gpob.encrypt_message(data['receiver']['pgp_key_fingerprint'], body)
            except Exception as excep:
                log.err("Error in PGP interface object (for %s: %s)! (notification+encryption)" %
                        (data['receiver']['username'], str(excep)))

                return
            finally:
                # the finally block is always executed, even if the
                # except block contains a return or a raise
                gpob.destroy_environment()

        mail = models.Mail({
            'address': data['receiver']['mail_address'],
            'subject': subject,
            'body': body
        })

        store.add(mail)
Example #10
def update_receiver_settings(store,
                             receiver_id,
                             request,
                             language=GLSetting.memory_copy.default_language):
    receiver = store.find(Receiver, Receiver.id == unicode(receiver_id)).one()

    if not receiver:
        raise errors.ReceiverIdNotFound

    receiver.description[language] = request['description']

    new_password = request['password']
    old_password = request['old_password']

    if len(new_password) and len(old_password):
        receiver.user.password = change_password(receiver.user.password,
                                                 old_password, new_password,
                                                 receiver.user.salt)

    mail_address = request['mail_address']

    if mail_address != receiver.mail_address:
        log.info("Email change %s => %s" %
                 (receiver.mail_address, mail_address))
        receiver.mail_address = mail_address

    receiver.tip_notification = acquire_bool(request['tip_notification'])
    receiver.message_notification = acquire_bool(
        request['message_notification'])
    receiver.comment_notification = acquire_bool(
        request['comment_notification'])
    receiver.file_notification = acquire_bool(request['file_notification'])

    gpg_options_parse(receiver, request)

    return receiver_serialize_receiver(receiver, language)
Example #11
    def perform_pgp_validation_checks(self, session):
        tenant_expiry_map = {1: []}

        for user in db_get_expired_or_expiring_pgp_users(
                session, self.state.tenant_cache.keys()):
            user_desc = user_serialize_user(
                session, user,
                self.state.tenant_cache[user.tid].default_language)
            tenant_expiry_map.setdefault(user.tid, []).append(user_desc)

            if user.pgp_key_expiration < datetime_now():
                log.info('Removing expired PGP key of: %s',
                         user.username,
                         tid=user.tid)
                user.pgp_key_public = ''
                user.pgp_key_fingerprint = ''
                user.pgp_key_expiration = datetime_null()

        for tid, expired_or_expiring in tenant_expiry_map.items():
            if self.state.tenant_cache[
                    tid].notification.disable_admin_notification_emails:
                continue

            if expired_or_expiring:
                self.prepare_admin_pgp_alerts(session, tid,
                                              expired_or_expiring)

            for user_desc in expired_or_expiring:
                self.prepare_user_pgp_alerts(session, tid, user_desc)
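The per-tenant grouping relies on dict.setdefault, with tenant 1 seeded up front so the root tenant is always present in the map. The same pattern in isolation:

# Standalone illustration of the setdefault grouping used above.
users = [(1, 'alice'), (2, 'bob'), (1, 'carol')]  # (tid, username) pairs
tenant_expiry_map = {1: []}                       # tenant 1 always present
for tid, username in users:
    tenant_expiry_map.setdefault(tid, []).append(username)
print(tenant_expiry_map)  # {1: ['alice', 'carol'], 2: ['bob']}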
Example #12
def update_db():
    """
    This function handles update of an existing database
    """
    db_version, db_file_path = get_db_file(Settings.working_path)
    if db_version == 0:
        return 0

    try:
        with warnings.catch_warnings():
            from globaleaks.db import migration
            warnings.simplefilter("ignore", category=sa_exc.SAWarning)

            log.err('Found an already initialized database version: %d',
                    db_version)
            if db_version == DATABASE_VERSION:
                migration.perform_data_update(db_file_path)
                return DATABASE_VERSION

            log.err(
                'Performing schema migration from version %d to version %d',
                db_version, DATABASE_VERSION)

            migration.perform_migration(db_version)

    except Exception as exception:
        log.err('Migration failure: %s', exception)
        log.err('Verbose exception traceback:')
        etype, value, tback = sys.exc_info()
        log.info('\n'.join(traceback.format_exception(etype, value, tback)))
        return -1

    log.err('Migration completed successfully!')

    return DATABASE_VERSION
Example #13
    def __init__(self, net_sockets, proxy_ip, proxy_port):
        log.info("Starting process monitor")

        self.shutting_down = False

        self.start_time = datetime_now()
        self.tls_process_pool = []
        self.tls_process_state = {
            'deaths': 0,
            'last_death': datetime_now(),
            'target_proc_num': multiprocessing.cpu_count(),
        }

        self.worker_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), 'https_worker.py')

        self.tls_cfg = {
            'proxy_ip': proxy_ip,
            'proxy_port': proxy_port,
            'debug': log.loglevel <= logging.DEBUG,
        }

        if len(net_sockets) == 0:
            log.err("No ports to bind to! Spawning processes will not work!")

        self.tls_cfg['tls_socket_fds'] = [ns.fileno() for ns in net_sockets]
Example #14
        def startup_callback(tor_conn):
            self.print_startup_error = True
            self.tor_conn = tor_conn
            self.tor_conn.protocol.on_disconnect = restart_deferred

            log.debug('Successfully connected to Tor control port')

            hs_loc = ('80 localhost:8083')
            if not hostname and not key:
                log.info('Creating new onion service')
                ephs = EphemeralHiddenService(hs_loc)
            else:
                log.info('Setting up existing onion service %s', hostname)
                ephs = EphemeralHiddenService(hs_loc, key)

            @inlineCallbacks
            def initialization_callback(ret):
                log.info('Initialization of hidden-service %s completed.',
                         ephs.hostname)
                if not hostname and not key:
                    yield set_onion_service_info(ephs.hostname,
                                                 ephs.private_key)
                    yield refresh_memory_variables()

            d = ephs.add_to_tor(self.tor_conn.protocol)
            d.addCallback(initialization_callback)  # pylint: disable=no-member
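The startup path hangs its initialization logic off the Deferred returned by add_to_tor. The underlying Twisted callback pattern, reduced to a runnable sketch (assuming only Twisted itself is installed):

from twisted.internet import defer

# Generic Deferred callback/errback chaining, as used above.
def on_ready(result):
    print("service ready:", result)
    return result

def on_failure(failure):
    print("startup failed:", failure.getErrorMessage())

d = defer.Deferred()
d.addCallbacks(on_ready, on_failure)
d.callback("hidden-service hostname")  # fires on_ready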
Example #15
 def initialization_callback(ret):
     log.info('Initialization of hidden-service %s completed.',
              ephs.hostname)
     if not hostname and not key:
         yield set_onion_service_info(ephs.hostname,
                                      ephs.private_key)
         yield refresh_memory_variables()
Example #16
    def operation():
        """
        Goal of this function is to check all the submission not
        finalized, and, if the expiration time sets in the context has
        been reached, then clean the submission_gus along with the fields,
        and, if present, the uploaded folder/files.

        Second goal of this function, is to check all the InternalTip(s)
        and their expiration date, if match, remove that, all the folder,
        comment and tip related.
        """
        try:
            submissions = yield get_tiptime_by_marker(InternalTip._marker[0])  # Submission
            log.debug("(Cleaning routines) %d unfinished Submission are check if expired" % len(submissions))
            for submission in submissions:
                if is_expired(iso2dateobj(submission["creation_date"]), seconds=submission["submission_life_seconds"]):
                    log.info(
                        "Deleting an unfinalized Submission (creation date: %s) files %d"
                        % (submission["creation_date"], submission["files"])
                    )
                    yield itip_cleaning(submission["id"])

            tips = yield get_tiptime_by_marker(InternalTip._marker[2])  # First
            log.debug("(Cleaning routines) %d Tips stored are check if expired" % len(tips))
            for tip in tips:
                if is_expired(iso2dateobj(tip["creation_date"]), seconds=tip["tip_life_seconds"]):
                    log.info(
                        "Deleting an expired Tip (creation date: %s) files %d comments %d"
                        % (tip["creation_date"], tip["files"], tip["comments"])
                    )
                    yield itip_cleaning(tip["id"])

        except Exception as excep:
            log.err("Exception failure in submission/tip cleaning routine (%s)" % excep.message)
            sys.excepthook(*sys.exc_info())
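Both loops hinge on an is_expired helper. A plausible shape for it, inferred from the call sites here (the project's real utility may differ):

from datetime import datetime, timedelta

def is_expired(check_date, seconds=0, minutes=0, hours=0, days=0):
    """Sketch: True when check_date plus the given offsets lies in the past."""
    offset = timedelta(seconds=seconds, minutes=minutes, hours=hours, days=days)
    return datetime.utcnow() > check_date + offset

print(is_expired(datetime.utcnow() - timedelta(hours=2), hours=1))  # True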
Example #17
def update_db():
    """
    This function handles update of an existing database
    """
    try:
        db_version, db_file_path = get_db_file(GLSettings.db_path)
        if db_version == 0:
            return 0

        from globaleaks.db import migration

        log.err("Found an already initialized database version: %d" %
                db_version)

        if db_version >= FIRST_DATABASE_VERSION_SUPPORTED and db_version < DATABASE_VERSION:
            log.err(
                "Performing schema migration from version %d to version %d" %
                (db_version, DATABASE_VERSION))
            migration.perform_schema_migration(db_version)
            log.err("Migration completed with success!")

        else:
            log.err('Performing data update')
            # TODO on normal startup this line is run. We need better control flow here.
            migration.perform_data_update(
                os.path.abspath(
                    os.path.join(GLSettings.db_path,
                                 'glbackend-%d.db' % DATABASE_VERSION)))

    except Exception as exception:
        log.err("Migration failure: %s" % exception)
        log.err("Verbose exception traceback:")
        etype, value, tback = sys.exc_info()
        log.info('\n'.join(traceback.format_exception(etype, value, tback)))
        return -1
Example #18
    def initialization_callback(ret):
        log.info('Initialization of hidden-service %s completed.' %
                 (ephs.hostname))
        if hasattr(ephs, 'private_key'):
            yield set_onion_service_info(ephs.hostname, ephs.private_key)

        reactor.addSystemEventTrigger('before', 'shutdown', shutdown_callback)
Example #20
def del_cfg_not_in_groups(store):
    where = And(Not(Config.var_group == u'node'), Not(Config.var_group == u'notification'),
                Not(Config.var_group == u'private'))
    res = store.find(Config, where)
    for c in res:
        log.info("Removing extra Config <%s>" % c)
    store.find(Config, where).remove()
Example #21
    def cert_expiration_checks(self, store):
        priv_fact = models.config.PrivateFactory(store)

        if not priv_fact.get_val(u'https_enabled'):
            return

        cert = load_certificate(FILETYPE_PEM, priv_fact.get_val(u'https_cert'))
        expiration_date = letsencrypt.convert_asn1_date(cert.get_notAfter())

        # Acme renewal checks
        if priv_fact.get_val(u'acme') and datetime.now(
        ) > expiration_date - timedelta(days=self.acme_try_renewal):
            try:
                db_acme_cert_issuance(store)
            except Exception as excep:
                self.acme_failures += 1
                log.err('ACME certificate renewal failed with: %s', excep)
                raise

            self.should_restart_https = True
            self.acme_failures = 0

        # Regular certificates expiration checks
        elif datetime.now() > expiration_date - timedelta(
                days=self.notify_expr_within):
            log.info('The HTTPS Certificate is expiring on %s',
                     expiration_date)
            if not State.tenant_cache[
                    1].notif.disable_admin_notification_emails:
                self.certificate_mail_creation(store, expiration_date)
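Both branches compare now() against the certificate's notAfter minus a sliding window of days. The window arithmetic in isolation (the day counts are illustrative):

from datetime import datetime, timedelta

expiration_date = datetime(2030, 1, 1)  # would come from the parsed cert
acme_try_renewal = 15    # days before expiry to attempt ACME renewal
notify_expr_within = 15  # days before expiry to alert the admin

should_renew = datetime.now() > expiration_date - timedelta(days=acme_try_renewal)
should_notify = datetime.now() > expiration_date - timedelta(days=notify_expr_within)
print(should_renew, should_notify)  # False, False until 2029-12-17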
Example #22
    def operation(self):
        """
        This function, checks all the InternalTips and their expiration date.
        if expired InternalTips are found, it removes that along with
        all the related DB entries comment and tip related.
        """

        # Reset the exception tracking variable of GLSetting
        GLSetting.exceptions = {}

        tip_list = yield get_tip_timings()
        log.debug("Tip(s) subject to the timings check: %d" % len(tip_list))

        for tip in tip_list:

            if is_expired(ISO8601_to_datetime(tip['expiration_date'])):
                log.info("Deleting an expired Tip (creation date: %s, expiration %s) files %d comments %d" %
                         (tip['creation_date'], tip['expiration_date'], tip['files'], tip['comments']))

                yield itip_cleaning(tip['id'])
                continue

            # check if the tip is going to expire within 48 hours (hard-coded value above)
            if is_expired(ISO8601_to_datetime(tip['upcoming_expiration_date'])):
                log.debug("Spotted a Tip matching the upcoming expiration date and "
                          "triggering email notifications")

                expiring_tips_events = ExpiringTipEvent()
                yield expiring_tips_events.notify(tip['id'])
                yield save_events_on_db(expiring_tips_events.events)
Example #23
def delete_anomaly_history(store):
    allanom = store.find(Anomalies)

    log.info("Deleting %d entries from Anomalies table" % allanom.count())

    allanom.remove()

    log.info("Anomalies collection removal completed.")
Example #24
    def generate_dh_params_if_missing(cls):
        gen_dh = yield FileResource.should_gen_dh_params()
        if gen_dh:
            log.info("Generating the HTTPS DH params with %d bits" % GLSettings.key_bits)
            dh_params = yield deferToThread(tls.gen_dh_params, GLSettings.key_bits)

            log.info("Storing the HTTPS DH params")
            yield cls.save_dh_params(dh_params)
Example #26
def delete_weekstats_history(store):
    allws = store.find(Stats)

    log.info("Deleting %d entries from Stats table" % allws.count())

    allws.remove()

    log.info("Week statistics removal completed.")
Example #27
def db_clean_expired_wbtips(store):
    threshold = datetime_now() - timedelta(days=GLSettings.memory_copy.wbtip_timetolive)

    wbtips = store.find(models.WhistleblowerTip, models.WhistleblowerTip.internaltip_id == models.InternalTip.id,
                                                 models.InternalTip.wb_last_access < threshold)
    for wbtip in wbtips:
        log.info("Disabling WB access to %s" % wbtip.internaltip_id)
        store.remove(wbtip)
Example #29
def db_clean_expired_wbtips(store):
    threshold = datetime_now() - timedelta(days=GLSettings.memory_copy.wbtip_timetolive)

    itips = store.find(models.InternalTip, models.InternalTip.wb_last_access < threshold)
    for itip in itips:
        if itip.whistleblowertip is not None:
            log.info("Disabling WB access to %s" % itip.id)
            store.remove(itip.whistleblowertip)
Example #30
    def check_disk_anomalies(self):
        """
        Here in Alarm is written the threshold to say if we're in disk alarm
        or not. Therefore the function "report" the amount of free space and
        the evaluation + alarm shift is performed here.

        workingdir: is performed a percentage check (at least 1% and an absolute comparison)

        "unusable node" threshold: happen when the space is really shitty.
        https://github.com/globaleaks/GlobaLeaks/issues/297
        https://github.com/globaleaks/GlobaLeaks/issues/872
        """
        self.measured_freespace, self.measured_totalspace = get_disk_space(
            self.state.settings.working_path)

        disk_space = 0
        disk_message = ""
        accept_submissions = True
        old_accept_submissions = State.accept_submissions

        for c in get_disk_anomaly_conditions(self.measured_freespace,
                                             self.measured_totalspace):
            if c['condition']:
                disk_space = c['alarm_level']

                info_msg = c['info_msg']()

                if disk_space == 2:
                    disk_message = "[FATAL] Disk anomaly, submissions disabled: %s" % info_msg
                else:  # == 1
                    disk_message = "[WARNING]: Disk anomaly: %s" % info_msg

                accept_submissions = c['accept_submissions']
                break

        # This check is temporary; we want to verify that the switch can be
        # logged as part of the Anomalies via this function
        old_alarm_level = self.alarm_levels['disk_space']
        if old_alarm_level != disk_space:
            if disk_message:
                log.err(disk_message)
            else:
                log.err("Available disk space returned to normal levels")

        # the value is set here with a single assignment in order to
        # minimize possible race conditions when resetting/setting the values
        self.alarm_levels['disk_space'] = disk_space
        self.alarm_levels['disk_message'] = disk_message

        # if not on testing change accept_submission to the new value
        State.accept_submissions = accept_submissions if not self.state.settings.testing else True

        if old_accept_submissions != State.accept_submissions:
            log.info("Switching disk space availability from: %s to %s",
                     old_accept_submissions, accept_submissions)

            # Must invalidate the cache here because accept_submissions served in /public has changed
            ApiCache.invalidate()
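The loop consumes condition dicts produced by get_disk_anomaly_conditions. Their shape can be reconstructed from the keys read above ('condition', 'alarm_level', 'info_msg', 'accept_submissions'); the rules below are illustrative, not the project's actual thresholds:

# Assumed shape of the entries returned by get_disk_anomaly_conditions,
# reconstructed from the keys the loop above reads.
def get_disk_anomaly_conditions(free_bytes, total_bytes):
    return [
        {
            'condition': free_bytes < 0.01 * total_bytes,  # illustrative rule
            'alarm_level': 2,
            'info_msg': lambda: "less than 1% of disk space available",
            'accept_submissions': False,
        },
        {
            'condition': free_bytes < 0.05 * total_bytes,
            'alarm_level': 1,
            'info_msg': lambda: "less than 5% of disk space available",
            'accept_submissions': True,
        },
    ]

conds = get_disk_anomaly_conditions(5 * 10**8, 10**11)  # 0.5% free
print([c['condition'] for c in conds])  # [True, True]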
Example #31
    def delete(self, tenant_id):
        """
        Delete the specified tenant.
        """
        tenant_id = int(tenant_id)

        log.info('Removing tenant with id: %d', tenant_id, tid=self.request.tid)

        return delete(tenant_id)
Example #32
    def get(self, token):
        tmp_chall_dict = State.tenant_state[
            self.request.tid].acme_tmp_chall_dict
        if token in tmp_chall_dict:
            log.info('Responding to valid .well-known request [%d]',
                     self.request.tid)
            return tmp_chall_dict[token].tok

        raise errors.ResourceNotFound
Example #33
    def post(self):
        """
        Create a new tenant
        """
        request = self.validate_message(self.request.content.read(), requests.AdminTenantDesc)

        log.info('Creating new tenant', tid=self.request.tid)

        return create(request)
Example #34
 def launch_worker(self):
     pp = HTTPSProcProtocol(self, self.tls_cfg)
     reactor.spawnProcess(pp,
                          executable, [executable, self.worker_path],
                          childFDs=pp.fd_map,
                          env=os.environ)
     self.tls_process_pool.append(pp)
     log.info('Launched: %s' % pp)
     return pp.startup_promise
Example #35
 def every_notification_failed(self, store, failure, event_id):
     if event_id:
         log.err("Mail delivery failure for event %s (%s)" % (event_id, failure))
         evnt = store.find(EventLogs, EventLogs.id == event_id).one()
         if not evnt:
             log.info("Race condition spotted: Event has been deleted during the notification process")
         else:
             evnt.mail_sent = True
     else:
         log.err("Mail (Digest|Anomaly) error")
Example #36
    def get_cleaning_map(self, store):
        subjects = store.find(InternalTip, InternalTip.expiration_date < datetime_now())

        itip_id_list = []
        for itip in subjects:
            itip_id_list.append(unicode(itip.id))

        if itip_id_list:
            log.info("Removal of %d InternalTips starts soon" % subjects.count())
        return itip_id_list
Example #38
    def add_hidden_service(self, tid, hostname, key):
        if self.tor_conn is None:
            return

        hs_loc = ('80 localhost:8083')
        if not hostname and not key:
            if tid in self.startup_semaphore:
                log.debug('Still waiting for hidden service to start', tid=tid)
                return self.startup_semaphore[tid]

            log.info('Creating new onion service', tid=tid)
            ephs = EphemeralHiddenService(hs_loc)
        else:
            log.info('Setting up existing onion service %s', hostname, tid=tid)
            ephs = EphemeralHiddenService(hs_loc, key)
            self.hs_map[hostname] = ephs

        @defer.inlineCallbacks
        def init_callback(ret):
            log.info('Initialization of hidden-service %s completed.',
                     ephs.hostname,
                     tid=tid)
            if not hostname and not key:
                if tid in State.tenant_cache:
                    self.hs_map[ephs.hostname] = ephs
                    yield set_onion_service_info(tid, ephs.hostname,
                                                 ephs.private_key)
                else:
                    yield ephs.remove_from_tor(self.tor_conn.protocol)

                tid_list = list(set([1, tid]))

                for x in tid_list:
                    ApiCache().invalidate(x)

                yield refresh_memory_variables(tid_list)

                del self.startup_semaphore[tid]

        def init_errback(failure):
            if tid in self.startup_semaphore:
                del self.startup_semaphore[tid]

            raise failure.value

        d = ephs.add_to_tor(self.tor_conn.protocol)

        # pylint: disable=no-member
        d.addCallbacks(init_callback, init_errback)
        # pylint: enable=no-member

        self.startup_semaphore[tid] = d

        return d
Example #39
def update_defaults(store):
    if not is_cfg_valid(store):
        log.info("This update will change system configuration")

        for fact_model in factories:
            fact_model(store).clean_and_add()

        del_cfg_not_in_groups(store)

    # Set the system version to the current aligned cfg
    PrivateFactory(store).set_val(u'version', __version__)
Example #40
def db_clean_expired_wbtips(store):
    threshold = datetime_now() - timedelta(
        days=GLSettings.memory_copy.wbtip_timetolive)

    wbtips = store.find(
        models.WhistleblowerTip,
        models.WhistleblowerTip.internaltip_id == models.InternalTip.id,
        models.InternalTip.wb_last_access < threshold)
    for wbtip in wbtips:
        log.info("Disabling WB access to %s" % wbtip.internaltip_id)
        store.remove(wbtip)
Example #41
def acquire_email_templates(filename, fallback):

    templ_f = os.path.join(GLSetting.static_db_source, filename)

    if not os.path.isfile(templ_f):
        return fallback

    # else, load from the .txt files
    with open(templ_f) as templfd:
        template_text = templfd.read()
        log.info("Loading %d bytes from template: %s" % (len(template_text), filename))
        return template_text
Example #42
def mark_event_as_sent(store, event_id):
    """
    Maybe for digest, maybe for filtering, this function mark an event as sent,
    but is not used in the "official notification success"
    """
    evnt = store.find(EventLogs, EventLogs.id == event_id).one()

    if not evnt:
        log.info("Race condition spotted: Event has been deleted during the notification process")
    else:
        evnt.mail_sent = True
        log.debug("Marked event [%s] as sent" % evnt.title)
Example #43
    def get_cleaning_map(self, store):
        subjects = store.find(InternalTip,
                              InternalTip.expiration_date < datetime_now())

        itip_id_list = []
        for itip in subjects:
            itip_id_list.append(unicode(itip.id))

        if itip_id_list:
            log.info("Removal of %d InternalTips starts soon" %
                     subjects.count())
        return itip_id_list
Example #44
def delete_anomaly_history(store):
    """
    Note: all the anomalies has to be in memory before being
        delete. In the long term this shall cause a memory exhaustion
    """
    allanom = store.find(Anomalies)
    log.info("Deleting %d entries from Anomalies History table"
             % allanom.count())

    allanom.remove()

    log.info("Anomalies collection removal completed.")
Example #45
def update_defaults(store):
    if not is_cfg_valid(store):
        log.info("This update will change system configuration")

        for fact_model in factories:
            factory = fact_model(store, lazy=False)
            factory.clean_and_add()

        del_cfg_not_in_groups(store)

    # Set the system version to the current aligned cfg
    prv = PrivateFactory(store)
    prv.set_val('version', __version__)
Example #46
def delete_weekstats_history(store):
    """
    Note: all the stats has to be in memory before being
        delete. In the long term this shall cause a memory exhaustion
    """

    allws = store.find(Stats)
    log.info("Deleting %d entries from hourly statistics table"
             % allws.count())

    allws.remove()

    log.info("Week statistics removal completed.")
Example #47
    def process_events(self, store):
        """
        :return:
            0  = No event has been processed
           -1  = Threshold reach, emergency mode.
           >0  = Some elements to be notified has been processed
        """

        _elemscount = store.find(self.model, self.model.new == True).count()

        if _elemscount > (GLSettings.jobs_operation_limit * 10):
            # If this situation happens, we are facing a serious problem.
            # The reasonable option is to skip the entire notification run for this specific trigger:
            # all the events are marked as "new = False" and written off,
            # and the admin is notified about it with an email.
            log.err("Waves of new %s received, notification suspended completely for all the %d %s (Threshold %d)" %
                    (self.trigger, _elemscount,
                     self.trigger, (GLSettings.jobs_operation_limit * 10)))
            store.find(self.model, self.model.new == True).set(new=False)
            return -1

        _elems = store.find(self.model, self.model.new == True)[:GLSettings.jobs_operation_limit]

        if _elemscount > GLSettings.jobs_operation_limit:
            log.info("Notification: Processing %d new event from a Queue of %d: %s(s) to be handled" %
                      (_elems.count(), _elemscount, self.trigger))
        elif _elemscount:
            log.debug("Notification: Processing %d new event: %s(s) to be handled" %
                      (_elems.count(), self.trigger))
        else:
            # No element to be processed
            return 0

        for e in _elems:
            # Mark the event as handled as a first step;
            # for resiliency reasons it is better to make sure the
            # state machine moves forward than to have starving events
            # due to possible exceptions during handling
            e.new = False
            self.process_event(store, e)

        db_save_events_on_db(store, self.events)
        log.debug("Notification: generated %d notification events of type %s" %
                  (len(self.events), self.trigger))

        return _elems.count()
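The docstring defines a three-way return contract. A hypothetical scheduler reacting to it might look like this (the trigger and the admin helper are stubbed so the sketch runs standalone):

class FakeTrigger:  # stub for the sketch only
    def process_events(self, store):
        return -1

def notify_admin(msg):  # assumed helper, stubbed here
    print("ADMIN:", msg)

def run_trigger(trigger, store):
    # -1 = emergency stop, 0 = nothing to do, >0 = events handled
    processed = trigger.process_events(store)
    if processed == -1:
        notify_admin("notification trigger entered emergency mode")
        return 0
    return processed

print(run_trigger(FakeTrigger(), store=None))  # prints the alert, then 0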
Example #48
def update_receiver_settings(store, receiver_id, request, language):
    """
    TODO: remind that 'description' is imported, but is not permitted
        by UI to be modified right now.
    """
    receiver = store.find(Receiver, Receiver.id == receiver_id).one()
    receiver.description[language] = request['description']

    if not receiver:
        raise errors.ReceiverIdNotFound

    receiver.user.language = request.get('language', GLSetting.memory_copy.language)
    receiver.user.timezone = request.get('timezone', GLSetting.memory_copy.default_timezone)

    new_password = request['password']
    old_password = request['old_password']

    if len(new_password) and len(old_password):
        receiver.user.password = change_password(receiver.user.password,
                                                 old_password,
                                                 new_password,
                                                 receiver.user.salt)

        if receiver.user.password_change_needed:
            receiver.user.password_change_needed = False

        receiver.user.password_change_date = datetime_now()

    mail_address = request['mail_address']
    ping_mail_address = request['ping_mail_address']

    if mail_address != receiver.mail_address:
        log.err("Email cannot be change by receiver, only by admin " \
                "%s rejected. Kept %s" % (receiver.mail_address, mail_address))

    if ping_mail_address != receiver.ping_mail_address:
        log.info("Ping email going to be update, %s => %s" % (
            receiver.ping_mail_address, ping_mail_address))
        receiver.ping_mail_address = ping_mail_address

    receiver.tip_notification = acquire_bool(request['tip_notification'])

    pgp_options_parse(receiver, request)

    return receiver_serialize_receiver(receiver, language)
Example #49
    def __init__(self, receiver_id, debug=False):
        self.debug = debug
        self.creation_date = datetime_now()
        self.receiver_id = receiver_id

        if receiver_id in LastHourMailQueue.receivers_in_threshold:
            log.err("Implementation error ? Receiver %s already present" % receiver_id)

        TempObj.__init__(self,
                         LastHourMailQueue.blocked_in_queue,
                         random.randint(0, 0xffff),
                         # seconds of validity:
                         GLSettings.memory_copy.notification_suspension_time,
                         reactor_override)

        log.info("Temporary disable emails for receiver %s for four hours" % self.receiver_id)
        LastHourMailQueue.receivers_in_threshold.append(receiver_id)
        self.expireCallbacks.append(self.reactivate_receiver_mails)
Example #50
def update_receiver_settings(store, receiver_id, request, language):
    user = db_user_update_user(store, receiver_id, request, language)
    if not user:
        raise errors.UserIdNotFound

    receiver = store.find(Receiver, Receiver.id == receiver_id).one()
    if not receiver:
        raise errors.ReceiverIdNotFound

    ping_mail_address = request['ping_mail_address']
    if ping_mail_address != receiver.ping_mail_address:
        log.info("Ping email going to be updated, %s => %s" % (
            receiver.ping_mail_address, ping_mail_address))
        receiver.ping_mail_address = ping_mail_address

    receiver.tip_notification = request['tip_notification']
    receiver.ping_notification = request['ping_notification']

    return receiver_serialize_receiver(receiver, language)
Example #51
    def report_disk_usage(self, free_mega_bytes):
        """
        Here in Alarm is written the threshold to say if we're in disk alarm
        or not. Therefore the function "report" the amount of free space and
        the evaluation + alarm shift is performed here.
        """

        # Medium alarm threshold
        mat = Alarm._MEDIUM_DISK_ALARM * GLSetting.memory_copy.maximum_filesize
        hat = Alarm._HIGH_DISK_ALARM * GLSetting.memory_copy.maximum_filesize

        Alarm.latest_measured_freespace = free_mega_bytes

        if free_mega_bytes < hat:
            log.err("Warning: free space HIGH ALARM: only %d Mb" % free_mega_bytes)
            Alarm.stress_levels['disk_space'] = 2
        elif free_mega_bytes < mat:
            log.info("Warning: free space medium alarm: %d Mb" % free_mega_bytes)
            Alarm.stress_levels['disk_space'] = 1
        else:
            Alarm.stress_levels['disk_space'] = 0
Example #52
def update_receiver_settings(store, receiver_id, request, language):
    """
    TODO: remind that 'description' is imported, but is not permitted
        by UI to be modified right now.
    """
    receiver = store.find(Receiver, Receiver.id == receiver_id).one()
    receiver.description[language] = request["description"]

    if not receiver:
        raise errors.ReceiverIdNotFound

    receiver.user.language = request.get("language", GLSettings.memory_copy.default_language)
    receiver.user.timezone = request.get("timezone", GLSettings.memory_copy.default_timezone)

    new_password = request["password"]
    old_password = request["old_password"]

    if len(new_password) and len(old_password):
        receiver.user.password = change_password(receiver.user.password, old_password, new_password, receiver.user.salt)

        if receiver.user.password_change_needed:
            receiver.user.password_change_needed = False

        receiver.user.password_change_date = datetime_now()

    ping_mail_address = request["ping_mail_address"]

    if ping_mail_address != receiver.ping_mail_address:
        log.info("Ping email going to be updated, %s => %s" % (receiver.ping_mail_address, ping_mail_address))
        receiver.ping_mail_address = ping_mail_address

    receiver.tip_notification = request["tip_notification"]
    receiver.ping_notification = request["ping_notification"]

    pgp_options_parse(receiver, request)

    node = store.find(Node).one()

    return receiver_serialize_receiver(receiver, node, language)
Example #53
    def operation(self):
        """
        Goal of this function is to check all the submission not
        finalized, and, if the expiration time sets in the context has
        been reached, then clean the submission_id along with the fields,
        and, if present, the uploaded folder/files.

        Second goal of this function, is to check all the InternalTip(s)
        and their expiration date, if match, remove that, all the folder,
        comment and tip related.

        Third goal of this function is to reset the exception counter that
        acts as limit for mail storm
        """
        try:
            # First Goal
            submissions = yield get_tiptime_by_marker(InternalTip._marker[0]) # Submission
            log.debug("(Cleaning routines) %d unfinished Submission are check if expired" % len(submissions))
            for submission in submissions:
                if is_expired(ISO8601_to_datetime(submission['creation_date']), GLSetting.defaults.submission_seconds_of_life):
                    log.info("Deleting an unfinalized Submission (creation %s expiration %s) files %d" %
                             (submission['creation_date'], submission['expiration_date'], submission['files']) )
                    yield itip_cleaning(submission['id'])

            # Second Goal
            tips = yield get_tiptime_by_marker(InternalTip._marker[2]) # First
            log.debug("(Cleaning routines) %d Tips stored are check if expired" % len(tips))
            for tip in tips:
                if is_expired(ISO8601_to_datetime(tip['expiration_date'])):
                    log.info("Deleting an expired Tip (creation date: %s, expiration %s) files %d comments %d" %
                             (tip['creation_date'], tip['expiration_date'], tip['files'], tip['comments']) )
                    yield itip_cleaning(tip['id'])

            # Third Goal: Reset of GLSetting.exceptions
            GLSetting.exceptions = {}

        except Exception as excep:
            log.err("Exception failure in submission/tip cleaning routine (%s)" % excep.message)
            sys.excepthook(*sys.exc_info())
Example #54
    def operation(self):
        """
        This function, checks all the InternalTips and their expiration date.
        if expired InternalTips are found, it removes that along with
        all the related DB entries comment and tip related.
        """

        # Reset the exception tracking variable of GLSetting
        GLSetting.exceptions = {}

        # Check1: check for expired InternalTips (new tips)
        new_tips = yield get_tip_timings(True)
        log.debug("[Tip timings routines / new / expiration ] #%d Tips" % len(new_tips))
        for tip in new_tips:
            if is_expired(ISO8601_to_datetime(tip['expiration_date'])):
                log.info("Deleting an expired Tip (creation date: %s, expiration %s) files %d comments %d" %
                         (tip['creation_date'], tip['expiration_date'], tip['files'], tip['comments']))

                yield itip_cleaning(tip['id'])

        # Check2: check for expired InternalTips (old tips)
        old_tips = yield get_tip_timings(False)
        log.debug("[Tip timings routines / old / expiration upcoming / expire ] #%d Tips" % len(old_tips))
        for tip in old_tips:
            # Check2.1: check if the tip is expired
            if is_expired(ISO8601_to_datetime(tip['expiration_date'])):
                log.info("Deleting an expired Tip (creation date: %s, expiration %s) files %d comments %d" %
                         (tip['creation_date'], tip['expiration_date'], tip['files'], tip['comments']))

                yield itip_cleaning(tip['id'])

            # Check2.2: check if the tip is expiring
            elif is_expired(ISO8601_to_datetime(tip['upcoming_expiration_date'])):
                log.debug("Spotted a Tip matching the upcoming expiration date and "
                          "triggering email notifications")

                expiring_tips_events = ExpiringTipEvent()
                yield expiring_tips_events.notify(tip['id'])
                yield save_events_on_db(expiring_tips_events.events)
Example #55
    def clean_and_add(self):
        cur = self.store.find(Config, Config.var_group == self.group)
        res = {c.var_name : c for c in cur}

        actual = set(res.keys())
        allowed = set(self.group_desc)

        missing = allowed - actual

        for key in missing:
            desc = self.group_desc[key]
            c = Config(self.group, key, desc.default)
            log.info("Adding new config %s" % c)
            self.store.add(c)

        extra = actual - allowed

        for key in extra:
            c = res[key]
            log.info("Removing unused config: %s" % c)
            self.store.remove(c)

        return len(missing), len(extra)
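clean_and_add reconciles the rows found in the store against the allowed group description with two set differences. The same reconciliation pattern in isolation:

# The set-difference reconciliation from clean_and_add, in isolation.
allowed = {'hostname', 'onionservice', 'version'}  # keys the schema permits
actual = {'hostname', 'obsolete_flag'}             # keys found in the store

missing = allowed - actual  # rows to add with their defaults
extra = actual - allowed    # rows to delete
print(sorted(missing))  # ['onionservice', 'version']
print(sorted(extra))    # ['obsolete_flag']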
Example #56
    def check_disk_anomalies(self, free_workdir_bytes, total_workdir_bytes, free_ramdisk_bytes, total_ramdisk_bytes):
        """
        Here in Alarm is written the threshold to say if we're in disk alarm
        or not. Therefore the function "report" the amount of free space and
        the evaluation + alarm shift is performed here.

        workingdir: is performed a percentage check (at least 1% and an absolute comparison)
        ramdisk: a 2kbytes is expected at least to store temporary encryption keys

        "unusable node" threshold: happen when the space is really shitty.
        https://github.com/globaleaks/GlobaLeaks/issues/297
        https://github.com/globaleaks/GlobaLeaks/issues/872
        """

        Alarm.latest_measured_freespace = free_workdir_bytes

        disk_space = 0
        disk_message = ""
        accept_submissions = True
        old_accept_submissions = GLSetting.memory_copy.accept_submissions

        for c in get_disk_anomaly_conditions(free_workdir_bytes,
                                             total_workdir_bytes,
                                             free_ramdisk_bytes,
                                             total_ramdisk_bytes):
            if c['condition']:
                disk_space = c['stress_level']

                info_msg = c['info_msg'](free_workdir_bytes,
                                         total_workdir_bytes,
                                         free_ramdisk_bytes,
                                         total_ramdisk_bytes)

                if disk_space == 3:
                    disk_message = "Fatal (Submission disabled): %s" % info_msg
                elif disk_space == 2:
                    disk_message = "Critical (Submission near to be disabled): %s" % info_msg
                else:  # == 1
                    disk_message = "Warning: %s" % info_msg

                accept_submissions = c['accept_submissions']

                log.err(disk_message)

                break

        # the value is set here with a single assignment in order to
        # minimize possible race conditions when resetting/setting the values
        Alarm.stress_levels['disk_space'] = disk_space
        Alarm.stress_levels['disk_message'] = disk_message
        GLSetting.memory_copy.accept_submissions = accept_submissions

        if old_accept_submissions != GLSetting.memory_copy.accept_submissions:
            log.info("Switching disk space availability from: %s to %s" % (
                "True" if old_accept_submissions else "False",
                "False" if old_accept_submissions else "True"))

            # Invalidate the cache of node avoiding accesses to the db from here;
            # import GLApiCache in order to avoid circular import error
            from globaleaks.handlers.base import GLApiCache
            GLApiCache.invalidate('node')
Example #57
    def compute_activity_level():
        """
        This function is called by the scheduled task, to update the
        Alarm level.

        At the end of the execution, reset to 0 the counters,
        this is why the content are copied for the statistic
        acquiring later.
        """
        # import here in order to avoid circular import error
        from globaleaks.handlers.admin.statistics import AnomaliesCollectionDesc

        Alarm.number_of_anomalies = 0

        current_event_matrix = {}

        requests_timing = []

        for _, event_obj in EventTrackQueue.queue.iteritems():
            current_event_matrix.setdefault(event_obj.event_type, 0)
            current_event_matrix[event_obj.event_type] += 1
            requests_timing.append(event_obj.request_time)

        if len(requests_timing) > 2:
            log.info("In latest %d seconds: worst RTT %f, best %f" %
                     ( GLSetting.anomaly_seconds_delta,
                       round(max(requests_timing), 2),
                       round(min(requests_timing), 2) )
                     )

        for event_name, threshold in Alarm.OUTCOMING_ANOMALY_MAP.iteritems():
            if event_name in current_event_matrix:
                if current_event_matrix[event_name] > threshold:
                    Alarm.number_of_anomalies += 1
                else:
                    log.debug("[compute_activity_level] %s %d < %d: it's OK (Anomalies recorded so far %d)" % (
                        event_name,
                        current_event_matrix[event_name],
                        threshold, Alarm.number_of_anomalies))

        previous_activity_sl = Alarm.stress_levels['activity']

        # Behavior: once the activity has reached a peak, the stress level
        # is raised to RED (two) and then decremented to YELLOW (one) in the
        # next evaluation.

        if Alarm.number_of_anomalies >= 2:
            report_function = log.msg
            Alarm.stress_levels['activity'] = 2
        elif Alarm.number_of_anomalies == 1:
            report_function = log.info
            Alarm.stress_levels['activity'] = 1
        else:
            report_function = log.debug
            Alarm.stress_levels['activity'] = 0

        # slow downgrade, if something has triggered a two, next step to 1
        if previous_activity_sl == 2 and not Alarm.stress_levels['activity']:
            Alarm.stress_levels['activity'] = 1

        # if there is any anomaly, or we are close to one, record it.
        if Alarm.number_of_anomalies >= 1 or Alarm.stress_levels['activity'] >= 1:
            AnomaliesCollectionDesc.update_AnomalyQ(current_event_matrix,
                                                Alarm.stress_levels['activity'])

        if previous_activity_sl or Alarm.stress_levels['activity']:
            report_function(
                "in Activity stress level switch from %d => %d" %
                (previous_activity_sl,
                 Alarm.stress_levels['activity']))

        # Alarm notification get the copy of the latest activities
        yield Alarm.admin_alarm_notification(current_event_matrix)

        defer.returnValue(Alarm.stress_levels['activity'] - previous_activity_sl)
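The activity level implements a one-step hysteresis: a RED (2) alarm decays to YELLOW (1) on the next quiet evaluation instead of dropping straight to 0. The rule distilled into a pure function:

# One-step hysteresis distilled from compute_activity_level: a level-2
# alarm decays to 1 on the next quiet evaluation, not to 0.
def next_activity_level(previous, anomalies):
    if anomalies >= 2:
        level = 2
    elif anomalies == 1:
        level = 1
    else:
        level = 0
    if previous == 2 and level == 0:
        level = 1  # slow downgrade
    return level

assert next_activity_level(2, 0) == 1
assert next_activity_level(1, 0) == 0
assert next_activity_level(0, 3) == 2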
Example #58
    def check_disk_anomalies(self, free_workdir_bytes, total_workdir_bytes, free_ramdisk_bytes, total_ramdisk_bytes):
        """
        Here in Alarm is written the threshold to say if we're in disk alarm
        or not. Therefore the function "report" the amount of free space and
        the evaluation + alarm shift is performed here.

        workingdir: is performed a percentage check (at least 1% and an absolute comparison)
        ramdisk: a 2kbytes is expected at least to store temporary encryption keys

        "unusable node" threshold: happen when the space is really shitty.
        https://github.com/globaleaks/GlobaLeaks/issues/297
        https://github.com/globaleaks/GlobaLeaks/issues/872
        """

        self.latest_measured_freespace = free_workdir_bytes
        self.latest_measured_totalspace = total_workdir_bytes

        disk_space = 0
        disk_message = ""
        accept_submissions = True
        old_accept_submissions = GLSettings.accept_submissions

        for c in get_disk_anomaly_conditions(free_workdir_bytes,
                                             total_workdir_bytes,
                                             free_ramdisk_bytes,
                                             total_ramdisk_bytes):
            if c['condition']:
                disk_space = c['stress_level']

                info_msg = c['info_msg']()

                if disk_space <= GLSettings.disk_alarm_threshold:
                    # No alarm to be concerned, then
                    disk_space = 0
                else:
                    if disk_space == 3:
                        disk_message = "Fatal (Submission disabled): %s" % info_msg
                    elif disk_space == 2:
                        disk_message = "Critical (Submission near to be disabled): %s" % info_msg
                    else:  # == 1
                        disk_message = "Warning: %s" % info_msg

                    accept_submissions = c['accept_submissions']
                    log.err(disk_message)
                    break

        # This check is temporary; we want to verify that the switch can be
        # logged as part of the Anomalies via this function
        old_stress_level = self.stress_levels['disk_space']
        if old_stress_level != disk_space:
            log.debug("Switch in Disk space available status, %d => %d" %
                      (old_stress_level, disk_space))

        # the value is set here with a single assignment in order to
        # minimize possible race conditions when resetting/setting the values
        self.stress_levels['disk_space'] = disk_space
        self.stress_levels['disk_message'] = disk_message

        # if not on testing change accept_submission to the new value
        GLSettings.accept_submissions = accept_submissions if not GLSettings.testing else True

        if old_accept_submissions != GLSettings.accept_submissions:
            log.info("Switching disk space availability from: %s to %s" % (
                "True" if old_accept_submissions else "False",
                "False" if old_accept_submissions else "True"))

            # Invalidate the cache of node avoiding accesses to the db from here
            GLApiCache.invalidate('node')
Example #59
    def compute_activity_level(self):
        """
        This function update the Alarm level.

        """
        self.number_of_anomalies = 0

        current_event_matrix = {}

        requests_timing = []

        for _, event_obj in event.EventTrackQueue.queue.iteritems():
            current_event_matrix.setdefault(event_obj.event_type, 0)
            current_event_matrix[event_obj.event_type] += 1
            requests_timing.append(event_obj.request_time)

        if len(requests_timing) > 2:
            log.info("In latest %d seconds: worst RTT %f, best %f" %
                     (10,
                      round(max(requests_timing), 2),
                      round(min(requests_timing), 2)))

        for event_name, threshold in self.OUTCOMING_ANOMALY_MAP.iteritems():
            if event_name in current_event_matrix:
                if current_event_matrix[event_name] > threshold:
                    self.number_of_anomalies += 1
                else:
                    log.debug("[compute_activity_level] %s %d < %d: it's OK (Anomalies recorded so far %d)" %
                              (event_name,
                               current_event_matrix[event_name],
                               threshold, self.number_of_anomalies))

        previous_activity_sl = self.stress_levels['activity']

        # Behavior: once the activity has reached a peak, the stress level
        # is raised to RED (two) and then decremented to YELLOW (one) in the
        # next evaluation.

        if self.number_of_anomalies >= 2:
            report_function = log.msg
            self.stress_levels['activity'] = 2
        elif self.number_of_anomalies == 1:
            report_function = log.info
            self.stress_levels['activity'] = 1
        else:
            report_function = log.debug
            self.stress_levels['activity'] = 0

        # slow downgrade, if something has triggered a two, next step to 1
        if previous_activity_sl == 2 and not self.stress_levels['activity']:
            self.stress_levels['activity'] = 1

        # if there is any anomaly, or we are close to one, record it.
        if self.number_of_anomalies >= 1 or self.stress_levels['activity'] >= 1:
            update_AnomalyQ(current_event_matrix, self.stress_levels['activity'])

        if previous_activity_sl or self.stress_levels['activity']:
            report_function("in Activity stress level switch from %d => %d" %
                            (previous_activity_sl,
                             self.stress_levels['activity']))


        yield self.generate_admin_alert_mail(current_event_matrix)

        defer.returnValue(self.stress_levels['activity'] - previous_activity_sl)