Example #1
def request_new_certificate(hostname, accnt_key, priv_key, tmp_chall_dict,
                            directory_url):
    """Runs the entire process of ACME registration and certificate request"""

    client = create_v2_client(directory_url, accnt_key)

    try:
        client.net.account = client.new_account(
            messages.NewRegistration.from_data(terms_of_service_agreed=True))

    except errors.ConflictError as error:
        existing_reg = messages.RegistrationResource(uri=error.location)
        existing_reg = client.query_registration(existing_reg)
        client.update_registration(existing_reg)

    csr = crypto_util.make_csr(priv_key, [hostname], False)
    order = client.new_order(csr)

    log.info('Created a new order for the issuance of a certificate for %s',
             hostname)

    challb = select_http01_chall(order)

    _, chall_tok = challb.response_and_validation(client.net.key)
    v = challb.chall.encode("token")
    log.info('Exposing challenge on %s', v)
    tmp_chall_dict.set(v, ChallTok(chall_tok))

    cr = client.answer_challenge(challb, challb.response(client.net.key))
    log.debug('Acme CA responded to challenge request with: %s', cr)

    order = client.poll_and_finalize(order)

    return split_certificate_chain(order.fullchain_pem)
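
Example #1 relies on a select_http01_chall helper that is not shown in this listing. Judging from the equivalent inline loop in the run_acme_reg_to_finish examples further down, a plausible sketch (an assumption, not necessarily the project's actual helper) is:

# Hypothetical reconstruction of select_http01_chall, based on the inline
# challenge-selection loop of run_acme_reg_to_finish below.
from acme import challenges


def select_http01_chall(order):
    """Return the first HTTP-01 challenge among the order's authorizations."""
    for auth_req in order.authorizations:
        for chall_body in auth_req.body.challenges:
            if isinstance(chall_body.chall, challenges.HTTP01):
                return chall_body

    raise Exception("HTTP01 challenge unavailable!")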
Example #2
    def perform_pgp_validation_checks(self, session):
        tenant_expiry_map = {1: []}

        for user in db_get_expired_or_expiring_pgp_users(
                session, self.state.tenant_cache.keys()):
            user_desc = user_serialize_user(session, user, user.language)
            tenant_expiry_map.setdefault(user.tid, []).append(user_desc)

            if user.pgp_key_expiration < datetime_now():
                log.info('Removing expired PGP key of: %s',
                         user.username,
                         tid=user.tid)
                user.pgp_key_public = ''
                user.pgp_key_fingerprint = ''
                user.pgp_key_expiration = datetime_null()

        for tid, expired_or_expiring in tenant_expiry_map.items():
            for user_desc in expired_or_expiring:
                self.prepare_user_pgp_alerts(session, tid, user_desc)

            if self.state.tenant_cache[
                    tid].notification.disable_admin_notification_emails:
                continue

            if expired_or_expiring:
                self.prepare_admin_pgp_alerts(session, tid,
                                              expired_or_expiring)
Example #3
    def add_onion_service(self, tid, hostname, key):
        if self.tor_conn is None:
            return

        hs_loc = '80 localhost:8083'
        if not hostname and not key:
            log.err('Creating new onion service', tid=tid)

            if self.onion_service_version == 3:
                ephs = EphemeralHiddenService(hs_loc, 'NEW:ED25519-V3')
            else:
                ephs = EphemeralHiddenService(hs_loc, 'NEW:RSA1024')
        else:
            log.info('Setting up existing onion service %s', hostname, tid=tid)
            ephs = EphemeralHiddenService(hs_loc, key)
            self.hs_map[hostname] = ephs

        @inlineCallbacks
        def init_callback(ret):
            log.err('Initialization of onion-service %s completed.', ephs.hostname, tid=tid)
            if not hostname and not key:
                if tid in State.tenant_cache:
                    self.hs_map[ephs.hostname] = ephs
                    yield set_onion_service_info(tid, ephs.hostname, ephs.private_key)
                else:
                    yield ephs.remove_from_tor(self.tor_conn.protocol)

                tid_list = list(set([1, tid]))

                for x in tid_list:
                    Cache().invalidate(x)

                yield refresh_memory_variables(tid_list)

        return ephs.add_to_tor(self.tor_conn.protocol).addCallbacks(init_callback)  # pylint: disable=no-member
Example #4
    def cert_expiration_checks(self, session, tid):
        priv_fact = models.config.ConfigFactory(session, tid, 'node')

        if not priv_fact.get_val(u'https_enabled'):
            return

        cert = load_certificate(FILETYPE_PEM, priv_fact.get_val(u'https_cert'))
        expiration_date = letsencrypt.convert_asn1_date(cert.get_notAfter())
        expiration_date_iso = datetime_to_ISO8601(expiration_date)

        # Acme renewal checks
        if priv_fact.get_val(u'acme') and datetime.now() > expiration_date - timedelta(days=self.acme_try_renewal):
            try:
                db_acme_cert_issuance(session, tid)
            except Exception as exc:
                log.err('Automatic HTTPS renewal failed: %s', exc, tid=tid)

                # Send an email to the admin because this requires user intervention
                if not self.state.tenant_cache[tid].notification.disable_admin_notification_emails:
                    self.certificate_mail_creation(session, 'https_certificate_renewal_failure', tid, expiration_date_iso)
            else:
                self.should_restart_https = True

        # Regular certificates expiration checks
        elif datetime.now() > expiration_date - timedelta(days=self.notify_expr_within):
            log.info('The HTTPS Certificate is expiring on %s', expiration_date, tid=tid)
            if not self.state.tenant_cache[tid].notification.disable_admin_notification_emails:
                self.certificate_mail_creation(session, 'https_certificate_expiration', tid, expiration_date_iso)
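
convert_asn1_date is not shown in this listing. Since pyOpenSSL's X509.get_notAfter() returns ASN.1 GENERALIZEDTIME bytes such as b'20301231235959Z', a minimal sketch of such a helper (an assumption, not the project's implementation) is:

# Hypothetical sketch of convert_asn1_date: parse the GENERALIZEDTIME bytes
# returned by X509.get_notAfter(); assumes the common UTC 'Z' suffix.
from datetime import datetime


def convert_asn1_date(asn1_bytes):
    return datetime.strptime(asn1_bytes.decode('ascii'), '%Y%m%d%H%M%SZ')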
Example #5
    def process_mail_creation(self, session, tid, data):
        user_id = data['user']['id']

        # Do not spool emails if the receiver has opted out of notifications for this tip.
        if not data['tip']['enable_notifications']:
            log.debug("Discarding emails for %s due to receiver's preference.",
                      user_id)
            return

        # https://github.com/globaleaks/GlobaLeaks/issues/798
        # TODO: the current solution is global and configurable only by the admin
        sent_emails = self.state.get_mail_counter(user_id)
        if sent_emails >= self.state.tenant_cache[
                tid].notification.notification_threshold_per_hour:
            log.debug(
                "Discarding emails for receiver %s due to threshold already exceeded for the current hour",
                user_id)
            return

        self.state.increment_mail_counter(user_id)
        if sent_emails + 1 >= self.state.tenant_cache[
                tid].notification.notification_threshold_per_hour:
            log.info(
                "Reached threshold of %d emails with limit of %d for receiver %s",
                sent_emails,
                self.state.tenant_cache[tid].notification.
                notification_threshold_per_hour,
                user_id,
                tid=tid)

            # simply changing the type of the notification causes
            # the notification_limit_reached template to be sent
            data['type'] = u'receiver_notification_limit_reached'

        data['node'] = self.serialize_config(session, 'node', tid,
                                             data['user']['language'])

        if data['node']['mode'] != u'whistleblowing.it':
            data['notification'] = self.serialize_config(
                session, 'notification', tid, data['user']['language'])
        else:
            data['notification'] = self.serialize_config(
                session, 'notification', 1, data['user']['language'])

        subject, body = Templating().get_mail_subject_and_body(data)

        # If the receiver has encryption enabled encrypt the mail body
        if data['user']['pgp_key_public']:
            pgpctx = PGPContext(self.state.settings.tmp_path)
            fingerprint = pgpctx.load_key(
                data['user']['pgp_key_public'])['fingerprint']
            body = pgpctx.encrypt_message(fingerprint, body)

        session.add(
            models.Mail({
                'address': data['user']['mail_address'],
                'subject': subject,
                'body': body,
                'tid': tid,
            }))
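
The hourly threshold logic above is easier to see in isolation. A standalone sketch (the real counters live in State and reset every hour, and the limit comes from the tenant's notification settings; the names below are illustrative):

# Per-receiver hourly rate limit, distilled from process_mail_creation.
from collections import Counter

THRESHOLD_PER_HOUR = 20  # illustrative value
mail_counter = Counter()


def may_send(user_id):
    """Count the mail and return True, or False once the hourly cap is hit."""
    if mail_counter[user_id] >= THRESHOLD_PER_HOUR:
        return False

    mail_counter[user_id] += 1
    return True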
Example #6
    def check_disk_anomalies(self):
        """
        Alarm holds the thresholds that decide whether or not we are in a
        disk alarm state. This function reports the amount of free space and
        performs the evaluation and any alarm level shift.

        workingdir: checked both as a percentage (at least 1%) and as an
        absolute amount of free space.

        "unusable node" threshold: reached when the available space is
        critically low.
        https://github.com/globaleaks/GlobaLeaks/issues/297
        https://github.com/globaleaks/GlobaLeaks/issues/872
        """
        self.measured_freespace, self.measured_totalspace = get_disk_space(
            self.state.settings.working_path)

        disk_space = 0
        disk_message = ""
        accept_submissions = True
        old_accept_submissions = State.accept_submissions

        for c in get_disk_anomaly_conditions(self.measured_freespace,
                                             self.measured_totalspace):
            if not c['condition']:
                continue

            disk_space = c['alarm_level']

            info_msg = c['info_msg']()

            if disk_space == 2:
                disk_message = "[FATAL] Disk anomaly, submissions disabled: %s" % info_msg
            else:  # == 1
                disk_message = "[WARNING]: Disk anomaly: %s" % info_msg

            accept_submissions = c['accept_submissions']
            break

        # This check is temporary; it remains to be verified that the switch
        # can be logged as part of the Anomalies via this function
        old_alarm_level = self.alarm_levels['disk_space']
        if old_alarm_level != disk_space:
            if disk_message:
                log.err(disk_message)
            else:
                log.err("Available disk space returned to normal levels")

        # the value is set here with a single assignment in order to
        # minimize possible race conditions resetting/setting the values
        self.alarm_levels['disk_space'] = disk_space
        self.alarm_levels['disk_message'] = disk_message

        # when not testing, change accept_submissions to the new value
        State.accept_submissions = accept_submissions if not self.state.settings.testing else True

        if old_accept_submissions != State.accept_submissions:
            log.info("Switching disk space availability from: %s to %s",
                     old_accept_submissions, accept_submissions)

            # Must invalidate the cache here because accept_submissions served in /public has changed
            Cache.invalidate()
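
get_disk_anomaly_conditions is consumed above but not shown; from the keys it accesses ('condition', 'alarm_level', 'info_msg', 'accept_submissions') its return shape can be inferred. A sketch with made-up thresholds (the real ones differ):

# Hypothetical reconstruction of the conditions list consumed by
# check_disk_anomalies; the thresholds are illustrative only.
def get_disk_anomaly_conditions(free_bytes, total_bytes):
    free_ratio = float(free_bytes) / total_bytes

    return [
        {  # fatal: stop accepting submissions
            'condition': free_ratio <= 0.01,
            'alarm_level': 2,
            'info_msg': lambda: 'free disk space: %d bytes' % free_bytes,
            'accept_submissions': False,
        },
        {  # warning: keep accepting submissions
            'condition': free_ratio <= 0.05,
            'alarm_level': 1,
            'info_msg': lambda: 'free disk space: %d bytes' % free_bytes,
            'accept_submissions': True,
        },
    ]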
Example #7
    def get(self, token):
        tmp_chall_dict = State.tenant_state[self.request.tid].acme_tmp_chall_dict
        if token in tmp_chall_dict:
            log.info('Responding to valid .well-known request [%d]', self.request.tid)
            return tmp_chall_dict[token].tok

        raise errors.ResourceNotFound
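
The tok attribute read here is written by the ACME code path: Example #1 stores the key authorization with tmp_chall_dict.set(v, ChallTok(chall_tok)). A minimal stand-in for those two pieces (assumed shapes; the real temporary dict presumably also expires entries):

# Hypothetical stand-ins for ChallTok and the temporary challenge dict.
class ChallTok:
    def __init__(self, tok):
        # key authorization served at /.well-known/acme-challenge/<token>
        self.tok = tok


class TempDict(dict):
    def set(self, key, value):
        self[key] = value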
Example #8
def update_db():
    """
    This function handles the update of an existing database
    """
    db_version, db_file_path = get_db_file(Settings.working_path)
    if db_version == 0:
        return 0

    try:
        with warnings.catch_warnings():
            from globaleaks.db import migration
            warnings.simplefilter("ignore", category=sa_exc.SAWarning)

            log.err('Found an already initialized database version: %d', db_version)
            if db_version == DATABASE_VERSION:
                migration.perform_data_update(db_file_path)
                return DATABASE_VERSION

            log.err('Performing schema migration from version %d to version %d', db_version, DATABASE_VERSION)

            migration.perform_migration(db_version)

    except Exception as exception:
        log.err('Migration failure: %s', exception)
        log.err('Verbose exception traceback:')
        etype, value, tback = sys.exc_info()
        log.info('\n'.join(traceback.format_exception(etype, value, tback)))
        return -1

    log.err('Migration completed with success!')

    return DATABASE_VERSION
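
A hedged usage sketch of the return contract above (0 = no existing database, -1 = migration failure, any other value = the resulting DATABASE_VERSION); init_db below is an illustrative name, not the project's API:

import sys

version = update_db()
if version == -1:
    sys.exit(1)   # migration failed: abort startup
elif version == 0:
    init_db()     # hypothetical: no database yet, initialize from scratch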
Example #9
def update_db():
    """
    This function handles the update of an existing database
    """
    db_version, db_file_path = get_db_file(Settings.working_path)
    if db_version == 0:
        return 0

    try:
        with warnings.catch_warnings():
            from globaleaks.db import migration
            warnings.simplefilter("ignore", category=sa_exc.SAWarning)

            log.err('Found an already initialized database version: %d',
                    db_version)
            if db_version == DATABASE_VERSION:
                migration.perform_data_update(db_file_path)
                return DATABASE_VERSION

            log.err(
                'Performing schema migration from version %d to version %d',
                db_version, DATABASE_VERSION)

            migration.perform_migration(db_version)

    except Exception as exception:
        log.err('Migration failure: %s', exception)
        log.err('Verbose exception traceback:')
        etype, value, tback = sys.exc_info()
        log.info('\n'.join(traceback.format_exception(etype, value, tback)))
        return -1

    log.err('Migration completed with success!')

    return DATABASE_VERSION
Example #10
    def generate_dh_params_if_missing(cls, tid):
        gen_dh = yield FileResource.should_gen_dh_params(tid)
        if gen_dh:
            log.info("Generating the HTTPS DH params with %d bits" % Settings.key_bits)
            dh_params = yield deferToThread(tls.gen_dh_params, Settings.key_bits)

            log.info("Storing the HTTPS DH params")
            yield cls.save_dh_params(tid, dh_params)
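
The yield-based flow above only works once the method is wrapped with Twisted's @defer.inlineCallbacks (visible on the inner callbacks elsewhere in this listing); the decorator presumably sits on the lines omitted from the excerpt. The pattern in isolation:

# deferToThread runs a blocking job in the reactor's thread pool; under
# inlineCallbacks, yield suspends until the resulting Deferred fires.
from twisted.internet import defer
from twisted.internet.threads import deferToThread


@defer.inlineCallbacks
def expensive_setup():
    result = yield deferToThread(sum, range(10 ** 6))
    defer.returnValue(result)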
Example #11
    def launch_worker(self):
        pp = HTTPSProcProtocol(self, self.tls_cfg)
        reactor.spawnProcess(pp, executable, [executable, self.worker_path], childFDs=pp.fd_map, env=os.environ)
        self.tls_process_pool.append(pp)

        log.info('Launched: %s', pp)

        return pp.startup_promise
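
spawnProcess's childFDs argument maps file descriptor numbers in the child to pipes or inherited descriptors; fd_map above presumably hands the pre-bound TLS sockets to the worker. The mechanism in isolation (unrelated to the project's actual fd_map):

# childFDs demo: 'w' gives the parent a write pipe to the child's fd 0,
# 'r' gives it read pipes from the child's fds 1 and 2.
from twisted.internet import protocol, reactor


class LogOutput(protocol.ProcessProtocol):
    def childDataReceived(self, childFD, data):
        print('fd %d: %r' % (childFD, data))

    def processEnded(self, reason):
        reactor.stop()


reactor.spawnProcess(LogOutput(), '/bin/sh', ['/bin/sh', '-c', 'echo hi'],
                     childFDs={0: 'w', 1: 'r', 2: 'r'})
reactor.run()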
Example #12
    def delete(self, tenant_id):
        """
        Delete the specified tenant.
        """
        tenant_id = int(tenant_id)

        log.info('Removing tenant with id: %d', tenant_id, tid=self.request.tid)

        return delete(tenant_id)
Example #13
    def post(self):
        """
        Create a new tenant
        """
        request = self.validate_message(self.request.content.read(), requests.AdminTenantDesc)

        log.info('Creating new tenant', tid=self.request.tid)

        return create(request)
Example #14
    def check_disk_anomalies(self):
        """
        Alarm holds the thresholds that decide whether or not we are in a
        disk alarm state. This function reports the amount of free space and
        performs the evaluation and any alarm level shift.

        workingdir: checked both as a percentage (at least 1%) and as an
        absolute amount of free space.

        "unusable node" threshold: reached when the available space is
        critically low.
        https://github.com/globaleaks/GlobaLeaks/issues/297
        https://github.com/globaleaks/GlobaLeaks/issues/872
        """
        self.measured_freespace, self.measured_totalspace = get_disk_space(self.state.settings.working_path)

        disk_space = 0
        disk_message = ""
        accept_submissions = True
        old_accept_submissions = State.accept_submissions

        for c in get_disk_anomaly_conditions(self.measured_freespace, self.measured_totalspace):
            if c['condition']:
                disk_space = c['alarm_level']

                info_msg = c['info_msg']()

                if disk_space == 2:
                    disk_message = "[FATAL] Disk anomaly, submissions disabled: %s" % info_msg
                else:  # == 1
                    disk_message = "[WARNING]: Disk anomaly: %s" % info_msg

                accept_submissions = c['accept_submissions']
                break

        # This check is temporary; it remains to be verified that the switch
        # can be logged as part of the Anomalies via this function
        old_alarm_level = self.alarm_levels['disk_space']
        if old_alarm_level != disk_space:
            if disk_message:
                log.err(disk_message)
            else:
                log.err("Available disk space returned to normal levels")

        # the value is set here with a single assignment in order to
        # minimize possible race conditions resetting/setting the values
        self.alarm_levels['disk_space'] = disk_space
        self.alarm_levels['disk_message'] = disk_message

        # when not testing, change accept_submissions to the new value
        State.accept_submissions = accept_submissions if not self.state.settings.testing else True

        if old_accept_submissions != State.accept_submissions:
            log.info("Switching disk space availability from: %s to %s",
                     old_accept_submissions, accept_submissions)

            # Must invalidate the cache here because accept_submissions served in /public has changed
            Cache.invalidate()
Example #15
    def migrate_model(self, model_name):
        if self.entries_count[model_name] <= 0 or self.skip_model_migration.get(
                model_name, False):
            return

        log.info(' * %s [#%d]' % (model_name, self.entries_count[model_name]))

        specific_migration_function = getattr(self, 'migrate_%s' % model_name,
                                              None)
        if specific_migration_function is None:
            self.generic_migration_function(model_name)
        else:
            specific_migration_function()
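
Because the lookup is getattr(self, 'migrate_%s' % model_name, None), a migration script customizes a single table simply by defining the matching method. The dispatch pattern reduced to a standalone sketch:

# getattr-based dispatch, as used by migrate_model above.
class Migrator:
    def migrate_model(self, model_name):
        handler = getattr(self, 'migrate_%s' % model_name, None)
        if handler is None:
            self.generic_migration_function(model_name)
        else:
            handler()

    def generic_migration_function(self, model_name):
        print('generic migration for %s' % model_name)

    def migrate_User(self):
        print('custom migration for User')


Migrator().migrate_model('User')     # custom migration for User
Migrator().migrate_model('Context')  # generic migration for Context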
Example #16
    def add_hidden_service(self, tid, hostname, key):
        if self.tor_conn is None:
            return

        hs_loc = '80 localhost:8083'
        if not hostname and not key:
            if tid in self.startup_semaphore:
                log.debug('Still waiting for hidden service to start', tid=tid)
                return self.startup_semaphore[tid]

            log.info('Creating new onion service', tid=tid)
            ephs = EphemeralHiddenService(hs_loc)
        else:
            log.info('Setting up existing onion service %s', hostname, tid=tid)
            ephs = EphemeralHiddenService(hs_loc, key)
            self.hs_map[hostname] = ephs

        @defer.inlineCallbacks
        def init_callback(ret):
            log.info('Initialization of hidden-service %s completed.',
                     ephs.hostname,
                     tid=tid)
            if not hostname and not key:
                if tid in State.tenant_cache:
                    self.hs_map[ephs.hostname] = ephs
                    yield set_onion_service_info(tid, ephs.hostname,
                                                 ephs.private_key)
                else:
                    yield ephs.remove_from_tor(self.tor_conn.protocol)

                tid_list = list(set([1, tid]))

                for x in tid_list:
                    ApiCache().invalidate(x)

                yield refresh_memory_variables(tid_list)

                del self.startup_semaphore[tid]

        def init_errback(failure):
            if tid in self.startup_semaphore:
                del self.startup_semaphore[tid]

            raise failure.value

        self.startup_semaphore[tid] = ephs.add_to_tor(self.tor_conn.protocol)

        return self.startup_semaphore[tid].addCallbacks(
            init_callback, init_errback)  #pylint: disable=no-member
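
startup_semaphore doubles as a per-tenant deduplication map: while the Deferred for a tenant is pending, repeat callers receive the same Deferred instead of spawning a second onion service. The idea in isolation (the names here are illustrative):

# Deduplicate concurrent startups per key by caching the pending Deferred.
pending = {}


def start_once(tid, start_func):
    """start_func must return a Twisted Deferred."""
    if tid in pending:
        return pending[tid]

    d = pending[tid] = start_func()

    def cleanup(result):
        pending.pop(tid, None)
        return result

    return d.addBoth(cleanup)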
Example #17
    def process_mail_creation(self, session, tid, data):
        user_id = data['user']['id']

        # Do not spool emails if the receiver has opted out of notifications for this tip.
        if not data['tip']['enable_notifications']:
            log.debug("Discarding emails for %s due to receiver's preference.", user_id)
            return

        # https://github.com/globaleaks/GlobaLeaks/issues/798
        # TODO: the current solution is global and configurable only by the admin
        sent_emails = self.state.get_mail_counter(user_id)
        if sent_emails >= self.state.tenant_cache[tid].notification.notification_threshold_per_hour:
            log.debug("Discarding emails for receiver %s due to threshold already exceeded for the current hour",
                      user_id)
            return

        self.state.increment_mail_counter(user_id)
        if sent_emails + 1 >= self.state.tenant_cache[tid].notification.notification_threshold_per_hour:
            log.info("Reached threshold of %d emails with limit of %d for receiver %s",
                     sent_emails,
                     self.state.tenant_cache[tid].notification.notification_threshold_per_hour,
                     user_id,
                     tid=tid)

            # simply changing the type of the notification causes
            # the notification_limit_reached template to be sent
            data['type'] = u'receiver_notification_limit_reached'

        data['node'] = self.serialize_config(session, 'node', tid, data['user']['language'])

        if data['node']['mode'] != u'whistleblowing.it':
            data['notification'] = self.serialize_config(session, 'notification', tid, data['user']['language'])
        else:
            data['notification'] = self.serialize_config(session, 'notification', 1, data['user']['language'])

        subject, body = Templating().get_mail_subject_and_body(data)

        # If the receiver has encryption enabled encrypt the mail body
        if data['user']['pgp_key_public']:
            pgpctx = PGPContext(self.state.settings.tmp_path)
            fingerprint = pgpctx.load_key(data['user']['pgp_key_public'])['fingerprint']
            body = pgpctx.encrypt_message(fingerprint, body)

        session.add(models.Mail({
            'address': data['user']['mail_address'],
            'subject': subject,
            'body': body,
            'tid': tid,
        }))
Example #18
    def remove_unwanted_hidden_services(self):
        # Collect the list of all hidden services listed by Tor, then remove
        # any that are not present in the tenant cache, ensuring that
        # OnionService.hs_map is kept up to date.
        running_services = yield self.get_all_hidden_services()

        tenant_services = {State.tenant_cache[tid].onionservice for tid in State.tenant_cache}

        for onion_addr in running_services:
            ephs = None
            if onion_addr not in tenant_services and onion_addr in self.hs_map:
                ephs = self.hs_map.pop(onion_addr)

            if ephs is not None:
                log.info('Removing onion address %s', ephs.hostname)
                yield ephs.remove_from_tor(self.tor_conn.protocol)
Example #19
        def init_callback(ret):
            log.info('Initialization of hidden-service %s completed.', ephs.hostname, tid=tid)
            if not hostname and not key:
                if tid in State.tenant_cache:
                    self.hs_map[ephs.hostname] = ephs
                    yield set_onion_service_info(tid, ephs.hostname, ephs.private_key)
                else:
                    yield ephs.remove_from_tor(self.tor_conn.protocol)

                tid_list = list(set([1, tid]))

                for x in tid_list:
                    Cache().invalidate(x)

                yield refresh_memory_variables(tid_list)

                del self.startup_semaphore[tid]
Example #20
    def add_hidden_service(self, tid, hostname, key):
        if self.tor_conn is None:
            return

        hs_loc = '80 localhost:8083'
        if not hostname and not key:
            if tid in self.startup_semaphore:
                log.debug('Still waiting for hidden service to start', tid=tid)
                return self.startup_semaphore[tid]

            log.info('Creating new onion service', tid=tid)
            ephs = EphemeralHiddenService(hs_loc)
        else:
            log.info('Setting up existing onion service %s', hostname, tid=tid)
            ephs = EphemeralHiddenService(hs_loc, key)
            self.hs_map[hostname] = ephs

        @defer.inlineCallbacks
        def init_callback(ret):
            log.info('Initialization of hidden-service %s completed.', ephs.hostname, tid=tid)
            if not hostname and not key:
                if tid in State.tenant_cache:
                    self.hs_map[ephs.hostname] = ephs
                    yield set_onion_service_info(tid, ephs.hostname, ephs.private_key)
                else:
                    yield ephs.remove_from_tor(self.tor_conn.protocol)

                tid_list = list(set([1, tid]))

                for x in tid_list:
                    Cache().invalidate(x)

                yield refresh_memory_variables(tid_list)

                del self.startup_semaphore[tid]

        def init_errback(failure):
            if tid in self.startup_semaphore:
                del self.startup_semaphore[tid]

            raise failure.value

        self.startup_semaphore[tid] = ephs.add_to_tor(self.tor_conn.protocol)

        return self.startup_semaphore[tid].addCallbacks(init_callback, init_errback)  # pylint: disable=no-member
Example #21
    def add_onion_service(self, tid, hostname, key):
        if self.tor_conn is None:
            return

        hs_loc = '80 localhost:8083'

        log.info('Setting up the onion service %s', hostname, tid=tid)

        ephs = EphemeralHiddenService(hs_loc, key)

        self.hs_map[hostname] = ephs

        def init_callback(ret):
            log.err('Initialization of onion-service %s completed.',
                    ephs.hostname,
                    tid=tid)

        return ephs.add_to_tor(self.tor_conn.protocol).addCallbacks(
            init_callback)  # pylint: disable=no-member
Example #22
    def cert_expiration_checks(self, session, tid):
        now = datetime.now()

        priv_fact = models.config.ConfigFactory(session, tid)

        if not priv_fact.get_val('https_enabled'):
            return

        cert = load_certificate(FILETYPE_PEM, priv_fact.get_val('https_cert'))
        expiration_date = letsencrypt.convert_asn1_date(cert.get_notAfter())

        # Acme renewal checks
        if priv_fact.get_val('acme') and now > expiration_date - timedelta(
                days=self.acme_try_renewal):
            try:
                db_acme_cert_request(session, tid)
            except Exception as exc:
                log.err('Automatic HTTPS renewal failed: %s', exc, tid=tid)

                # Send an email to the admin because this requires user intervention
                if now > expiration_date - timedelta(days=self.notify_expr_within) and \
                   not self.state.tenant_cache[tid].notification.disable_admin_notification_emails:
                    self.certificate_mail_creation(
                        session, 'https_certificate_renewal_failure', tid,
                        expiration_date)

            tls_config = load_tls_dict(session, tid)

            self.state.snimap.unload(tid)
            self.state.snimap.load(tid, tls_config)

        # Regular certificates expiration checks
        elif now > expiration_date - timedelta(days=self.notify_expr_within):
            log.info('The HTTPS Certificate is expiring on %s',
                     expiration_date,
                     tid=tid)
            if not self.state.tenant_cache[
                    tid].notification.disable_admin_notification_emails:
                self.certificate_mail_creation(session,
                                               'https_certificate_expiration',
                                               tid, expiration_date)
Example #23
    def db_maybe_launch_https_workers(self, session):
        config = ConfigFactory(session, 1)

        # If root_tenant is disabled do not start https
        on = config.get_val(u'https_enabled')
        if not on:
            log.info("Not launching workers")
            return defer.succeed(None)

        site_cfgs = load_tls_dict_list(session)

        valid_cfgs, err = [], None
        # Determine which site_cfgs are valid and only pass those to the child.
        for db_cfg in site_cfgs:
            chnv = tls.ChainValidator()
            ok, err = chnv.validate(db_cfg,
                                    must_be_disabled=False,
                                    check_expiration=False)
            if ok and err is None:
                valid_cfgs.append(db_cfg)

        self.tls_cfg['site_cfgs'] = valid_cfgs

        if not valid_cfgs:
            log.info("Not launching https workers due to %s", err)
            return defer.fail(err)

        log.info("Decided to launch https workers")

        return self.launch_https_workers()
Example #24
    def perform_pgp_validation_checks(self, session):
        tenant_expiry_map = {1: []}

        for user in db_get_expired_or_expiring_pgp_users(session, self.state.tenant_cache.keys()):
            user_desc = user_serialize_user(session, user, user.language)
            tenant_expiry_map.setdefault(user.tid, []).append(user_desc)

            if user.pgp_key_expiration < datetime_now():
                log.info('Removing expired PGP key of: %s', user.username, tid=user.tid)
                user.pgp_key_public = ''
                user.pgp_key_fingerprint = ''
                user.pgp_key_expiration = datetime_null()

        for tid, expired_or_expiring in tenant_expiry_map.items():
            for user_desc in expired_or_expiring:
                self.prepare_user_pgp_alerts(session, tid, user_desc)

            if self.state.tenant_cache[tid].notification.disable_admin_notification_emails:
                continue

            if expired_or_expiring:
                self.prepare_admin_pgp_alerts(session, tid, expired_or_expiring)
Example #25
    def __init__(self, net_sockets, proxy_ip, proxy_port):
        log.info("Starting process monitor")

        self.shutting_down = False

        self.start_time = datetime_now()
        self.tls_process_pool = []
        self.cpu_count = multiprocessing.cpu_count()

        self.worker_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'worker_https.py')

        self.tls_cfg = {
          'proxy_ip': proxy_ip,
          'proxy_port': proxy_port,
          'debug': log.loglevel <= logging.DEBUG,
          'site_cfgs': [],
        }

        if not net_sockets:
            log.err("No ports to bind to! Spawning processes will not work!")

        self.tls_cfg['tls_socket_fds'] = [ns.fileno() for ns in net_sockets]
Example #26
def run_acme_reg_to_finish(domain, accnt_key, priv_key, hostname, tmp_chall_dict, directory_url):
    """Runs the entire process of ACME registeration"""

    client = create_v2_client(directory_url, accnt_key)

    # First we need to create a registration with the email address provided
    # and accept the terms of service
    log.info("Using boulder server %s", directory_url)

    client.net.account = client.new_account(
        messages.NewRegistration.from_data(
            terms_of_service_agreed=True
        )
    )

    # Now we need to open an order and request our certificate

    # NOTE: We'll let ACME generate a CSR for our private key as there's
    # a lot of utility code it uses to generate the CSR in a specific
    # fashion. Better to use what LE provides than to roll our own as we
    # were doing with the v1 code
    #
    # This will also let us support multi-domain certificate requests in the
    # future, as well as mandate OCSP-Must-Staple if/when GL's HTTPS server
    # supports it
    csr = crypto_util.make_csr(priv_key, [hostname], False)
    order = client.new_order(csr)
    authzr = order.authorizations

    log.info('Created a new order for %s', hostname)

    # authzr is a list of Authorization resources; we need to find the
    # HTTP-01 challenge and use it
    challb = None
    for auth_req in authzr:  # pylint: disable=not-an-iterable
        for chall_body in auth_req.body.challenges:
            if isinstance(chall_body.chall, challenges.HTTP01):
                challb = chall_body
                break

    if challb is None:
        raise Exception("HTTP01 challenge unavailable!")

    _, chall_tok = challb.response_and_validation(client.net.key)
    v = challb.chall.encode("token")
    log.info('Exposing challenge on %s', v)
    tmp_chall_dict.set(v, ChallTok(chall_tok))

    cr = client.answer_challenge(challb, challb.response(client.net.key))
    log.debug('Acme CA responded to challenge request with: %s', cr)

    order = client.poll_and_finalize(order)

    return split_certificate_chain(order.fullchain_pem)
Example #27
def run_acme_reg_to_finish(domain, accnt_key, priv_key, hostname,
                           tmp_chall_dict, directory_url):
    """Runs the entire process of ACME registeration"""

    client = create_v2_client(directory_url, accnt_key)

    # First we need to create a registration with the email address provided
    # and accept the terms of service
    log.info("Using boulder server %s", directory_url)

    client.net.account = client.new_account(
        messages.NewRegistration.from_data(terms_of_service_agreed=True))

    # Now we need to open an order and request our certificate

    # NOTE: We'll let ACME generate a CSR for our private key as there's
    # a lot of utility code it uses to generate the CSR in a specific
    # fashion. Better to use what LE provides than to roll our own as we
    # were doing with the v1 code
    #
    # This will also let us support multi-domain certificate requests in the
    # future, as well as mandate OCSP-Must-Staple if/when GL's HTTPS server
    # supports it
    csr = crypto_util.make_csr(priv_key, [hostname], False)
    order = client.new_order(csr)
    authzr = order.authorizations

    log.info('Created a new order for %s', hostname)

    # authzr is a list of Authorization resources; we need to find the
    # HTTP-01 challenge and use it
    challb = None
    for auth_req in authzr:  # pylint: disable=not-an-iterable
        for chall_body in auth_req.body.challenges:
            if isinstance(chall_body.chall, challenges.HTTP01):
                challb = chall_body
                break

    if challb is None:
        raise Exception("HTTP01 challenge unavailable!")

    _, chall_tok = challb.response_and_validation(client.net.key)
    v = challb.chall.encode("token")
    log.info('Exposing challenge on %s', v)
    tmp_chall_dict.set(v, ChallTok(chall_tok))

    cr = client.answer_challenge(challb, challb.response(client.net.key))
    log.debug('Acme CA responded to challenge request with: %s', cr)

    order = client.poll_and_finalize(order)

    return split_certificate_chain(order.fullchain_pem)
Example #28
    def create_file(session, cls, tid):
        log.info("Generating an ACME account key with %d bits" % Settings.key_bits)

        return db_create_acme_key(session, tid)
Example #29
def perform_migration(version):
    """
    @param version:
    @return:
    """
    to_delete_on_fail = []
    to_delete_on_success = []

    if version < FIRST_DATABASE_VERSION_SUPPORTED:
        log.info("Migrations from DB version lower than %d are no longer supported!" % FIRST_DATABASE_VERSION_SUPPORTED)
        quit()

    tmpdir = os.path.abspath(os.path.join(Settings.tmp_path, 'tmp'))
    if version < 41:
        orig_db_file = os.path.abspath(os.path.join(Settings.working_path, 'db', 'glbackend-%d.db' % version))
    else:
        orig_db_file = os.path.abspath(os.path.join(Settings.working_path, 'globaleaks.db'))

    final_db_file = os.path.abspath(os.path.join(Settings.working_path, 'globaleaks.db'))

    shutil.rmtree(tmpdir, True)
    os.mkdir(tmpdir)
    shutil.copy(orig_db_file, os.path.join(tmpdir, 'old.db'))

    new_db_file = None

    try:
        while version < DATABASE_VERSION:
            old_db_file = os.path.abspath(os.path.join(tmpdir, 'old.db'))
            new_db_file = os.path.abspath(os.path.join(tmpdir, 'new.db'))

            if os.path.exists(new_db_file):
                shutil.move(new_db_file, old_db_file)

            Settings.db_file = new_db_file
            Settings.enable_input_length_checks = False

            to_delete_on_fail.append(new_db_file)
            to_delete_on_success.append(old_db_file)

            log.info("Updating DB from version %d to version %d" % (version, version + 1))

            j = version - FIRST_DATABASE_VERSION_SUPPORTED
            session_old = get_session(make_db_uri(old_db_file))

            engine = get_engine(make_db_uri(new_db_file), foreign_keys=False)
            if FIRST_DATABASE_VERSION_SUPPORTED + j + 1 == DATABASE_VERSION:
                Base.metadata.create_all(engine)
            else:
                Bases[j+1].metadata.create_all(engine)
            session_new = sessionmaker(bind=engine)()

            # Here the migration script is instantiated
            MigrationModule = importlib.import_module("globaleaks.db.migrations.update_%d" % (version + 1))
            migration_script = MigrationModule.MigrationScript(migration_mapping, version, session_old, session_new)

            log.info("Migrating table:")

            try:
                try:
                    migration_script.prologue()
                except Exception as exception:
                    log.err("Failure while executing migration prologue: %s" % exception)
                    raise exception

                for model_name, _ in migration_mapping.items():
                    if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                        try:
                            migration_script.migrate_model(model_name)

                            # Commit at every table migration in order to be able to detect
                            # the precise migration that may fail.
                            migration_script.commit()
                        except Exception as exception:
                            log.err("Failure while migrating table %s: %s " % (model_name, exception))
                            raise exception
                try:
                    migration_script.epilogue()
                    migration_script.commit()
                except Exception as exception:
                    log.err("Failure while executing migration epilogue: %s " % exception)
                    raise exception

            finally:
                # the database should always be closed before leaving the
                # application in order to avoid leaking journal files.
                migration_script.close()

            log.info("Migration stats:")

            # we open a new db session in order to verify the integrity of the generated file
            session_verify = get_session(make_db_uri(new_db_file))

            for model_name, _ in migration_mapping.items():
                if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                    count = session_verify.query(migration_script.model_to[model_name]).count()
                    if migration_script.entries_count[model_name] != count:
                        if migration_script.fail_on_count_mismatch[model_name]:
                            raise AssertionError("Integrity check failed on count equality for table %s: %d != %d" %
                                                 (model_name, count, migration_script.entries_count[model_name]))
                        else:
                            log.info(" * %s table migrated (entries count changed from %d to %d)" %
                                                 (model_name, migration_script.entries_count[model_name], count))
                    else:
                        log.info(" * %s table migrated (%d entry(s))" %
                                             (model_name, migration_script.entries_count[model_name]))

            version += 1

            session_verify.close()

        perform_data_update(new_db_file)

        # in case of success, first copy the new migrated db, then as a last action delete the original db file
        shutil.copy(new_db_file, final_db_file)

        if orig_db_file != final_db_file:
            overwrite_and_remove(orig_db_file)

        path = os.path.join(Settings.working_path, 'db')
        if os.path.exists(path):
            shutil.rmtree(path)

    except Exception as e:
        print(e)

    finally:
        # Always cleanup the temporary directory used for the migration
        for f in os.listdir(tmpdir):
            overwrite_and_remove(os.path.join(tmpdir, f))

        shutil.rmtree(tmpdir)
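
overwrite_and_remove is used above but not shown; a plausible sketch of such a helper (assumption: zero the file's contents before unlinking it) is:

# Hypothetical sketch of overwrite_and_remove.
import os


def overwrite_and_remove(path):
    if not os.path.isfile(path):
        return

    size = os.path.getsize(path)
    with open(path, 'r+b') as f:
        f.write(b'\x00' * size)
        f.flush()
        os.fsync(f.fileno())

    os.remove(path)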
Example #30
def perform_migration(version):
    """
    @param version:
    @return:
    """
    to_delete_on_fail = []
    to_delete_on_success = []

    if version < FIRST_DATABASE_VERSION_SUPPORTED:
        log.info("Migrations from DB version lower than %d are no longer supported!" % FIRST_DATABASE_VERSION_SUPPORTED)
        quit()

    tmpdir = os.path.abspath(os.path.join(Settings.tmp_path, 'tmp'))
    if version < 41:
        orig_db_file = os.path.abspath(os.path.join(Settings.working_path, 'db', 'glbackend-%d.db' % version))
    else:
        orig_db_file = os.path.abspath(os.path.join(Settings.working_path, 'globaleaks.db'))

    final_db_file = os.path.abspath(os.path.join(Settings.working_path, 'globaleaks.db'))

    shutil.rmtree(tmpdir, True)
    os.mkdir(tmpdir)
    shutil.copy(orig_db_file, os.path.join(tmpdir, 'old.db'))

    new_db_file = None

    try:
        while version < DATABASE_VERSION:
            old_db_file = os.path.abspath(os.path.join(tmpdir, 'old.db'))
            new_db_file = os.path.abspath(os.path.join(tmpdir, 'new.db'))

            if os.path.exists(new_db_file):
                shutil.move(new_db_file, old_db_file)

            Settings.db_file = new_db_file
            Settings.enable_input_length_checks = False

            to_delete_on_fail.append(new_db_file)
            to_delete_on_success.append(old_db_file)

            log.info("Updating DB from version %d to version %d" % (version, version + 1))

            j = version - FIRST_DATABASE_VERSION_SUPPORTED
            session_old = get_session(make_db_uri(old_db_file))

            engine = get_engine(make_db_uri(new_db_file), foreign_keys=False)
            if FIRST_DATABASE_VERSION_SUPPORTED + j + 1 == DATABASE_VERSION:
                Base.metadata.create_all(engine)
            else:
                Bases[j+1].metadata.create_all(engine)
            session_new = sessionmaker(bind=engine)()

            # Here the migration script is instantiated
            MigrationModule = importlib.import_module("globaleaks.db.migrations.update_%d" % (version + 1))
            migration_script = MigrationModule.MigrationScript(migration_mapping, version, session_old, session_new)

            log.info("Migrating table:")

            try:
                try:
                    migration_script.prologue()
                except Exception as exception:
                    log.err("Failure while executing migration prologue: %s" % exception)
                    raise exception

                for model_name, _ in migration_mapping.items():
                    if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                        try:
                            migration_script.migrate_model(model_name)

                            # Commit at every table migration in order to be able to detect
                            # the precise migration that may fail.
                            migration_script.commit()
                        except Exception as exception:
                            log.err("Failure while migrating table %s: %s " % (model_name, exception))
                            raise exception
                try:
                    migration_script.epilogue()
                    migration_script.commit()
                except Exception as exception:
                    log.err("Failure while executing migration epilogue: %s " % exception)
                    raise exception

            finally:
                # the database should always be closed before leaving the
                # application in order to avoid leaking journal files.
                migration_script.close()

            log.info("Migration stats:")

            # we open a new db session in order to verify the integrity of the generated file
            session_verify = get_session(make_db_uri(new_db_file))

            for model_name, _ in migration_mapping.items():
                if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                    count = session_verify.query(migration_script.model_to[model_name]).count()
                    if migration_script.entries_count[model_name] != count:
                        if migration_script.fail_on_count_mismatch[model_name]:
                            raise AssertionError("Integrity check failed on count equality for table %s: %d != %d" %
                                                 (model_name, count, migration_script.entries_count[model_name]))
                        else:
                            log.info(" * %s table migrated (entries count changed from %d to %d)" %
                                     (model_name, migration_script.entries_count[model_name], count))
                    else:
                        log.info(" * %s table migrated (%d entry(s))" %
                                 (model_name, migration_script.entries_count[model_name]))

            version += 1

            session_verify.close()

        perform_data_update(new_db_file)

        # in case of success, first copy the new migrated db, then as a last action delete the original db file
        shutil.copy(new_db_file, final_db_file)

        if orig_db_file != final_db_file:
            overwrite_and_remove(orig_db_file)

        path = os.path.join(Settings.working_path, 'db')
        if os.path.exists(path):
            shutil.rmtree(path)

    finally:
        # Always cleanup the temporary directory used for the migration
        for f in os.listdir(tmpdir):
            overwrite_and_remove(os.path.join(tmpdir, f))

        shutil.rmtree(tmpdir)
Example #31
    def create_file(session, cls, tid):
        log.info("Generating an ACME account key with %d bits" %
                 Settings.key_bits)

        return db_create_acme_key(session, tid)
Example #32
    def perform_file_action(cls, tid):
        log.info("Generating the HTTPS key with %d bits" % Settings.key_bits)
        key = yield deferToThread(tls.gen_rsa_key, Settings.key_bits)

        log.debug("Saving the HTTPS key")
        yield cls.save_tls_key(tid, key)