def operation(self):
    current_time = time.time()
    error_msg = ""

    for job in self.jobs_list:
        if job.active is None:
            continue

        execution_time = current_time - job.start_time
        time_from_last_failed_check = current_time - job.last_monitor_check_failed

        if (execution_time > job.monitor_interval and
                time_from_last_failed_check > job.monitor_interval):
            job.last_monitor_check_failed = current_time

            if execution_time < 60:
                error = "Job %s is taking more than %d seconds to execute" % (job.name, execution_time)
            elif execution_time < 3600:
                minutes = int(execution_time / 60)
                error = "Job %s is taking more than %d minutes to execute" % (job.name, minutes)
            else:
                hours = int(execution_time / 3600)
                error = "Job %s is taking more than %d hours to execute" % (job.name, hours)

            error_msg += error + '\n'
            log.err(error)

    if error_msg:
        self.state.schedule_exception_email(error_msg)

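# The seconds/minutes/hours bucketing above is self-contained enough to be
# factored out and unit-tested in isolation. A minimal sketch of that logic
# (the helper name `humanize_duration` is hypothetical, not part of the codebase):

def humanize_duration(seconds):
    """Return a coarse human-readable duration, mirroring the bucketing above."""
    if seconds < 60:
        return "%d seconds" % seconds
    if seconds < 3600:
        return "%d minutes" % int(seconds / 60)
    return "%d hours" % int(seconds / 3600)

assert humanize_duration(42) == "42 seconds"
assert humanize_duration(180) == "3 minutes"
assert humanize_duration(7200) == "2 hours"
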
def load_key(self, key):
    """
    @param key: a textual PGP key block to be imported
    @return: a dict with the expiration date and the key fingerprint
    """
    try:
        import_result = self.gnupg.import_keys(key)
    except Exception as excep:
        log.err("Error in PGP import_keys: %s", excep)
        raise errors.InputValidationError

    if not import_result.fingerprints:
        raise errors.InputValidationError

    fingerprint = import_result.fingerprints[0]

    # check that the key is effectively reachable
    try:
        all_keys = self.gnupg.list_keys()
    except Exception as excep:
        log.err("Error in PGP list_keys: %s", excep)
        raise errors.InputValidationError

    expiration = datetime.utcfromtimestamp(0)
    for k in all_keys:
        if k['fingerprint'] == fingerprint:
            if k['expires']:
                expiration = datetime.utcfromtimestamp(int(k['expires']))
            break

    return {
        'fingerprint': fingerprint,
        'expiration': expiration
    }

def startup_errback(err):
    if self.print_startup_error:
        # Print the error only on the first run or on the first failure
        # subsequent to a success
        self.print_startup_error = False
        log.err('Failed to initialize Tor connection; error: %s', err)

    restart_deferred.callback(None)

def startup_callback(tor_conn):
    self.print_startup_error = True
    self.tor_conn = tor_conn
    self.tor_conn.protocol.on_disconnect = restart_deferred

    log.err('Successfully connected to Tor control port')

    return self.add_all_hidden_services()

def check_disk_anomalies(self):
    """
    The Alarm class holds the thresholds that decide whether we are in a disk
    alarm condition or not. This function reports the amount of free space and
    performs the evaluation and the alarm level shift.

    On the working directory both a percentage check (at least 1%) and an
    absolute comparison are performed; the "unusable node" threshold is hit
    when the available space is critically low.

    https://github.com/globaleaks/GlobaLeaks/issues/297
    https://github.com/globaleaks/GlobaLeaks/issues/872
    """
    self.measured_freespace, self.measured_totalspace = get_disk_space(self.state.settings.working_path)

    disk_space = 0
    disk_message = ""
    accept_submissions = True
    old_accept_submissions = State.accept_submissions

    for c in get_disk_anomaly_conditions(self.measured_freespace, self.measured_totalspace):
        if c['condition']:
            disk_space = c['alarm_level']

            info_msg = c['info_msg']()

            if disk_space == 2:
                disk_message = "[FATAL] Disk anomaly, submissions disabled: %s" % info_msg
            else:  # == 1
                disk_message = "[WARNING]: Disk anomaly: %s" % info_msg

            accept_submissions = c['accept_submissions']
            break

    # This check is temporary; it is meant to verify that the switch can be
    # logged as part of the Anomalies via this function
    old_alarm_level = self.alarm_levels['disk_space']
    if old_alarm_level != disk_space:
        if disk_message:
            log.err(disk_message)
        else:
            log.err("Available disk space returned to normal levels")

    # the values are set here with single assignments in order to
    # minimize possible race conditions in resetting/setting them
    self.alarm_levels['disk_space'] = disk_space
    self.alarm_levels['disk_message'] = disk_message

    # if not on testing, change accept_submissions to the new value
    State.accept_submissions = accept_submissions if not self.state.settings.testing else True

    if old_accept_submissions != State.accept_submissions:
        log.info("Switching disk space availability from: %s to %s",
                 old_accept_submissions, accept_submissions)

        # Must invalidate the cache here because accept_submissions served in /public has changed
        Cache.invalidate()

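# get_disk_anomaly_conditions returns an ordered list of condition dicts that
# the loop above scans, stopping at the first one that triggers. A minimal
# self-contained sketch of that table-driven pattern; the thresholds and
# messages below are illustrative, not the ones GlobaLeaks ships:

def get_conditions(free_bytes, total_bytes):
    free_ratio = free_bytes / float(total_bytes)
    return [
        {
            # most severe condition first: the scan stops at the first match
            'condition': free_ratio <= 0.01,
            'alarm_level': 2,
            'info_msg': lambda: "free space under 1%%: %d bytes left" % free_bytes,
            'accept_submissions': False,
        },
        {
            'condition': free_ratio <= 0.05,
            'alarm_level': 1,
            'info_msg': lambda: "free space under 5%%: %d bytes left" % free_bytes,
            'accept_submissions': True,
        },
    ]

for c in get_conditions(2 * 1024 ** 3, 100 * 1024 ** 3):  # 2GB free of 100GB
    if c['condition']:
        print(c['alarm_level'], c['info_msg']())
        break
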
def write_plaintext_file(sf, dest_path):
    try:
        with sf.open('rb') as encrypted_file, open(dest_path, "a+b") as plaintext_file:
            while True:
                chunk = encrypted_file.read(abstract.FileDescriptor.bufferSize)
                if not chunk:
                    break

                plaintext_file.write(chunk)
    except Exception as excep:
        log.err("Unable to create plaintext file %s: %s", dest_path, excep)

def sync_clean_untracked_files(session):
    """
    Removes files in Settings.attachments_path that are not tracked by
    InternalFile/ReceiverFile.
    """
    tracked_files = db_get_tracked_files(session)
    for filesystem_file in os.listdir(Settings.attachments_path):
        if filesystem_file not in tracked_files:
            file_to_remove = os.path.join(Settings.attachments_path, filesystem_file)
            try:
                log.debug('Removing untracked file: %s', file_to_remove)
                fs.overwrite_and_remove(file_to_remove)
            except OSError:
                log.err('Failed to remove untracked file: %s', file_to_remove)

def execution_check(self):
    self.request.execution_time = datetime.now() - self.request.start_time

    if self.request.execution_time.seconds > self.handler_exec_time_threshold:
        err_tup = ("Handler [%s] exceeded execution threshold (of %d secs) with an execution time of %.2f seconds",
                   self.name, self.handler_exec_time_threshold, self.request.execution_time.seconds)
        log.err(tid=self.request.tid, *err_tup)
        self.state.schedule_exception_email(*err_tup)

    track_handler(self)

    if self.uniform_answer_time:
        needed_delay = (Settings.side_channels_guard - (self.request.execution_time.microseconds / 1000)) / 1000
        if needed_delay > 0:
            return deferred_sleep(needed_delay)

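# The uniform-answer-time guard pads every response toward a fixed duration so
# response timing leaks less about what the handler did. A worked sketch of
# the delay arithmetic above, assuming an illustrative 150ms guard window (the
# real value lives in Settings.side_channels_guard):

side_channels_guard = 150                 # guard window, in milliseconds
elapsed_us = 42000                        # stands in for execution_time.microseconds
needed_delay = (side_channels_guard - (elapsed_us / 1000)) / 1000.0
# (150 - 42) / 1000 = 0.108 -> sleep a further 108ms, expressed in seconds
assert abs(needed_delay - 0.108) < 1e-9
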
def process_receiverfiles(state, receiverfiles_maps):
    """
    @param receiverfiles_maps: the mapping of ifile/rfiles to be created on filesystem
    @return: None
    """
    for ifile_id, receiverfiles_map in receiverfiles_maps.items():
        key = receiverfiles_map['crypto_tip_pub_key']
        filename = receiverfiles_map['filename']
        filecode = filename.split('.')[0]

        plaintext_name = "%s.plain" % filecode
        encrypted_name = "%s.encrypted" % filecode
        plaintext_path = os.path.abspath(os.path.join(Settings.attachments_path, plaintext_name))
        encrypted_path = os.path.abspath(os.path.join(Settings.attachments_path, encrypted_name))

        sf = state.get_tmp_file_by_name(filename)

        if key:
            receiverfiles_map['filename'] = encrypted_name
            write_encrypted_file(key, sf, encrypted_path)
            for rf in receiverfiles_map['rfiles']:
                rf['filename'] = encrypted_name
        else:
            for rcounter, rfileinfo in enumerate(receiverfiles_map['rfiles']):
                with sf.open('rb') as encrypted_file:
                    if rfileinfo['receiver']['pgp_key_public']:
                        try:
                            pgp_name = "pgp_encrypted-%s" % generateRandomKey(16)
                            pgp_path = os.path.abspath(os.path.join(Settings.attachments_path, pgp_name))

                            encrypt_file_with_pgp(state,
                                                  encrypted_file,
                                                  rfileinfo['receiver']['pgp_key_public'],
                                                  rfileinfo['receiver']['pgp_key_fingerprint'],
                                                  pgp_path)

                            rfileinfo['filename'] = pgp_name
                            rfileinfo['status'] = u'encrypted'
                        except Exception as excep:
                            log.err("%d# Unable to complete PGP encrypt for %s on %s: %s. marking the file as unavailable.",
                                    rcounter, rfileinfo['receiver']['name'], rfileinfo['filename'], excep)
                            rfileinfo['status'] = u'unavailable'
                    elif state.tenant_cache[receiverfiles_map['tid']].allow_unencrypted:
                        receiverfiles_map['plaintext_file_needed'] = True
                        rfileinfo['filename'] = plaintext_name
                        rfileinfo['status'] = u'reference'
                    else:
                        rfileinfo['status'] = u'nokey'

            if receiverfiles_map['plaintext_file_needed']:
                write_plaintext_file(sf, plaintext_path)

def perform_action(session, tid, csr_fields):
    db_cfg = load_tls_dict(session, tid)

    pkv = tls.PrivKeyValidator()
    ok, _ = pkv.validate(db_cfg)
    if not ok:
        raise errors.InputValidationError()

    key_pair = db_cfg['ssl_key']
    try:
        csr_txt = tls.gen_x509_csr_pem(key_pair, csr_fields, Settings.csr_sign_bits)
        log.debug("Generated a new CSR")
        return csr_txt
    except Exception as e:
        log.err(e)
        raise errors.InputValidationError('CSR gen failed')

def write_encrypted_file(key, sf, dest_path):
    try:
        with sf.open('rb') as encrypted_file, \
             GCE.streaming_encryption_open('ENCRYPT', key, dest_path) as seo:
            chunk = encrypted_file.read(abstract.FileDescriptor.bufferSize)
            while True:
                x = encrypted_file.read(abstract.FileDescriptor.bufferSize)
                if not x:
                    # no next chunk: flag this chunk as the last one
                    seo.encrypt_chunk(chunk, 1)
                    break

                seo.encrypt_chunk(chunk, 0)
                chunk = x
    except Exception as excep:
        log.err("Unable to create encrypted file %s: %s", dest_path, excep)

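# The loop above uses a one-chunk read-ahead so that it can tell the streaming
# encryptor when it is handing over the *last* chunk (the final-chunk flag is
# the second argument of encrypt_chunk). A self-contained sketch of that
# pattern over an in-memory stream (note the sketch yields nothing for an
# empty stream, while the loop above would emit one empty final chunk):

import io

def chunks_with_last_flag(stream, size):
    """Yield (chunk, is_last) pairs, peeking one chunk ahead."""
    chunk = stream.read(size)
    while chunk:
        nxt = stream.read(size)
        yield chunk, not nxt
        chunk = nxt

data = io.BytesIO(b"abcdefghij")
assert list(chunks_with_last_flag(data, 4)) == [(b"abcd", False), (b"efgh", False), (b"ij", True)]
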
def epilogue(self):
    """
    Imports the contents of the tor_hs directory into the config table

    NOTE the function does not delete the torhs dir, but instead leaves it
    on disk to ensure that the operator does not lose their HS key.
    """
    config = self.model_to['Config']

    def add_raw_config(session, group, name, customized, value):
        c = config(migrate=True)
        c.var_group = group
        c.var_name = name
        c.customized = customized
        c.value = {'v': value}
        session.add(c)

    hostname, key = '', ''

    pk_path = os.path.join(TOR_DIR, 'private_key')
    hn_path = os.path.join(TOR_DIR, 'hostname')

    if os.path.exists(TOR_DIR) and os.path.exists(pk_path) and os.path.exists(hn_path):
        with open(hn_path, 'r') as f:
            hostname = f.read().strip()

            # TODO assert that the hostname corresponds with the key
            if not re.match(r'[A-Za-z0-9]{16}\.onion', hostname):
                raise Exception('The hostname format does not match')

        with open(pk_path, 'r') as f:
            r = f.read()
            if not r.startswith('-----BEGIN RSA PRIVATE KEY-----\n'):
                raise Exception('%s does not have the right format!' % pk_path)

            # Clean and convert the pem encoded key read into the format
            # expected by the ADD_ONION tor control protocol.
            # TODO assert the key passes deeper validation
            key = 'RSA1024:' + ''.join(r.strip().split('\n')[1:-1])
    else:
        log.err('The structure of %s is incorrect. Cannot load onion service keys' % TOR_DIR)

    self.session_new.query(config) \
                    .filter(config.var_group == u'node', config.var_name == u'onionservice') \
                    .delete(synchronize_session='fetch')

    add_raw_config(self.session_new, u'node', u'onionservice', True, hostname)
    add_raw_config(self.session_new, u'private', u'tor_onion_key', True, key)

    self.entries_count['Config'] += 1

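# The ADD_ONION control command wants "RSA1024:<base64 DER>", i.e. the PEM
# body with the header/footer lines and newlines stripped, which is exactly
# what the join above computes. A minimal sketch of that conversion (the key
# material below is a fake placeholder, not a real key):

pem = ("-----BEGIN RSA PRIVATE KEY-----\n"
       "MIICXAIBAAKBgQC7\n"
       "fakefakefakefake\n"
       "-----END RSA PRIVATE KEY-----\n")

blob = 'RSA1024:' + ''.join(pem.strip().split('\n')[1:-1])
assert blob == 'RSA1024:MIICXAIBAAKBgQC7fakefakefakefake'
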
def __init__(self, tempdirprefix=None):
    if tempdirprefix is None:
        tempdir = tempfile.mkdtemp()
    else:
        tempdir = tempfile.mkdtemp(prefix=tempdirprefix)

    try:
        gpgbinary = 'gpg'
        if os.path.exists('/usr/bin/gpg1'):
            gpgbinary = 'gpg1'

        self.gnupg = GPG(gpgbinary=gpgbinary, gnupghome=tempdir, options=['--trust-model', 'always'])
        self.gnupg.encoding = "UTF-8"
    except OSError as excep:
        log.err("Critical, OS error in operating with GnuPG home: %s", excep)
        raise
    except Exception as excep:
        log.err("Unable to instance PGP object: %s" % excep)
        raise

def schedule_exception_email(self, exception_text, *args):
    if not hasattr(self.tenant_cache[1], 'notification'):
        log.err("Error: Cannot send mail exception before complete initialization.")
        return

    if self.exceptions_email_count >= self.settings.exceptions_email_hourly_limit:
        return

    exception_text = (exception_text % args) if args else exception_text

    sha256_hash = sha256(exception_text.encode())

    if sha256_hash not in self.exceptions:
        self.exceptions[sha256_hash] = 0

    self.exceptions[sha256_hash] += 1
    if self.exceptions[sha256_hash] > 5:
        log.err("Exception mail suppressed for (%s) [reason: threshold exceeded]", sha256_hash)
        return

    self.exceptions_email_count += 1

    mail_subject = "GlobaLeaks Exception"
    delivery_list = self.tenant_cache[1].notification.exception_delivery_list

    mail_body = text_type("Platform: %s\nHost: %s (%s)\nVersion: %s\n\n%s"
                          % (self.tenant_cache[1].name,
                             self.tenant_cache[1].hostname,
                             self.tenant_cache[1].onionservice,
                             __version__,
                             exception_text))

    for mail_address, pgp_key_public in delivery_list:
        mail_content = mail_body

        # Opportunistically encrypt the mail body. NOTE that a mail will go out
        # unencrypted if the address in the list does not have a public key set.
        if pgp_key_public:
            pgpctx = PGPContext(self.settings.tmp_path)
            fingerprint = pgpctx.load_key(pgp_key_public)['fingerprint']
            mail_content = pgpctx.encrypt_message(fingerprint, mail_content)

        # avoid waiting for the notification to send and instead rely on threads to handle it
        tw(db_schedule_email, 1, mail_address, mail_subject, mail_content)

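# The suppression above keys a counter on the SHA-256 of the formatted
# exception text, so a repeating exception stops generating mail after five
# occurrences. A self-contained sketch of that dedup/throttle pattern
# (hashlib is aliased to avoid clashing with the sha256 helper used above):

from hashlib import sha256 as _sha256

class ExceptionThrottle:
    def __init__(self, limit=5):
        self.limit = limit
        self.counts = {}

    def allow(self, text):
        h = _sha256(text.encode()).hexdigest()
        self.counts[h] = self.counts.get(h, 0) + 1
        return self.counts[h] <= self.limit

t = ExceptionThrottle(limit=2)
assert [t.allow("boom") for _ in range(3)] == [True, True, False]
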
def directory_traversal_check(trusted_absolute_prefix, untrusted_path):
    """
    Check that an 'untrusted_path' matches a 'trusted_absolute_prefix' prefix
    """
    if not os.path.isabs(trusted_absolute_prefix):
        raise Exception("programming error: trusted_absolute_prefix is not an absolute path: %s" %
                        trusted_absolute_prefix)

    # Windows fix: the trusted_absolute_prefix needs to be normalized for
    # commonprefix to actually work, as / is a valid path separator and
    # without normalization you can end up with paths like:
    # C:\\GlobaLeaks\\client\\app/
    untrusted_path = os.path.abspath(untrusted_path)
    trusted_absolute_prefix = os.path.abspath(trusted_absolute_prefix)

    if trusted_absolute_prefix != os.path.commonprefix([trusted_absolute_prefix, untrusted_path]):
        log.err("Blocked file operation for: (prefix, attempted_path) : ('%s', '%s')",
                trusted_absolute_prefix, untrusted_path)

        raise errors.DirectoryTraversalError

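# A quick illustration of the check with plain os.path calls: '..' segments
# are collapsed by abspath before the prefix comparison, which is what defeats
# the traversal (the paths below are illustrative):

import os

prefix = os.path.abspath('/var/globaleaks/files')
inside = os.path.abspath('/var/globaleaks/files/upload.txt')
outside = os.path.abspath('/var/globaleaks/files/../../etc/passwd')

assert os.path.commonprefix([prefix, inside]) == prefix        # allowed
assert os.path.commonprefix([prefix, outside]) != prefix       # blocked
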
def process_file_upload(self):
    if b'flowFilename' not in self.request.args:
        return

    total_file_size = int(self.request.args[b'flowTotalSize'][0])
    flow_identifier = self.request.args[b'flowIdentifier'][0]

    chunk_size = len(self.request.args[b'file'][0])
    if ((chunk_size / (1024 * 1024)) > self.state.tenant_cache[self.request.tid].maximum_filesize or
        (total_file_size / (1024 * 1024)) > self.state.tenant_cache[self.request.tid].maximum_filesize):
        log.err("File upload request rejected: file too big", tid=self.request.tid)
        raise errors.FileTooBig(self.state.tenant_cache[self.request.tid].maximum_filesize)

    if flow_identifier not in self.state.TempUploadFiles:
        self.state.TempUploadFiles.set(flow_identifier, SecureTemporaryFile(Settings.tmp_path))

    f = self.state.TempUploadFiles[flow_identifier]
    with f.open('w') as f:
        f.write(self.request.args[b'file'][0])

        if self.request.args[b'flowChunkNumber'][0] != self.request.args[b'flowTotalChunks'][0]:
            return None

        f.finalize_write()

    mime_type, _ = mimetypes.guess_type(text_type(self.request.args[b'flowFilename'][0], 'utf-8'))
    if mime_type is None:
        mime_type = 'application/octet-stream'

    filename = self.request.args[b'flowFilename'][0].decode('utf-8')

    self.uploaded_file = {
        'date': datetime_now(),
        'name': filename,
        'type': mime_type,
        'size': total_file_size,
        'filename': os.path.basename(f.filepath),
        'body': f,
        'description': self.request.args.get(b'description', [''])[0]
    }

def mail_exception_handler(etype, value, tback):
    """
    Formats traceback and exception data and emails the error.

    This is enabled only in the testing phase and in testing releases,
    not in production releases.
    """
    if isinstance(value, (GeneratorExit, defer.AlreadyCalledError, SMTPError)) or \
       (etype == AssertionError and value.message == "Request closed"):
        # we need to bypass email notification for some exceptions that:
        # 1) are raised frequently or lie in a twisted bug;
        # 2) lack useful stacktraces;
        # 3) can be the cause of email storm amplification
        #
        # exceptions of this kind are simply logged as errors.
        log.err("exception mail suppressed for exception (%s) [reason: special exception]", str(etype))
        return

    mail_body = ""

    # collection of the stacktrace info
    exc_type = re.sub("(<(type|class ')|'exceptions.|'>|__main__.)", "", str(etype))

    mail_body += "%s %s\n\n" % (exc_type.strip(), etype.__doc__)
    mail_body += '\n'.join(traceback.format_exception(etype, value, tback))

    log.err("Unhandled exception raised:")
    log.err(mail_body)

    State.schedule_exception_email(mail_body)

def cert_expiration_checks(self, session, tid):
    priv_fact = models.config.ConfigFactory(session, tid)

    if not priv_fact.get_val('https_enabled'):
        return

    cert = load_certificate(FILETYPE_PEM, priv_fact.get_val('https_cert'))
    expiration_date = letsencrypt.convert_asn1_date(cert.get_notAfter())

    # ACME renewal checks
    if priv_fact.get_val('acme') and datetime.now() > expiration_date - timedelta(days=self.acme_try_renewal):
        try:
            db_acme_cert_request(session, tid)
        except Exception as exc:
            log.err('Automatic HTTPS renewal failed: %s', exc, tid=tid)

            # Send an email to the admin because this requires user intervention
            if not self.state.tenant_cache[tid].notification.disable_admin_notification_emails:
                self.certificate_mail_creation(session, 'https_certificate_renewal_failure', tid, expiration_date)

        tls_config = load_tls_dict(session, tid)

        self.state.snimap.unload(tid)
        self.state.snimap.load(tid, tls_config)

    # Regular certificate expiration checks
    elif datetime.now() > expiration_date - timedelta(days=self.notify_expr_within):
        log.info('The HTTPS Certificate is expiring on %s', expiration_date, tid=tid)
        if not self.state.tenant_cache[tid].notification.disable_admin_notification_emails:
            self.certificate_mail_creation(session, 'https_certificate_expiration', tid, expiration_date)

def validate_type(value, type):
    retval = False

    if value is None:
        log.err("-- Invalid python_type, in [%s] expected %s", value, type)

    # if it's callable, then assume it's a primitive class
    elif callable(type):
        retval = BaseHandler.validate_python_type(value, type)
        if not retval:
            log.err("-- Invalid python_type, in [%s] expected %s", value, type)

    # value as "{foo:bar}"
    elif isinstance(type, collections.Mapping):
        retval = BaseHandler.validate_jmessage(value, type)
        if not retval:
            log.err("-- Invalid JSON/dict [%s] expected %s", value, type)

    # regexp
    elif isinstance(type, str):
        retval = BaseHandler.validate_regexp(value, type)
        if not retval:
            log.err("-- Failed Match in regexp [%s] against %s", value, type)

    # value as "[ type ]"
    elif isinstance(type, collections.Iterable):
        # empty list is ok
        if not value:
            retval = True
        else:
            retval = all(BaseHandler.validate_type(x, type[0]) for x in value)
            if not retval:
                log.err("-- List validation failed [%s] of %s", value, type)

    return retval

def generate(self, session):
    for trigger in ['ReceiverTip', 'Comment', 'Message', 'ReceiverFile']:
        model = trigger_model_map[trigger]

        silent_tids = []
        for tid, cache_item in self.state.tenant_cache.items():
            if cache_item.notification.disable_receiver_notification_emails:
                silent_tids.append(tid)

        if silent_tids:
            for x in session.query(models.ReceiverTip) \
                            .filter(models.ReceiverTip.internaltip_id == models.InternalTip.id,
                                    models.InternalTip.tid.in_(silent_tids)):
                x.new = False

            for x in session.query(models.Comment) \
                            .filter(models.Comment.internaltip_id == models.InternalTip.id,
                                    models.InternalTip.tid.in_(silent_tids)):
                x.new = False

            for x in session.query(models.Message) \
                            .filter(models.Message.receivertip_id == models.ReceiverTip.id,
                                    models.ReceiverTip.internaltip_id == models.InternalTip.id,
                                    models.InternalTip.tid.in_(silent_tids)):
                x.new = False

            for x in session.query(models.ReceiverFile) \
                            .filter(models.ReceiverFile.receivertip_id == models.ReceiverTip.id,
                                    models.ReceiverTip.internaltip_id == models.InternalTip.id,
                                    models.InternalTip.tid.in_(silent_tids)):
                x.new = False

        for element in session.query(model).filter(model.new.is_(True)):
            data = {
                'type': trigger_template_map[trigger]
            }

            try:
                getattr(self, 'process_%s' % trigger)(session, element, data)
            except Exception as e:
                log.err("Unhandled exception during mail generation: %s", e)
            else:
                element.new = False

def __init__(self, net_sockets, proxy_ip, proxy_port):
    log.info("Starting process monitor")

    self.shutting_down = False

    self.start_time = datetime_now()
    self.tls_process_pool = []
    self.cpu_count = multiprocessing.cpu_count()

    self.worker_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'worker_https.py')

    self.tls_cfg = {
        'proxy_ip': proxy_ip,
        'proxy_port': proxy_port,
        'debug': log.loglevel <= logging.DEBUG,
        'site_cfgs': [],
    }

    if not net_sockets:
        log.err("No ports to bind to! Spawning processes will not work!")

    self.tls_cfg['tls_socket_fds'] = [ns.fileno() for ns in net_sockets]

def startService(self):
    mask = 0
    if Settings.devel_mode:
        mask = 8000

    # Allocate local ports
    for port in Settings.bind_local_ports:
        http_sock, fail = reserve_port_for_ip('127.0.0.1', port)
        if fail is not None:
            log.err("Could not reserve socket for %s (error: %s)", fail[0], fail[1])
        else:
            self.state.http_socks += [http_sock]

    # Allocate remote ports
    for port in Settings.bind_remote_ports:
        sock, fail = reserve_port_for_ip(Settings.bind_address, port + mask)
        if fail is not None:
            log.err("Could not reserve socket for %s (error: %s)", fail[0], fail[1])
            continue

        if port == 80:
            self.state.http_socks += [sock]
        elif port == 443:
            self.state.https_socks += [sock]

    if Settings.disable_swap:
        disable_swap()

    fix_file_permissions(Settings.working_path, Settings.uid, Settings.gid, 0o700, 0o600)

    set_proc_title('globaleaks')

    drop_privileges(Settings.user, Settings.uid, Settings.gid)

    reactor.callLater(0, self.deferred_start)

def add_hidden_service(self, tid, hostname, key):
    if self.tor_conn is None:
        return

    hs_loc = '80 localhost:8083'
    if not hostname and not key:
        log.err('Creating new onion service', tid=tid)

        if self.onion_service_version == 3:
            ephs = EphemeralHiddenService(hs_loc, 'NEW:ED25519-V3')
        else:
            ephs = EphemeralHiddenService(hs_loc, 'NEW:RSA1024')
    else:
        log.info('Setting up existing onion service %s', hostname, tid=tid)
        ephs = EphemeralHiddenService(hs_loc, key)
        self.hs_map[hostname] = ephs

    @inlineCallbacks
    def init_callback(ret):
        log.err('Initialization of hidden-service %s completed.', ephs.hostname, tid=tid)
        if not hostname and not key:
            if tid in State.tenant_cache:
                self.hs_map[ephs.hostname] = ephs
                yield set_onion_service_info(tid, ephs.hostname, ephs.private_key)
            else:
                yield ephs.remove_from_tor(self.tor_conn.protocol)

            tid_list = list(set([1, tid]))

            for x in tid_list:
                Cache().invalidate(x)

            yield refresh_memory_variables(tid_list)

    return ephs.add_to_tor(self.tor_conn.protocol).addCallbacks(init_callback)  # pylint: disable=no-member

def update_db():
    """
    This function handles the update of an existing database

    :return: The database version
    """
    db_version, db_file_path = get_db_file(Settings.working_path)
    if db_version == 0:
        return 0

    try:
        with warnings.catch_warnings():
            from globaleaks.db import migration
            warnings.simplefilter("ignore", category=sa_exc.SAWarning)

            log.err('Found an already initialized database version: %d', db_version)
            if db_version == DATABASE_VERSION:
                migration.perform_data_update(db_file_path)
                return DATABASE_VERSION

            log.err('Performing schema migration from version %d to version %d',
                    db_version, DATABASE_VERSION)

            migration.perform_migration(db_version)
    except Exception as exception:
        log.err('Migration failure: %s', exception)
        log.err('Verbose exception traceback:')
        etype, value, tback = sys.exc_info()
        log.info('\n'.join(traceback.format_exception(etype, value, tback)))
        return -1

    log.err('Migration completed with success!')

    return DATABASE_VERSION

def fail_startup(excep):
    log.err("ERROR: Cannot start GlobaLeaks. Please manually examine the exception.")
    log.err("EXCEPTION: %s", excep)
    log.debug('TRACE: %s', traceback.format_exc())

    if reactor.running:
        reactor.stop()

def on_error(self, excep):
    log.err("Exception while running %s" % self.name)
    log.exception(excep)
    extract_exception_traceback_and_schedule_email(excep)

def on_error(self, excep):
    error = "Job %s died with runtime %.4f [low: %.4f, high: %.4f]" % \
            (self.name, self.mean_time, self.low_time, self.high_time)

    log.err(error)
    log.exception(excep)

    extract_exception_traceback_and_schedule_email(excep)

def _verifyCallback(conn, cert, errno, depth, ok):
    if not ok:
        log.err("Unable to verify validity of certificate: %s" % cert.get_subject())

    return ok

def perform_migration(version):
    """
    @param version: the database version to migrate from
    @return: None
    """
    to_delete_on_fail = []
    to_delete_on_success = []

    if version < FIRST_DATABASE_VERSION_SUPPORTED:
        log.info("Migrations from DB version lower than %d are no longer supported!" % FIRST_DATABASE_VERSION_SUPPORTED)
        quit()

    tmpdir = os.path.abspath(os.path.join(Settings.tmp_path, 'tmp'))

    if version < 41:
        orig_db_file = os.path.abspath(os.path.join(Settings.working_path, 'db', 'glbackend-%d.db' % version))
    else:
        orig_db_file = os.path.abspath(os.path.join(Settings.working_path, 'globaleaks.db'))

    final_db_file = os.path.abspath(os.path.join(Settings.working_path, 'globaleaks.db'))

    shutil.rmtree(tmpdir, True)
    os.mkdir(tmpdir)
    shutil.copy(orig_db_file, os.path.join(tmpdir, 'old.db'))

    new_db_file = None

    try:
        while version < DATABASE_VERSION:
            old_db_file = os.path.abspath(os.path.join(tmpdir, 'old.db'))
            new_db_file = os.path.abspath(os.path.join(tmpdir, 'new.db'))

            if os.path.exists(new_db_file):
                shutil.move(new_db_file, old_db_file)

            Settings.db_file = new_db_file
            Settings.enable_input_length_checks = False

            to_delete_on_fail.append(new_db_file)
            to_delete_on_success.append(old_db_file)

            log.info("Updating DB from version %d to version %d" % (version, version + 1))

            j = version - FIRST_DATABASE_VERSION_SUPPORTED
            session_old = get_session(make_db_uri(old_db_file))

            engine = get_engine(make_db_uri(new_db_file), foreign_keys=False)
            if FIRST_DATABASE_VERSION_SUPPORTED + j + 1 == DATABASE_VERSION:
                Base.metadata.create_all(engine)
            else:
                Bases[j + 1].metadata.create_all(engine)

            session_new = sessionmaker(bind=engine)()

            # Here is instanced the migration script
            MigrationModule = importlib.import_module("globaleaks.db.migrations.update_%d" % (version + 1))
            migration_script = MigrationModule.MigrationScript(migration_mapping, version, session_old, session_new)

            log.info("Migrating table:")

            try:
                try:
                    migration_script.prologue()
                except Exception as exception:
                    log.err("Failure while executing migration prologue: %s" % exception)
                    raise exception

                for model_name, _ in migration_mapping.items():
                    if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                        try:
                            migration_script.migrate_model(model_name)

                            # Commit at every table migration in order to be able to detect
                            # the precise migration that may fail.
                            migration_script.commit()
                        except Exception as exception:
                            log.err("Failure while migrating table %s: %s" % (model_name, exception))
                            raise exception

                try:
                    migration_script.epilogue()
                    migration_script.commit()
                except Exception as exception:
                    log.err("Failure while executing migration epilogue: %s" % exception)
                    raise exception
            finally:
                # the database should always be closed before leaving the application
                # in order to not keep leaking journal files
                migration_script.close()

            log.info("Migration stats:")

            # we open a new db in order to verify the integrity of the generated file
            session_verify = get_session(make_db_uri(new_db_file))

            for model_name, _ in migration_mapping.items():
                if migration_script.model_from[model_name] is not None and migration_script.model_to[model_name] is not None:
                    count = session_verify.query(migration_script.model_to[model_name]).count()
                    if migration_script.entries_count[model_name] != count:
                        if migration_script.fail_on_count_mismatch[model_name]:
                            raise AssertionError("Integrity check failed on count equality for table %s: %d != %d" %
                                                 (model_name, count, migration_script.entries_count[model_name]))
                        else:
                            log.info(" * %s table migrated (entries count changed from %d to %d)" %
                                     (model_name, migration_script.entries_count[model_name], count))
                    else:
                        log.info(" * %s table migrated (%d entry(s))" %
                                 (model_name, migration_script.entries_count[model_name]))

            version += 1

            session_verify.close()

        perform_data_update(new_db_file)

        # in case of success first copy the new migrated db, then as last action delete the original db file
        shutil.copy(new_db_file, final_db_file)

        if orig_db_file != final_db_file:
            overwrite_and_remove(orig_db_file)
            path = os.path.join(Settings.working_path, 'db')
            if os.path.exists(path):
                shutil.rmtree(path)
    finally:
        # Always cleanup the temporary directory used for the migration
        for f in os.listdir(tmpdir):
            overwrite_and_remove(os.path.join(tmpdir, f))

        shutil.rmtree(tmpdir)

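# The migration loop above leapfrogs two files inside tmpdir: each step reads
# old.db and writes new.db, and before the next step new.db is moved over
# old.db. A self-contained sketch of that shuffle, with file contents standing
# in for database versions:

import os, shutil, tempfile

tmpdir = tempfile.mkdtemp()
old, new = os.path.join(tmpdir, 'old.db'), os.path.join(tmpdir, 'new.db')
with open(old, 'w') as f:
    f.write('v1')

for step in range(2):                # two migration steps: v1 -> v2 -> v3
    if os.path.exists(new):
        shutil.move(new, old)        # last step's output becomes this step's input
    with open(old) as f:
        version = int(f.read()[1:])
    with open(new, 'w') as f:
        f.write('v%d' % (version + 1))

with open(new) as f:
    assert f.read() == 'v3'

shutil.rmtree(tmpdir)
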
def init_callback(ret):
    log.err('Initialization of onion-service %s completed.', ephs.hostname, tid=tid)

def __del__(self):
    try:
        shutil.rmtree(self.gnupg.gnupghome)
    except Exception as excep:
        log.err("Unable to clean temporary PGP environment: %s: %s", self.gnupg.gnupghome, excep)

def sendmail(tid, smtp_host, smtp_port, security, authentication, username, password,
             from_name, from_address, to_address, subject, body, anonymize=True,
             socks_host='127.0.0.1', socks_port=9050):
    """
    Send an email using SMTPS/SMTP+TLS and optionally torify the connection.

    :param tid: A tenant id
    :param smtp_host: A SMTP host
    :param smtp_port: A SMTP port
    :param security: A type of security to be applied (SMTPS/SMTP+TLS)
    :param authentication: A boolean to enable authentication
    :param username: A mail account username
    :param password: A mail account password
    :param from_name: A from name
    :param from_address: A from address
    :param to_address: The to address
    :param subject: A mail subject
    :param body: A mail body
    :param anonymize: A boolean to enable anonymous mail connection
    :param socks_host: A socks host to be used for the mail connection
    :param socks_port: A socks port to be used for the mail connection
    :return: A deferred resolving at the end of the connection
    """
    try:
        timeout = 30

        message = MIME_mail_build(from_name, from_address, to_address, to_address, subject, body)

        log.debug('Sending email to %s using SMTP server [%s:%d] [%s]',
                  to_address, smtp_host, smtp_port, security, tid=tid)

        context_factory = TLSClientContextFactory()

        smtp_deferred = defer.Deferred()

        factory = ESMTPSenderFactory(
            username.encode() if authentication else None,
            password.encode() if authentication else None,
            from_address,
            to_address,
            message,
            smtp_deferred,
            contextFactory=context_factory,
            requireAuthentication=authentication,
            requireTransportSecurity=(security == 'TLS'),
            retries=0,
            timeout=timeout)

        if security == "SSL":
            factory = tls.TLSMemoryBIOFactory(context_factory, True, factory)

        if anonymize:
            socksProxy = TCP4ClientEndpoint(reactor, socks_host, socks_port, timeout=timeout)
            endpoint = SOCKS5ClientEndpoint(smtp_host, smtp_port, socksProxy)
        else:
            endpoint = TCP4ClientEndpoint(reactor, smtp_host, smtp_port, timeout=timeout)

        conn_deferred = endpoint.connect(factory)

        final = defer.DeferredList([conn_deferred, smtp_deferred], fireOnOneErrback=True, consumeErrors=True)

        def failure_cb(failure):
            """
            :param failure: {Failure {twisted.internet.FirstError {Failure}}}
            """
            log.err("SMTP connection failed (Exception: %s)", failure.value.subFailure.value, tid=tid)
            return False

        def success_cb(results):
            """
            :param results: {list of (success, return_val) tuples}
            """
            return True

        return final.addCallbacks(success_cb, failure_cb)
    except Exception as e:
        # avoid raising exceptions inside the email logic to prevent chained errors
        log.err("Unexpected exception in sendmail: %s", e, tid=tid)
        return defer.succeed(False)

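# sendmail deliberately resolves its Deferred with True/False rather than
# failing, so callers can chain on it without adding their own errbacks. A
# hedged usage sketch, kept as comments since it needs a reactor and a real
# SMTP server; host and credentials are placeholders:
#
#   d = sendmail(1, 'mail.example.com', 587, 'TLS', True, 'user', 'pass',
#                'Node', 'node@example.com', 'admin@example.com',
#                'Test subject', 'Test body', anonymize=False)
#   d.addCallback(lambda sent: log.info("mail sent: %s", sent))
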
def validate_jmessage(jmessage, message_template):
    """
    Takes a string that represents a JSON message and checks whether it
    conforms to the message type it is supposed to be. The message must be
    either a dict or a list.

    This function may be called recursively to validate sub-parameters that
    are also GLTypes.

    jmessage: the message that should be validated
    message_template: the GLType template it should match
    """
    if isinstance(message_template, dict):
        success_check = 0
        keys_to_strip = []
        for key, value in jmessage.items():
            if key not in message_template:
                # strip whatever is not validated
                #
                # reminder: it's not possible to raise an exception
                # in case more values are present, because it's normal that
                # the client automatically sends more data.
                #
                # e.g. the client will always send 'creation_date' attributes of
                # objects, and attributes like this are generally present only
                # from the second request on.
                #
                keys_to_strip.append(key)
                continue

            if not BaseHandler.validate_type(value, message_template[key]):
                log.err("Received key %s: type validation fail", key)
                raise errors.InputValidationError("Key (%s) type validation failure" % key)

            success_check += 1

        for key in keys_to_strip:
            del jmessage[key]

        for key, value in message_template.items():
            if key not in jmessage:
                log.debug("Key %s expected but missing!", key)
                log.debug("Received schema %s - Expected %s", jmessage.keys(), message_template.keys())
                raise errors.InputValidationError("Missing key %s" % key)

            if not BaseHandler.validate_type(jmessage[key], value):
                log.err("Expected key: %s type validation failure", key)
                raise errors.InputValidationError("Key (%s) double validation failure" % key)

            if isinstance(message_template[key], (dict, list)) and message_template[key]:
                BaseHandler.validate_jmessage(jmessage[key], message_template[key])

            success_check += 1

        if success_check != len(message_template) * 2:
            log.err("Success counter double check failure: %d", success_check)
            raise errors.InputValidationError("Success counter double check failure")

        return True
    elif isinstance(message_template, list):
        if not all(BaseHandler.validate_type(x, message_template[0]) for x in jmessage):
            raise errors.InputValidationError("Not every element in %s is %s" %
                                              (jmessage, message_template[0]))

        return True
    else:
        raise errors.InputValidationError("invalid JSON message: expected dict or list")

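# What a message template looks like in practice, given the dispatch in
# validate_type: callables validate primitives, strings are regexps, dicts and
# lists nest recursively. An illustrative example only; the field names are
# invented for the sketch and are not a real GlobaLeaks schema:

example_template = {
    'name': str,                     # primitive type check (callable)
    'mail_address': r'.+@.+',        # regexp match (string)
    'pgp_key_remove': bool,          # primitive type check (callable)
    'languages': [str],              # list: every element must be a str
    'extra': {'notify': bool},       # nested dict, validated recursively
}

# A client message carrying unknown keys (e.g. 'creation_date') would still
# pass validation, with the unknown keys stripped from the dict in place.
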
def receiverfile_planning(session):
    """
    This function iterates over the uploaded InternalFiles and extracts the
    path, id and associated receivers, producing one entry for each
    combination: the ReceiverFiles that need to be created.
    """
    receiverfiles_maps = {}

    for ifile in session.query(models.InternalFile) \
                        .filter(models.InternalFile.new == True) \
                        .order_by(models.InternalFile.creation_date):
        if ifile.processing_attempts >= INTERNALFILES_HANDLE_RETRY_MAX:
            ifile.new = False
            log.err("Failed to handle receiverfiles creation for ifile %s (%d retries)",
                    ifile.id, INTERNALFILES_HANDLE_RETRY_MAX)
            continue
        elif ifile.processing_attempts >= 1:
            log.err("Failed to handle receiverfiles creation for ifile %s (retry %d/%d)",
                    ifile.id, ifile.processing_attempts, INTERNALFILES_HANDLE_RETRY_MAX)

        if ifile.processing_attempts:
            log.debug("Starting handling receiverfiles creation for ifile %s retry %d/%d",
                      ifile.id, ifile.processing_attempts, INTERNALFILES_HANDLE_RETRY_MAX)

        ifile.processing_attempts += 1

        for rtip, user in session.query(models.ReceiverTip, models.User) \
                                 .filter(models.ReceiverTip.internaltip_id == ifile.internaltip_id,
                                         models.User.id == models.ReceiverTip.receiver_id):
            receiverfile = models.ReceiverFile()
            receiverfile.internalfile_id = ifile.id
            receiverfile.receivertip_id = rtip.id
            receiverfile.filename = ifile.filename
            receiverfile.size = ifile.size
            receiverfile.status = u'processing'

            # https://github.com/globaleaks/GlobaLeaks/issues/444
            # avoid marking the receiverfile as new if it is part of a submission;
            # this way we avoid sending useless messages
            receiverfile.new = False if ifile.submission else True

            session.add(receiverfile)

            session.flush()

            if ifile.id not in receiverfiles_maps:
                receiverfiles_maps[ifile.id] = {
                    'plaintext_file_needed': False,
                    'ifile_id': ifile.id,
                    'ifile_name': ifile.filename,
                    'ifile_size': ifile.size,
                    'rfiles': [],
                    'tid': user.tid,
                }

            receiverfiles_maps[ifile.id]['rfiles'].append({
                'id': receiverfile.id,
                'status': u'processing',
                'filename': ifile.filename,
                'size': ifile.size,
                'receiver': {
                    'name': user.name,
                    'pgp_key_public': user.pgp_key_public,
                    'pgp_key_fingerprint': user.pgp_key_fingerprint,
                },
            })

    return receiverfiles_maps

def process_files(state, receiverfiles_maps):
    """
    @param receiverfiles_maps: the mapping of ifile/rfiles to be created on filesystem
    @return: None
    """
    for ifile_id, receiverfiles_map in receiverfiles_maps.items():
        ifile_name = receiverfiles_map['ifile_name']
        plain_name = "%s.plain" % ifile_name.split('.')[0]
        plain_path = os.path.abspath(os.path.join(Settings.attachments_path, plain_name))

        sf = state.get_tmp_file_by_name(ifile_name)

        receiverfiles_map['plaintext_file_needed'] = False
        for rcounter, rfileinfo in enumerate(receiverfiles_map['rfiles']):
            if rfileinfo['receiver']['pgp_key_public']:
                try:
                    new_filename, new_size = fsops_pgp_encrypt(state,
                                                               sf,
                                                               rfileinfo['receiver']['pgp_key_public'],
                                                               rfileinfo['receiver']['pgp_key_fingerprint'])

                    log.debug("%d# Switch on Receiver File for %s filename %s => %s size %d => %d",
                              rcounter, rfileinfo['receiver']['name'],
                              rfileinfo['filename'], new_filename,
                              rfileinfo['size'], new_size)

                    rfileinfo['filename'] = new_filename
                    rfileinfo['size'] = new_size
                    rfileinfo['status'] = u'encrypted'
                except Exception as excep:
                    log.err("%d# Unable to complete PGP encrypt for %s on %s: %s. marking the file as unavailable.",
                            rcounter, rfileinfo['receiver']['name'], rfileinfo['filename'], excep)
                    rfileinfo['status'] = u'unavailable'
            elif state.tenant_cache[receiverfiles_map['tid']].allow_unencrypted:
                receiverfiles_map['plaintext_file_needed'] = True
                rfileinfo['filename'] = plain_name
                rfileinfo['status'] = u'reference'
            else:
                rfileinfo['status'] = u'nokey'

        if receiverfiles_map['plaintext_file_needed']:
            log.debug("Not all receivers support PGP and the system allows plaintext version of files: %s saved as plaintext file %s",
                      ifile_name, plain_name)

            try:
                with sf.open('rb') as encrypted_file, open(plain_path, "a+b") as plaintext_file:
                    while True:
                        chunk = encrypted_file.read(4096)
                        if not chunk:
                            break

                        plaintext_file.write(chunk)

                receiverfiles_map['ifile_name'] = plain_name
            except Exception as excep:
                log.err("Unable to create plaintext file %s: %s", plain_path, excep)
        else:
            log.debug("All receivers support PGP or the system denies plaintext version of files: marking internalfile as removed")

def db_wizard(session, tid, hostname, request):
    """
    Transaction for the handling of the wizard request

    :param session: An ORM session
    :param tid: A tenant ID
    :param hostname: The hostname to be configured on the node
    :param request: A user request
    """
    language = request['node_language']

    node = config.ConfigFactory(session, tid)

    if tid == 1:
        root_tenant_node = node
        encryption = True
    else:
        root_tenant_node = node
        encryption = root_tenant_node.get_val('encryption')

    if node.get_val('wizard_done'):
        log.err("DANGER: Wizard already initialized!", tid=tid)
        raise errors.ForbiddenOperation

    db_update_enabled_languages(session, tid, [language], language)

    node.set_val('encryption', encryption)
    node.set_val('name', request['node_name'])
    node.set_val('default_language', language)
    node.set_val('wizard_done', True)
    node.set_val('enable_developers_exception_notification',
                 request['enable_developers_exception_notification'])
    node.set_val('hostname', hostname)

    node_l10n = config.ConfigL10NFactory(session, tid)
    node_l10n.set_val('header_title_homepage', language, request['node_name'])

    profiles.load_profile(session, tid, request['profile'])

    if encryption:
        crypto_escrow_prv_key, crypto_escrow_pub_key = GCE.generate_keypair()
        node.set_val('crypto_escrow_pub_key', crypto_escrow_pub_key)

    admin_desc = models.User().dict(language)
    admin_desc['username'] = request['admin_username']
    admin_desc['name'] = request['admin_name']
    admin_desc['password'] = request['admin_password']
    admin_desc['mail_address'] = request['admin_mail_address']
    admin_desc['language'] = language
    admin_desc['role'] = 'admin'
    admin_desc['pgp_key_remove'] = False

    admin_user = db_create_user(session, tid, admin_desc, language)
    admin_user.password = GCE.hash_password(request['admin_password'], admin_user.salt)
    admin_user.password_change_needed = False
    admin_user.password_change_date = datetime_now()

    if encryption:
        db_gen_user_keys(session, tid, admin_user, request['admin_password'])
        admin_user.crypto_escrow_prv_key = Base64Encoder.encode(
            GCE.asymmetric_encrypt(admin_user.crypto_pub_key, crypto_escrow_prv_key))

    receiver_user = None

    if not request['skip_recipient_account_creation']:
        receiver_desc = models.User().dict(language)
        receiver_desc['username'] = request['receiver_username']
        receiver_desc['name'] = request['receiver_name']
        receiver_desc['password'] = request['receiver_password']
        receiver_desc['mail_address'] = request['receiver_mail_address']
        receiver_desc['language'] = language
        receiver_desc['role'] = 'receiver'
        receiver_desc['pgp_key_remove'] = False
        receiver_desc['send_account_activation_link'] = receiver_desc['password'] == ''

        receiver_user = db_create_user(session, tid, receiver_desc, language)

        if receiver_desc['password']:
            receiver_user.password = GCE.hash_password(receiver_desc['password'], receiver_user.salt)

            if encryption:
                db_gen_user_keys(session, tid, receiver_user, receiver_desc['password'])

    context_desc = models.Context().dict(language)
    context_desc['name'] = 'Default'
    context_desc['status'] = 'enabled'
    context_desc['receivers'] = [receiver_user.id] if receiver_user else []

    context = db_create_context(session, tid, context_desc, language)

    # Root tenant initialization terminates here
    if tid == 1:
        db_refresh_memory_variables(session, [tid])
        return

    # Secondary tenant initialization starts here

    tenant = models.db_get(session, models.Tenant, models.Tenant.id == tid)
    tenant.label = request['node_name']

    mode = node.get_val('mode')

    if mode not in ['default', 'demo']:
        node.set_val('hostname', tenant.subdomain + '.' + root_tenant_node.get_val('rootdomain'))

        for varname in ['reachable_via_web', 'enable_receipt_hint',
                        'disable_privacy_badge', 'simplified_login',
                        'can_delete_submission', 'can_postpone_expiration',
                        'anonymize_outgoing_connections', 'frame_ancestors',
                        'password_change_period', 'default_questionnaire',
                        'enable_password_reset']:
            node.set_val(varname, root_tenant_node.get_val(varname))

        context.questionnaire_id = root_tenant_node.get_val('default_questionnaire')

        # Set the data retention policy to 18 months
        context.tip_timetolive = 540

        # Delete the admin user
        request['admin_password'] = ''
        session.delete(admin_user)

        if receiver_user is not None:
            # Enable the recipient user to configure platform general settings
            receiver_user.can_edit_general_settings = True

            # Set the recipient name equal to the node name
            receiver_user.name = receiver_user.public_name = request['node_name']

    # Apply the specific fixes related to whistleblowing.it projects
    if mode == 'whistleblowing.it':
        node.set_val('simplified_login', True)
        node.set_val('tor', False)

    db_refresh_memory_variables(session, [tid])

def db_wizard(session, tid, request, client_using_tor, language):
    language = request['node_language']

    node = config.ConfigFactory(session, tid)

    if tid != 1:
        root_tenant_node = config.ConfigFactory(session, 1)
    else:
        root_tenant_node = node

    if node.get_val(u'wizard_done'):
        log.err("DANGER: Wizard already initialized!", tid=tid)
        raise errors.ForbiddenOperation

    db_update_enabled_languages(session, tid, [language], language)

    node.set_val(u'name', request['node_name'])
    node.set_val(u'default_language', language)
    node.set_val(u'wizard_done', True)
    node.set_val(u'enable_developers_exception_notification',
                 request['enable_developers_exception_notification'])

    # Guess the Tor configuration from the media used on first configuration:
    # if the user is using Tor, preserve node anonymity and perform outgoing connections via Tor
    node.set_val(u'reachable_via_web', not client_using_tor)
    node.set_val(u'allow_unencrypted', not client_using_tor)
    node.set_val(u'anonymize_outgoing_connections', client_using_tor)

    node_l10n = config.ConfigL10NFactory(session, tid)
    node_l10n.set_val(u'header_title_homepage', language, request['node_name'])

    profiles.load_profile(session, tid, request['profile'])

    admin_desc = models.User().dict(language)
    admin_desc['username'] = u'admin'
    admin_desc['name'] = request['admin_name']
    admin_desc['password'] = request['admin_password']
    admin_desc['mail_address'] = request['admin_mail_address']
    admin_desc['language'] = language
    admin_desc['role'] = u'admin'
    admin_desc['deletable'] = False
    admin_desc['pgp_key_remove'] = False

    admin_user = db_create_user(session, tid, admin_desc, language)
    admin_user.password_change_needed = False
    admin_user.password_change_date = datetime_now()

    receiver_desc = models.User().dict(language)
    receiver_desc['username'] = u'recipient'
    receiver_desc['name'] = request['receiver_name']
    receiver_desc['password'] = request['receiver_password']
    receiver_desc['mail_address'] = request['receiver_mail_address']
    receiver_desc['language'] = language
    receiver_desc['role'] = u'receiver'
    receiver_desc['deletable'] = True
    receiver_desc['pgp_key_remove'] = False

    receiver_user = db_create_user(session, tid, receiver_desc, language)

    context_desc = models.Context().dict(language)
    context_desc['status'] = 1
    context_desc['name'] = u'Default'
    context_desc['receivers'] = [receiver_user.id]

    context = db_create_context(session, tid, context_desc, language)

    # Root tenant initialization terminates here
    if tid == 1:
        db_refresh_memory_variables(session, [tid])
        return

    # Secondary tenant initialization starts here

    tenant = models.db_get(session, models.Tenant, models.Tenant.id == tid)
    tenant.label = request['node_name']

    mode = node.get_val(u'mode')

    if mode != u'default':
        node.set_val(u'hostname', tenant.subdomain + '.' + root_tenant_node.get_val(u'rootdomain'))

    for varname in ['reachable_via_web', 'disable_key_code_hint',
                    'disable_privacy_badge', 'disable_donation_panel',
                    'simplified_login', 'can_delete_submission',
                    'can_postpone_expiration', 'enable_user_pgp_key_upload',
                    'allow_unencrypted', 'anonymize_outgoing_connections',
                    'allow_iframes_inclusion', 'password_change_period',
                    'default_questionnaire']:
        node.set_val(varname, root_tenant_node.get_val(varname))

    context.questionnaire_id = root_tenant_node.get_val(u'default_questionnaire')

    # Apply the general settings common to every mode != default
    if mode != u'default':
        # Enable the recipient user to configure platform general settings
        receiver_user.can_edit_general_settings = True

        # Set the data retention policy to 18 months
        context.tip_timetolive = 540

    # Apply the specific fixes related to whistleblowing.it projects
    if mode == u'whistleblowing.it':
        node.set_val(u'simplified_login', True)
        node.set_val(u'tor', False)

        # Enable recipients to load files to the whistleblower
        context.enable_rc_to_wb_files = True

        # Set the recipient name equal to the node name
        receiver_user.name = request['node_name']

        # Delete the admin user
        session.delete(admin_user)

    db_refresh_memory_variables(session, [tid])
