Example 1
    def handle_file_upload(self, itip_id):
        result_list = []

        # Measure the operation across all the files (more than one can be
        # selected via the browser), since all files are delivered at the same time.
        start_time = time.time()

        uploaded_file = self.request.body

        uploaded_file['body'].avoid_delete()
        uploaded_file['body'].close()

        try:
            # First: dump the file to the filesystem;
            # an exception raised here prevents the InternalFile from being recorded
            filepath = yield threads.deferToThread(dump_file_fs, uploaded_file)
        except Exception as excep:
            log.err("Unable to save a file in filesystem: %s" % excep)
            raise errors.InternalServerError("Unable to accept new files")
        try:
            # Second: register the file in the database
            registered_file = yield register_file_db(uploaded_file, filepath, itip_id)
        except Exception as excep:
            log.err("Unable to register file in DB: %s" % excep)
            raise errors.InternalServerError("Unable to accept new files")

        registered_file['elapsed_time'] = time.time() - start_time
        result_list.append(registered_file)

        self.set_status(201) # Created
        self.write({'files': result_list})
Example 2
    def post(self, tip_id, rfile_id, *uriargs):

        rfile = yield download_file(self.current_user.user_id, tip_id, rfile_id)

        # keys: 'file_path', 'size', 'content_type', 'file_name'

        self.set_status(200)

        self.set_header('X-Download-Options', 'noopen')
        self.set_header('Content-Type', 'application/octet-stream')
        self.set_header('Content-Length', rfile['size'])
        self.set_header('Content-Disposition', 'attachment; filename="%s"' % rfile['name'])

        filelocation = os.path.join(GLSetting.submission_path, rfile['path'])

        try:

            with open(filelocation, "rb") as requestf:
                chunk_size = 8192
                while True:
                    chunk = requestf.read(chunk_size)
                    if len(chunk) == 0:
                        break
                    self.write(chunk)

        except IOError as srcerr:
            log.err("Unable to open %s: %s " % (filelocation, srcerr.strerror))
            self.set_status(404)

        self.finish()
Example 3
def admin_update_appdata(store, loaded_appdata):

    appdata = store.find(ApplicationData).one()
    node = store.find(Node).one()

    if not appdata:
        appdata = ApplicationData()
        has_been_updated = True
        old_version = 0
        store.add(appdata)
    elif appdata.version > loaded_appdata['version']:
        has_been_updated = False
        old_version = appdata.version
    else: # appdata not None and new_v >= old_v
        has_been_updated = True
        old_version = appdata.version

    if has_been_updated:

        log.debug("Updating Application Data Fields %d => %d" %
                  (old_version, loaded_appdata['version']))

        appdata.version = loaded_appdata['version']

        try:

            log.debug("Validating %d fields" % len(loaded_appdata['fields']))

            accepted_types = [ "text", "radio", "select", "checkboxes",
                               "textarea", "number", "url", "phone", "email" ]

            for field in loaded_appdata['fields']:
                if field['type'] not in accepted_types:
                    log.debug("Invalid type received: %s" % field['type'])
                    raise errors.InvalidInputFormat("Invalid type supplied")

            appdata.fields = loaded_appdata['fields']

        except Exception as excep:
            log.debug("Failed Fields initialization %s" % excep)
            raise excep

        if 'node_presentation' in loaded_appdata:
            node.presentation = loaded_appdata['node_presentation']

        if 'node_footer' in loaded_appdata:
            node.footer = loaded_appdata['node_footer']

        if 'node_subtitle' in loaded_appdata:
            node.subtitle = loaded_appdata['node_subtitle']

    else:
        log.err("NOT updating the Application Data Fields current %d proposed %d" %
                (appdata.version, version))

    # in both cases, update or not, return the running version
    return {
        'version': appdata.version,
        'fields': appdata.fields,
    }
Example 4
    def postApplication(self):
        """
        Run the application.
        """

        try:
            self.startApplication(self.application)
        except Exception as ex:
            statusPipe = self.config.get("statusPipe", None)
            if statusPipe is not None:
                # Limit the total length of the passed string to 100
                strippedError = str(ex)[:98]
                untilConcludes(os.write, statusPipe, "1 %s" % (strippedError,))
                untilConcludes(os.close, statusPipe)
            self.removePID(self.config["pidfile"])
            raise
        else:
            statusPipe = self.config.get("statusPipe", None)
            if statusPipe is not None:
                untilConcludes(os.write, statusPipe, "0")
                untilConcludes(os.close, statusPipe)

        if globaleaks_start():
            self.startReactor(None, self.oldstdout, self.oldstderr)
        else:
            log.err("Cannot start GlobaLeaks; please manual check the error.")
            quit(-1)

        self.removePID(self.config["pidfile"])
Example 5
    def handler_request_logging_begin(self):
        if GLSettings.devel_mode and GLSettings.log_requests_responses:
            try:
                content = ">" * 15
                content += " Request %d " % GLSettings.requests_counter
                content += (">" * 15) + "\n\n"

                content += self.request.method + " " + self.request.full_url() + "\n\n"

                content += "request-headers:\n"
                for k, v in self.request.headers.get_all():
                    content += "%s: %s\n" % (k, v)

                if type(self.request.body) == dict and "body" in self.request.body:
                    # this is needed due to cyclone hack for file uploads
                    body = self.request.body["body"].read()
                else:
                    body = self.request.body

                if len(body):
                    content += "\nrequest-body:\n" + body + "\n"

                self.do_verbose_log(content)

            except Exception as excep:
                log.err("HTTP Request logging fail: %s" % excep.message)
                return
Example 6
def get_submission(store, id):
    submission = store.find(InternalTip, InternalTip.id == unicode(id)).one()
    if not submission:
        log.err("Invalid Submission requested %s in GET" % id)
        raise errors.SubmissionGusNotFound

    return wb_serialize_internaltip(submission)
Example 7
    def get_file_upload(self):
        try:
            if (int(self.request.arguments['flowTotalSize'][0]) / (1024 * 1024)) > GLSetting.defaults.maximum_filesize:
                log.err("File upload request rejected: file too big")
                raise errors.FileTooBig

            if self.request.arguments['flowIdentifier'][0] not in GLUploads:
                f = GLSecureTemporaryFile(GLSetting.tmp_upload_path)
                GLUploads[self.request.arguments['flowIdentifier'][0]] = f
            else:
                f = GLUploads[self.request.arguments['flowIdentifier'][0]]

            f.write(self.request.files['file'][0]['body'])

            if self.request.arguments['flowChunkNumber'][0] != self.request.arguments['flowTotalChunks'][0]:
                return None

            uploaded_file = {}
            uploaded_file['filename'] = self.request.files['file'][0]['filename']
            uploaded_file['content_type'] = self.request.files['file'][0]['content_type']
            uploaded_file['body_len'] = int(self.request.arguments['flowTotalSize'][0])
            uploaded_file['body_filepath'] = f.filepath
            uploaded_file['body'] = f

            return uploaded_file

        except errors.FileTooBig:
            raise # propagate the exception

        except Exception as exc:
            log.err("Error while handling file upload %s" % exc)
            return None
Example 8
def wizard(store, request, language):
    try:
        request['node']['default_language'] = language
        request['node']['languages_enabled'] = [language]

        # Header title of the homepage is initially set with the node title
        request['node']['header_title_homepage'] = request['node']['name']

        db_update_node(store, request['node'], True, language)
        context = db_create_context(store, request['context'], language)

        # associate the new context to the receiver
        request['receiver']['contexts'] = [context.id]

        db_create_receiver(store, request['receiver'], language)

        admin = store.find(models.User, (models.User.username == unicode('admin'))).one()

        admin.mail_address = request['admin']['mail_address']

        password = request['admin']['password']
        old_password = request['admin']['old_password']

        if password and old_password and len(password) and len(old_password):
            admin.password = security.change_password(admin.password,
                                                      old_password,
                                                      password,
                                                      admin.salt)
    except Exception as excep:
        log.err("Failed wizard initialization %s" % excep)
        raise excep
Example 9
def receiverfile_planning(store):
    """
    This function roll over the InternalFile uploaded, extract a path, id and
    receivers associated, one entry for each combination. representing the
    ReceiverFile that need to be created.
    """
    receiverfiles_maps = {}

    for ifile in store.find(InternalFile, InternalFile.new == True):
        if ifile.processing_attempts >= INTERNALFILES_HANDLE_RETRY_MAX:
            ifile.new = False
            error = "Failed to handle receiverfiles creation for ifile %s (%d retries)" % \
                    (ifile.id, INTERNALFILES_HANDLE_RETRY_MAX)
            log.err(error)
            continue

        elif ifile.processing_attempts >= 1:
            log.err("Failed to handle receiverfiles creation for ifile %s (retry %d/%d)" %
                    (ifile.id, ifile.processing_attempts, INTERNALFILES_HANDLE_RETRY_MAX))


        if ifile.processing_attempts:
            log.debug("Starting handling receiverfiles creation for ifile %s retry %d/%d" %
                  (ifile.id, ifile.processing_attempts, INTERNALFILES_HANDLE_RETRY_MAX))

        ifile.processing_attempts += 1

        for rtip in ifile.internaltip.receivertips:
            receiverfile = ReceiverFile()
            receiverfile.internalfile_id = ifile.id
            receiverfile.receivertip_id = rtip.id
            receiverfile.file_path = ifile.file_path
            receiverfile.size = ifile.size
            receiverfile.status = u'processing'

            # https://github.com/globaleaks/GlobaLeaks/issues/444
            # avoid marking the receiverfile as new if it is part of a submission;
            # this way we avoid sending useless messages
            receiverfile.new = False if ifile.submission else True

            store.add(receiverfile)

            if ifile.id not in receiverfiles_maps:
                receiverfiles_maps[ifile.id] = {
                  'plaintext_file_needed': False,
                  'ifile_id': ifile.id,
                  'ifile_path': ifile.file_path,
                  'ifile_size': ifile.size,
                  'rfiles': []
                }

            receiverfiles_maps[ifile.id]['rfiles'].append({
                'id': receiverfile.id,
                'status': u'processing',
                'path': ifile.file_path,
                'size': ifile.size,
                'receiver': admin_serialize_receiver(rtip.receiver, GLSettings.memory_copy.default_language)
            })

    return receiverfiles_maps
Example 10
    def encrypt_file(self, key_fingerprint, plainpath, filestream, output_path):
        """
        @param pgp_key_public:
        @param plainpath:
        @return:
        """
        encrypt_obj = self.pgph.encrypt_file(filestream, str(key_fingerprint))

        if not encrypt_obj.ok:
            raise errors.PGPKeyInvalid

        log.debug("Encrypting for key %s file %s (%d bytes)" %
                  (key_fingerprint,
                   plainpath, len(str(encrypt_obj))))

        encrypted_path = os.path.join(os.path.abspath(output_path),
                                      "pgp_encrypted-%s" % rstr.xeger(r'[A-Za-z0-9]{16}'))

        try:
            with open(encrypted_path, "w+") as f:
                f.write(str(encrypt_obj))

            return encrypted_path, len(str(encrypt_obj))
        except Exception as excep:
            log.err("Error in writing PGP file output: %s (%s) bytes %d" %
                    (excep.message, encrypted_path, len(str(encrypt_obj)) ))
            raise errors.InternalServerError("Error in writing [%s]" % excep.message)
Example 11
    def create_key(self):
        """
        Create the AES Key to encrypt uploaded file.
        """
        self.key = os.urandom(GLSettings.AES_key_size)

        self.key_id = rstr.xeger(GLSettings.AES_key_id_regexp)
        self.keypath = os.path.join(GLSettings.ramdisk_path, "%s%s" %
                                    (GLSettings.AES_keyfile_prefix, self.key_id))

        while os.path.isfile(self.keypath):
            self.key_id = rstr.xeger(GLSettings.AES_key_id_regexp)
            self.keypath = os.path.join(GLSettings.ramdisk_path, "%s%s" %
                                        (GLSettings.AES_keyfile_prefix, self.key_id))

        self.key_counter_nonce = os.urandom(GLSettings.AES_counter_nonce)
        self.initialize_cipher()

        saved_struct = {
            'key': self.key,
            'key_counter_nonce': self.key_counter_nonce
        }

        log.debug("Key initialization at %s" % self.keypath)

        with open(self.keypath, 'w') as kf:
            pickle.dump(saved_struct, kf)

        if not os.path.isfile(self.keypath):
            log.err("Unable to write keyfile %s" % self.keypath)
            raise Exception("Unable to write keyfile %s" % self.keypath)
Example 12
    def encrypt_message(self, plaintext):
        """
        @param plaindata:
            An arbitrary long text that would be encrypted

        @param receiver_desc:

            The output of
                globaleaks.handlers.admin.admin_serialize_receiver()
            dictionary. It contain the fingerprint of the Receiver PUBKEY

        @return:
            The unicode of the encrypted output (armored)

        """
        if not self.validate_key(self.receiver_desc['gpg_key_armor']):
            raise errors.GPGKeyInvalid

        # The second argument may be a list of fingerprints, not just one
        encrypt_obj = self.gpgh.encrypt(plaintext, str(self.receiver_desc['gpg_key_fingerprint']))

        if not encrypt_obj.ok:
            log.err("Failure in encrypting %d bytes for %s (%s)" % (len(plaintext),
                    self.receiver_desc['username'], self.receiver_desc['gpg_key_fingerprint']))
            log.err(encrypt_obj.stderr)
            raise errors.GPGKeyInvalid

        log.debug("Encrypting for %s (%s) %d byte of plain data (%d cipher output)" %
                  (self.receiver_desc['username'], self.receiver_desc['gpg_key_fingerprint'],
                   len(plaintext), len(str(encrypt_obj))) )

        return str(encrypt_obj)
Example 13
    def every_notification_failed(self, store, failure, event_id):
        if event_id:
            log.err("Mail delivery failure for event %s (%s)" % (event_id, failure))
            evnt = store.find(EventLogs, EventLogs.id == event_id).one()
            evnt.mail_sent = True
        else:
            log.err("Mail delivery failure without an associated event")
Example 14
    def operation(self):
        current_time = time.time()

        error_msg = ""
        for job in self.jobs_list:
            execution_time = 0
            if job.running:
                execution_time = current_time - job.start_time

                time_from_last_failed_check = current_time - job.last_monitor_check_failed

                if (execution_time > job.monitor_interval
                    and time_from_last_failed_check > job.monitor_interval):

                    job.last_monitor_check_failed = current_time

                    if execution_time < 60:
                        error = "Job %s is taking more than %d seconds to execute" % (job.name, execution_time)
                    elif execution_time < 3600:
                        minutes = int(execution_time / 60)
                        error = "Job %s is taking more than %d minutes to execute" % (job.name, minutes)
                    else:
                        hours = int(execution_time / 3600)
                        error = "Job %s is taking more than %d hours to execute" % (job.name, hours)
                    error_msg += '\n' + error
                    log.err(error)

        # send a single report only after all the jobs have been examined
        if error_msg != "":
            send_exception_email(error_msg)
Example 15
    def create_key(self):
        """
        Create the AES Key to encrypt uploaded file.
        """
        self.key = os.urandom(GLSettings.AES_key_size)

        self.key_id = generateRandomKey(16)
        self.keypath = os.path.join(GLSettings.ramdisk_path, "%s%s" %
                                    (GLSettings.AES_keyfile_prefix, self.key_id))

        while os.path.isfile(self.keypath):
            self.key_id = generateRandomKey(16)
            self.keypath = os.path.join(GLSettings.ramdisk_path, "%s%s" %
                                        (GLSettings.AES_keyfile_prefix, self.key_id))

        self.key_counter_nonce = os.urandom(GLSettings.AES_counter_nonce)
        self.initialize_cipher()

        key_json = {
            'key': base64.b64encode(self.key),
            'key_counter_nonce': base64.b64encode(self.key_counter_nonce)
        }

        log.debug("Key initialization at %s" % self.keypath)

        with open(self.keypath, 'w') as kf:
            json.dump(key_json, kf)

        if not os.path.isfile(self.keypath):
            log.err("Unable to write keyfile %s" % self.keypath)
            raise Exception("Unable to write keyfile %s" % self.keypath)
Example 16
def import_files(store, submission, files, finalize):
    """
    @param submission: the Storm obj
    @param files: the list of InternalFiles UUIDs
    @return:
        Look if all the files specified in the list exist,
        Look if the context *require* almost a file, raise
            an error if missed
    """
    for file_id in files:
        try:
            ifile = store.find(InternalFile, InternalFile.id == unicode(file_id)).one()
        except Exception as excep:
            log.err("Storm error while looking up file %s in import_files (%s)" %
                    (file_id, excep))
            raise errors.FileGusNotFound

        if ifile is None:
            log.err("File %s not found in import_files" % file_id)
            raise errors.FileGusNotFound

        ifile.internaltip_id = submission.id

    if finalize and submission.context.file_required and not len(files):
        log.debug("Missing file for a submission in context %s" %
                  submission.context.name)
        raise errors.FileRequiredMissing

    # commit before return
    store.commit()
Example 17
def db_create_step(store, context_id, steps, language):
    """
    Add a new step to the store, then return the new serialized object.
    """
    context = models.Context.get(store, context_id)
    if context is None:
        raise errors.ContextIdNotFound

    n = 1
    for step in steps:
        step['context_id'] = context_id
        step['number'] = n

        fill_localized_keys(step, models.Step.localized_strings, language)

        s = models.Step.new(store, step)
        for f in step['children']:
            field = models.Field.get(store, f['id'])
            if not field:
                log.err("Creation error: unexistent field can't be associated")
                raise errors.FieldIdNotFound

            # remove current step/field fieldgroup/field association
            a_s, _ = get_field_association(store, field.id)
            if a_s != s.id:
                disassociate_field(store, field.id)
                s.children.add(field)

        n += 1
Example 18
    def perform_cleaning(self, store, itip_id, tip_id_number):
        itip = store.find(InternalTip, InternalTip.id == itip_id).one()
        # it can happen that itip is None, so we handle this condition
        if itip:
            db_delete_itip(store, itip, tip_id_number)
        else:
            log.err("DB inconsistency? InternalTip to be deleted %s is None" % itip_id)
Example 19
def get_tiptime_by_marker(store, marker):
    assert marker in InternalTip._marker

    itip_list = store.find(InternalTip, InternalTip.mark == marker)

    tipinfo_list = []
    for itip in itip_list:

        comment_cnt = store.find(Comment, Comment.internaltip_id == itip.id).count()
        files_cnt = store.find(InternalFile, InternalFile.internaltip_id == itip.id).count()

        if not itip.context:
            log.err("A Tip related to a not existent Context! This would not happen if delete on cascade is working")
            # And the removal is forced putting 1 second of life to the Tip.
            tip_timetolive = 1
            submission_timetolive = 1
        else:
            tip_timetolive = itip.context.tip_timetolive
            submission_timetolive = itip.context.submission_timetolive

        serialized_tipinfo = {
            'id': itip.id,
            'creation_date': datetime_to_ISO8601(itip.creation_date),
            'expiration_date': datetime_to_ISO8601(itip.expiration_date),
            'tip_life_seconds':  tip_timetolive,
            'submission_life_seconds':  submission_timetolive,
            'files': files_cnt,
            'comments': comment_cnt,
        }
        tipinfo_list.append(serialized_tipinfo)

    return tipinfo_list
Example 20
def update_internalfile_and_store_receiverfiles(store, receiverfiles_maps):
    for ifile_id, receiverfiles_map in receiverfiles_maps.iteritems():
        try:
            ifile = store.find(InternalFile,
                               InternalFile.id == ifile_id).one()
        except Exception as excep:
            log.err("Error in find %s: %s" % (ifile_id, excep.message))
            continue

        if ifile is None:
            continue

        ifile.new = False

        for rf in receiverfiles_map['rfiles']:
            try:
                rfile = store.find(ReceiverFile,
                                   ReceiverFile.id == rf['id']).one()

            except Exception as excep:
                log.err("Inconsistency!? ReceiverFile %s :%s" % (rf, excep.message))
                continue

            if rfile is None:
                continue

            rfile.status = rf['status']
            rfile.file_path = rf['path']
            rfile.size = rf['size']

        # update filepath possibly changed in case of plaintext file needed
        ifile.file_path = receiverfiles_map['ifile_path']
Example 21
    def reactivate_receiver_mails(self):
        # the receiver becomes usable again
        log.info("Expiring email suspension for %s" % self.receiver_id)
        if self.receiver_id not in LastHourMailQueue.receivers_in_threshold:
            log.err("Error while reactivating mails for a receiver")
        else:
            LastHourMailQueue.receivers_in_threshold.remove(self.receiver_id)
Example 22
    def post(self, lang):
        """
        Upload a custom language file
        """
        uploaded_file = self.get_file_upload()
        if uploaded_file is None:
            self.set_status(201)
            self.finish()
            return

        path = self.custom_langfile_path(lang)
        directory_traversal_check(GLSettings.static_path_l10n, path)

        try:
            dumped_file = yield threads.deferToThread(dump_static_file, uploaded_file, path)
        except OSError as excpd:
            log.err("OSError while create a new custom lang file [%s]: %s" % (path, excpd))
            raise errors.InternalServerError(excpd.strerror)
        except Exception as excpd:
            log.err("Unexpected exception: %s" % excpd)
            raise errors.InternalServerError(excpd)

        log.debug("Admin uploaded new lang file: %s" % dumped_file['filename'])

        self.set_status(201)  # Created
        self.finish(dumped_file)
Example 23
    def operation():
        """
        Goal of this function is to check all the submission not
        finalized, and, if the expiration time sets in the context has
        been reached, then clean the submission_gus along with the fields,
        and, if present, the uploaded folder/files.

        Second goal of this function, is to check all the InternalTip(s)
        and their expiration date, if match, remove that, all the folder,
        comment and tip related.
        """
        try:
            submissions = yield get_tiptime_by_marker(InternalTip._marker[0])  # Submission
            log.debug("(Cleaning routines) %d unfinished Submission are check if expired" % len(submissions))
            for submission in submissions:
                if is_expired(iso2dateobj(submission["creation_date"]), seconds=submission["submission_life_seconds"]):
                    log.info(
                        "Deleting an unfinalized Submission (creation date: %s) files %d"
                        % (submission["creation_date"], submission["files"])
                    )
                    yield itip_cleaning(submission["id"])

            tips = yield get_tiptime_by_marker(InternalTip._marker[2])  # First
            log.debug("(Cleaning routines) %d Tips stored are check if expired" % len(tips))
            for tip in tips:
                if is_expired(iso2dateobj(tip["creation_date"]), seconds=tip["tip_life_seconds"]):
                    log.info(
                        "Deleting an expired Tip (creation date: %s) files %d comments %d"
                        % (tip["creation_date"], tip["files"], tip["comments"])
                    )
                    yield itip_cleaning(tip["id"])

        except Exception as excep:
            log.err("Exception failure in submission/tip cleaning routine (%s)" % excep.message)
            sys.excepthook(*sys.exc_info())
Example 24
def db_create_step(store, context, steps, language):
    """
    Add the specified steps

    :param store: the store on which perform queries.
    :param context: the context on which register specified steps.
    :param steps: a dictionary containing the new steps.
    :param language: the language of the specified steps.
    """
    n = 1
    for step in steps:
        step['context_id'] = context.id
        step['number'] = n

        fill_localized_keys(step, models.Step.localized_strings, language)

        s = models.Step.new(store, step)
        for f in step['children']:
            field = models.Field.get(store, f['id'])
            if not field:
                log.err("Creation error: unexistent field can't be associated")
                raise errors.FieldIdNotFound

            # remove current step/field fieldgroup/field association
            a_s, _ = get_field_association(store, field.id)
            if a_s != s.id:
                disassociate_field(store, field.id)
                s.children.add(field)

        n += 1
Example 25
    def do_notify(self, event):
        if event.type == "digest":
            subject = event.tip_info["body"]
            body = event.tip_info["title"]
        else:
            subject, body = self.get_mail_subject_and_body(event)

        receiver_mail = event.receiver_info["mail_address"]

        # If the receiver has encryption enabled (for notification), encrypt the mail body
        if event.receiver_info["pgp_key_status"] == u"enabled":
            gpob = GLBPGP()
            try:
                gpob.load_key(event.receiver_info["pgp_key_public"])
                body = gpob.encrypt_message(event.receiver_info["pgp_key_fingerprint"], body)
            except Exception as excep:
                log.err(
                    "Error in PGP interface object (for %s: %s)! (notification+encryption)"
                    % (event.receiver_info["username"], str(excep))
                )

                # In this condition (PGP enabled but key invalid) the only
                # thing to do is to return None;
                # it will be the duty of the PGP check scheduler to disable the
                # key and advise the user and the admin about that action.
                return fail(None)
            finally:
                # the finally block always runs, even if the except block
                # contains a return or a raise
                gpob.destroy_environment()

        return sendmail(receiver_mail, subject, body)
Example 26
def receiverfile_create(store, if_path, recv_path, status, recv_size, receiver_desc):
    try:
        ifile = store.find(InternalFile, InternalFile.file_path == unicode(if_path)).one()

        if not ifile:
            log.err("InternalFile with path %s not found !?" % if_path)
            raise Exception("This is bad!")

        log.debug("ReceiverFile creation for user %s, '%s' bytes %d = %s)"
                % (receiver_desc['name'], ifile.name, recv_size, status ) )

        rtrf = store.find(ReceiverTip, ReceiverTip.internaltip_id == ifile.internaltip_id,
                          ReceiverTip.receiver_id == receiver_desc['id']).one()

        receiverfile = ReceiverFile()
        receiverfile.receiver_id = receiver_desc['id']
        receiverfile.internaltip_id = ifile.internaltip_id
        receiverfile.internalfile_id = ifile.id
        receiverfile.receivertip_id = rtrf.id
        receiverfile.file_path = unicode(recv_path)
        receiverfile.size = recv_size
        receiverfile.status = unicode(status)

        store.add(receiverfile)

    except Exception as excep:
        log.err("Error when saving ReceiverFile %s for '%s': %s" %
                (if_path, receiver_desc['name'], excep.message))
Example 27
    def _handle_request_exception(self, e):
        if isinstance(e, Failure):
            exc_type = e.type
            exc_value = e.value
            exc_tb = e.getTracebackObject()
            e = e.value
        else:
            exc_type, exc_value, exc_tb = sys.exc_info()

        if isinstance(e, (HTTPError, HTTPAuthenticationRequired)):
            if GLSettings.log_requests_responses and e.log_message:
                string_format = "%d %s: " + e.log_message
                args = [e.status_code, self._request_summary()] + list(e.args)
                msg = lambda *args: string_format % args
                log.msg(msg(*args))
            if e.status_code not in httplib.responses:
                log.msg("Bad HTTP status code: %d" % e.status_code)
                return self.send_error(500, exception=e)
            else:
                return self.send_error(e.status_code, exception=e)
        else:
            log.err("Uncaught exception %s %s %s" % (exc_type, exc_value, exc_tb))
            if GLSettings.log_requests_responses:
                log.msg(e)
            mail_exception_handler(exc_type, exc_value, exc_tb)
            return self.send_error(500, exception=e)
Example 28
def register_file_db(store, uploaded_file, internaltip_id):
    """
    Reminder: this is used only with fileApp - a Tip appending a new file;
    for the submission section, we rely on the Token to keep track of the
    associated file, and the InternalFile(s) are created in
    handlers/submission.py.

    :param uploaded_file: contains a struct of data like:
        {
          'body': <closed file u'/home/qq/Dev/GlobaLeaks/backend/workingdir/files/encrypted_upload/lryZO8IlldGg3BS3.aes', mode 'w+b' at 0xb5b68498>,
          'body_len': 667237,
          'content_type': 'image/png',
          'encrypted_path': u'/home/XYZ/Dev/GlobaLeaks/backend/workingdir/files/submission/lryZO8IlldGg3BS3.aes',
          'filename': 'SteganographyIsImportant.png'
        }
    """
    internaltip = store.find(InternalTip,
                             InternalTip.id == internaltip_id).one()

    if not internaltip:
        log.err("File associated to a non existent Internaltip!")
        raise errors.TipIdNotFound

    new_file = InternalFile()
    new_file.name = uploaded_file['filename']
    new_file.content_type = uploaded_file['content_type']
    new_file.size = uploaded_file['body_len']
    new_file.internaltip_id = internaltip_id
    new_file.file_path = uploaded_file['encrypted_path']

    store.add(new_file)

    log.debug("=> Recorded new InternalFile %s" % uploaded_file['filename'])

    return serialize_file(new_file)
Example 29
    def process_mail_creation(self, store, data):
        receiver_id = data['receiver']['id']

        # Do not spool emails if the receiver has opted out of notifications for this tip.
        if not data['tip']['enable_notifications']:
            log.debug("Discarding emails for %s due to receiver's preference." % receiver_id)
            return

        # https://github.com/globaleaks/GlobaLeaks/issues/798
        # TODO: the current solution is global and configurable only by the admin
        sent_emails = GLSettings.get_mail_counter(receiver_id)
        if sent_emails >= GLSettings.memory_copy.notification_threshold_per_hour:
            log.debug("Discarding emails for receiver %s due to threshold already exceeded for the current hour" %
                      receiver_id)
            return

        GLSettings.increment_mail_counter(receiver_id)
        if sent_emails + 1 >= GLSettings.memory_copy.notification_threshold_per_hour:
            log.info("Reached threshold of %d emails with limit of %d for receiver %s" % (
                     sent_emails,
                     GLSettings.memory_copy.notification_threshold_per_hour,
                     receiver_id)
            )

            # simply changing the type of the notification causes the
            # notification_limit_reached template to be sent
            data['type'] = u'receiver_notification_limit_reached'

        data['notification'] = db_get_notification(store, data['receiver']['language'])
        data['node'] = db_admin_serialize_node(store, data['receiver']['language'])

        if not data['node']['allow_unencrypted'] and data['receiver']['pgp_key_status'] != u'enabled':
            return

        subject, body = Templating().get_mail_subject_and_body(data)

        # If the receiver has encryption enabled encrypt the mail body
        if data['receiver']['pgp_key_status'] == u'enabled':
            gpob = GLBPGP()

            try:
                gpob.load_key(data['receiver']['pgp_key_public'])
                body = gpob.encrypt_message(data['receiver']['pgp_key_fingerprint'], body)
            except Exception as excep:
                log.err("Error in PGP interface object (for %s: %s)! (notification+encryption)" %
                        (data['receiver']['username'], str(excep)))

                return
            finally:
                # the finally block always runs, even if the except block
                # contains a return or a raise
                gpob.destroy_environment()

        mail = models.Mail({
            'address': data['receiver']['mail_address'],
            'subject': subject,
            'body': body
        })

        store.add(mail)
Example 30
    def load_key(self, key):
        """
        @param key:
        @return: True or False, True only if a key is effectively importable and listed.
        """
        try:
            import_result = self.gnupg.import_keys(key)
        except Exception as excep:
            log.err("Error in PGP import_keys: %s" % excep)
            raise errors.PGPKeyInvalid

        if len(import_result.fingerprints) == 0:
            raise errors.PGPKeyInvalid

        fingerprint = import_result.fingerprints[0]

        # check that the key is effectively listed
        try:
            all_keys = self.gnupg.list_keys()
        except Exception as excep:
            log.err("Error in PGP list_keys: %s" % excep)
            raise errors.PGPKeyInvalid

        expiration = datetime.utcfromtimestamp(0)
        for key in all_keys:
            if key['fingerprint'] == fingerprint:
                if key['expires']:
                    expiration = datetime.utcfromtimestamp(int(key['expires']))
                break

        return {
            'fingerprint': fingerprint,
            'expiration': expiration,
        }
Example 31
def delete_questionnaire(store, questionnaire_id):
    """
    Deletes the specified questionnaire. If no such questionnaire exists raises
    :class:`globaleaks.errors.QuestionnaireIdNotFound`.

    Args:
        questionnaire_id: the questionnaire id of the questionnaire to remove.
    """
    questionnaire = store.find(
        models.Questionnaire,
        models.Questionnaire.id == questionnaire_id).one()
    if not questionnaire:
        log.err("Invalid questionnaire requested in removal")
        raise errors.QuestionnaireIdNotFound

    store.remove(questionnaire)
Example 32
    def write(self, data):
        """
        The last action is kept track because the internal status
        need to track them. read below read()
        """
        assert (self.last_action != 'read'), "you can write after read!"

        self.last_action = 'write'
        try:
            if isinstance(data, unicode):
                data = data.encode('utf-8')

            self.file.write(self.encryptor.update(data))
        except Exception as wer:
            log.err("Unable to write() in GLSecureTemporaryFile: %s" % wer.message)
            raise wer
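The write() method above relies on self.encryptor, which is produced by initialize_cipher() (invoked from the create_key examples). A plausible sketch of that initialization with the cryptography library, assuming AES in CTR mode and a 16-byte counter nonce; this is an assumption, not the verified upstream code:

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes

def initialize_cipher(self):
    # the key and the 16-byte counter nonce come from create_key()
    self.cipher = Cipher(algorithms.AES(self.key),
                         modes.CTR(self.key_counter_nonce),
                         backend=default_backend())
    self.encryptor = self.cipher.encryptor()
    self.decryptor = self.cipher.decryptor()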
Example 33
def directory_traversal_check(trusted_absolute_prefix, untrusted_path):
    """
    check that an 'untrusted_path' match a 'trusted_absolute_path' prefix
    """

    if not os.path.isabs(trusted_absolute_prefix):
        raise Exception("programming error: trusted_absolute_prefix is not an absolute path: %s" %
                        trusted_absolute_prefix)

    untrusted_path = os.path.abspath(untrusted_path)

    if trusted_absolute_prefix != os.path.commonprefix([trusted_absolute_prefix, untrusted_path]):
        log.err("Blocked file operation out of the expected path: (\"%s\], \"%s\"" %
                (trusted_absolute_prefix, untrusted_path))

        raise errors.DirectoryTraversalError
Example 34
def _db_get_archived_questionnaire_schema(store, hash, type, language):
    aqs = store.find(models.ArchivedSchema, models.ArchivedSchema.hash == hash,
                     models.ArchivedSchema.type == type).one()

    if not aqs:
        log.err("Unable to find questionnaire schema with hash %s" % hash)
        questionnaire = []
    else:
        questionnaire = copy.deepcopy(aqs.schema)

    for step in questionnaire:
        for field in step['children']:
            _db_get_archived_field_recursively(field, language)
        get_localized_values(step, step, models.Step.localized_keys, language)

    return questionnaire
Example 35
    def execution_check(self):
        self.request.execution_time = datetime.now() - self.request.start_time

        if self.request.execution_time.seconds > self.handler_exec_time_threshold:
            error = "Handler [%s] exceeded execution threshold (of %d secs) with an execution time of %.2f seconds" % \
                    (self.name, self.handler_exec_time_threshold, self.request.execution_time.seconds)
            log.err(error)

            send_exception_email(error)

        track_handler(self)

        if self.uniform_answer_time:
            needed_delay = (GLSettings.side_channels_guard - (self.request.execution_time.microseconds / 1000)) / 1000
            if needed_delay > 0:
                yield deferred_sleep(needed_delay)
Example 36
def login(session, tid, username, password, client_using_tor, client_ip, token=''):
    """
    login returns a tuple (user_id, state, role, password_change_needed)
    """
    if token:
        user = session.query(User).filter(User.auth_token == token, \
                                          User.state != u'disabled', \
                                          User.tid == tid).one_or_none()
    else:
        user = session.query(User).filter(User.username == username, \
                                          User.state != u'disabled', \
                                          User.tid == tid).one_or_none()

    if user is None or (not token and not security.check_password(password, user.salt, user.password)):
        log.debug("Login: Invalid credentials")
        Settings.failed_login_attempts += 1
        raise errors.InvalidAuthentication

    if not client_using_tor and not State.tenant_cache[tid]['https_' + user.role]:
        log.err("Denied login request over Web for role '%s'" % user.role)
        raise errors.TorNetworkRequired

    # Check if we're doing IP address checks today
    if State.tenant_cache[tid]['ip_filter_authenticated_enable']:
        ip_networks = parse_csv_ip_ranges_to_ip_networks(
            State.tenant_cache[tid]['ip_filter_authenticated']
        )
        client_ip = text_type(client_ip)
        client_ip_obj = ipaddress.ip_address(client_ip)

        # Safety check, we always allow localhost to log in
        success = False
        if client_ip_obj.is_loopback is True:
            success = True

        for ip_network in ip_networks:
            if client_ip_obj in ip_network:
                success = True

        if success is not True:
            raise errors.AccessLocationInvalid

    log.debug("Login: Success (%s)" % user.role)

    user.last_login = datetime_now()

    return user.id, user.state, user.role, user.password_change_needed
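The helper parse_csv_ip_ranges_to_ip_networks is assumed by the login example above; a plausible minimal sketch built on the same ipaddress module (the real implementation may differ):

import ipaddress

from six import text_type

def parse_csv_ip_ranges_to_ip_networks(csv_ranges):
    # turn "10.0.0.0/8, 192.168.1.1" into network objects;
    # bare addresses become single-host networks (/32 or /128)
    return [ipaddress.ip_network(text_type(entry.strip()), strict=False)
            for entry in csv_ranges.split(',') if entry.strip()]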
Example 37
    def load_key(self, key):
        """
        @param key:
        @return: True or False, True only if a key is effectively importable and listed.
        """
        try:
            import_result = self.pgph.import_keys(key)
        except Exception as excep:
            log.err("Error in PGP import_keys: %s" % excep)
            raise errors.PGPKeyInvalid

        if len(import_result.fingerprints) != 1:
            raise errors.PGPKeyInvalid

        fingerprint = import_result.fingerprints[0]

        # check that the key is effectively listed
        try:
            all_keys = self.pgph.list_keys()
        except Exception as excep:
            log.err("Error in PGP list_keys: %s" % excep)
            raise errors.PGPKeyInvalid

        info = u""
        expiration = datetime.utcfromtimestamp(0)
        for key in all_keys:
            if key['fingerprint'] == fingerprint:

                if key['expires']:
                    expiration = datetime.utcfromtimestamp(int(key['expires']))
                    exp_date = datetime_to_day_str(expiration)
                else:
                    exp_date = u'Never'

                info += "Key length: %s\n" % key['length']
                info += "Key expiration: %s\n" % exp_date

                try:
                    for uid in key['uids']:
                        info += "\t%s\n" % uid
                except Exception as excep:
                    log.err("Error in PGP key format/properties: %s" % excep)
                    raise errors.PGPKeyInvalid

                break

        if not len(info):
            log.err("Key apparently imported but unable to reload it")
            raise errors.PGPKeyInvalid

        ret = {
            'fingerprint': fingerprint,
            'expiration': expiration,
            'info': info
        }

        return ret
Example 38
    def get_file_upload(self):
        try:
            if len(self.request.files) != 1:
                raise errors.InvalidInputFormat("cannot accept more than a file upload at once")

            chunk_size = len(self.request.files['file'][0]['body'])
            total_file_size = int(self.request.arguments['flowTotalSize'][0]) if 'flowTotalSize' in self.request.arguments else chunk_size
            flow_identifier = self.request.arguments['flowIdentifier'][0] if 'flowIdentifier' in self.request.arguments else generateRandomKey(10)

            if ((chunk_size / (1024 * 1024)) > GLSettings.memory_copy.maximum_filesize or
                (total_file_size / (1024 * 1024)) > GLSettings.memory_copy.maximum_filesize):
                log.err("File upload request rejected: file too big")
                raise errors.FileTooBig(GLSettings.memory_copy.maximum_filesize)

            if flow_identifier not in GLUploads:
                f = GLSecureTemporaryFile(GLSettings.tmp_upload_path)
                GLUploads[flow_identifier] = f
            else:
                f = GLUploads[flow_identifier]

            f.write(self.request.files['file'][0]['body'])

            if 'flowChunkNumber' in self.request.arguments and 'flowTotalChunks' in self.request.arguments:
                if self.request.arguments['flowChunkNumber'][0] != self.request.arguments['flowTotalChunks'][0]:
                    return None


            uploaded_file = {
                'name': self.request.files['file'][0]['filename'],
                'type': self.request.files['file'][0]['content_type'],
                'size': total_file_size,
                'path': f.filepath,
                'body': f,
                'description': self.request.arguments.get('description', [''])[0]
            }

            self.request._start_time = f.creation_date
            track_handler(self)

            return uploaded_file

        except errors.FileTooBig:
            raise  # propagate the exception

        except Exception as exc:
            log.err("Error while handling file upload %s" % exc)
            return None
Example 39
    def schedule_exception_email(self, exception_text, *args):
        if not hasattr(self.tenant_cache[1], 'notification'):
            log.err(
                "Error: Cannot send mail exception before complete initialization."
            )
            return

        if self.exceptions_email_count >= self.settings.exceptions_email_hourly_limit:
            return

        exception_text = (exception_text % args) if args else exception_text

        sha256_hash = sha256(exception_text.encode()).hexdigest()

        if sha256_hash not in self.exceptions:
            self.exceptions[sha256_hash] = 0

        self.exceptions[sha256_hash] += 1
        if self.exceptions[sha256_hash] > 5:
            log.err(
                "Exception mail suppressed for (%s) [reason: threshold exceeded]",
                sha256_hash)
            return

        self.exceptions_email_count += 1

        mail_subject = "GlobaLeaks Exception"
        delivery_list = self.tenant_cache[
            1].notification.exception_delivery_list

        mail_body = text_type("Platform: %s\nHost: %s (%s)\nVersion: %s\n\n%s" \
                          % (self.tenant_cache[1].name,
                             self.tenant_cache[1].hostname,
                             self.tenant_cache[1].onionservice,
                             __version__,
                             exception_text))

        for mail_address, pgp_key_public in delivery_list:
            # Opportunistically encrypt the mail body. NOTE that mails will go out
            # unencrypted if one address in the list does not have a public key set.
            if pgp_key_public:
                pgpctx = PGPContext(self.settings.tmp_path)
                fingerprint = pgpctx.load_key(pgp_key_public)['fingerprint']
                mail_body = pgpctx.encrypt_message(fingerprint, mail_body)

            # avoid waiting for the notification to send and instead rely on threads to handle it
            schedule_email(1, mail_address, mail_subject, mail_body)
Example 40
def globaleaks_start():

    GLSetting.fix_file_permissions()
    GLSetting.drop_privileges()
    GLSetting.check_directories()

    if not GLSetting.accepted_hosts:
        log.err("Missing a list of hosts usable to contact GLBackend, abort")
        return False

    if not check_schema_version():
        return False

    d = create_tables()

    d.addCallback(clean_untracked_files)

    @d.addCallback
    @inlineCallbacks
    def cb(res):
        start_asynchronous()
        yield import_memory_variables()
        tor_configured_hosts = yield apply_cli_options()

        log.msg("GLBackend is now running")
        for ip in GLSetting.bind_addresses:
            log.msg("Visit http://%s:%d to interact with me" %
                    (ip, GLSetting.bind_port))

        for host in GLSetting.accepted_hosts:
            if host not in GLSetting.bind_addresses:
                log.msg("Visit http://%s:%d to interact with me" %
                        (host, GLSetting.bind_port))

        if tor_configured_hosts:
            for other in tor_configured_hosts:
                if other:
                    log.msg("Visit %s to interact with me" % other)

        log.msg(
            "Reminder: GlobaLeaks is not accessible from other URLs; this is strictly enforced"
        )
        log.msg(
            "Check the documentation at https://github.com/globaleaks/GlobaLeaks/wiki/ for further customization"
        )

    return True
Example 41
def get_receiver(store,
                 receiver_id,
                 language=GLSetting.memory_copy.default_language):
    """
    raises :class:`globaleaks.errors.ReceiverIdNotFound` if the receiver does
    not exist.
    Returns:
        (dict) the receiver

    """
    receiver = store.find(Receiver, Receiver.id == unicode(receiver_id)).one()

    if not receiver:
        log.err("Requested in receiver")
        raise errors.ReceiverIdNotFound

    return admin_serialize_receiver(receiver, language)
Example 42
    def process_events(self, store):
        """
        :return:
            0  = No event has been processed
           -1  = Threshold reach, emergency mode.
           >0  = Some elements to be notified has been processed
        """

        _elemscount = store.find(self.model, self.model.new == True).count()

        if _elemscount > (GLSettings.jobs_operation_limit * 10):
            # If this situation happens, we are facing a huge number of problems.
            # The reasonable option is to skip the entire notification for this
            # specific trigger: all the events are marked as "new = False" and
            # that's that! Plus, the admin gets notified about it with an email.
            log.err("Waves of new %s received, notification suspended completely for all the %d %s (threshold %d)" %
                    (self.trigger, _elemscount,
                     self.trigger, (GLSettings.jobs_operation_limit * 10)))
            store.find(self.model, self.model.new == True).set(new=False)
            return -1

        _elems = store.find(self.model, self.model.new == True)[:GLSettings.jobs_operation_limit]

        if _elemscount > GLSettings.jobs_operation_limit:
            log.info("Notification: Processing %d new event from a Queue of %d: %s(s) to be handled" %
                      (_elems.count(), _elemscount, self.trigger))
        elif _elemscount:
            log.debug("Notification: Processing %d new event: %s(s) to be handled" %
                      (_elems.count(), self.trigger))
        else:
            # No element to be processed
            return 0

        for e in _elems:
            # Mark event as handled as first step;
            # For resiliency reasons it's better to be sure that the
            # state machine move forward, than having starving events
            # due to possible exceptions in handling
            e.new = False
            self.process_event(store, e)

        db_save_events_on_db(store, self.events)
        log.debug("Notification: generated %d notification events of type %s" %
                  (len(self.events), self.trigger))

        return _elems.count()
Example 43
def receiverfile_create(store, if_path, recv_path, status, recv_size,
                        receiver_desc):

    assert isinstance(recv_size, int)
    assert isinstance(receiver_desc, dict)
    assert os.path.isfile(os.path.join(GLSetting.submission_path, if_path))

    try:
        ifile = store.find(InternalFile,
                           InternalFile.file_path == unicode(if_path)).one()

        if not ifile:
            log.err("InternalFile with path %s not found !?" % if_path)
            raise Exception("This is bad!")

        log.debug("ReceiverFile creation for user %s, '%s' bytes %d = %s)" %
                  (receiver_desc['name'], ifile.name, recv_size, status))

        receiverfile = ReceiverFile()

        receiverfile.downloads = 0
        receiverfile.receiver_id = receiver_desc['id']
        receiverfile.internalfile_id = ifile.id
        receiverfile.internaltip_id = ifile.internaltip_id

        # Receiver Tip reference
        rtrf = store.find(
            ReceiverTip, ReceiverTip.internaltip_id == ifile.internaltip_id,
            ReceiverTip.receiver_id == receiver_desc['id']).one()
        receiverfile.receiver_tip_id = rtrf.id

        # inherited by previous operation and checks
        receiverfile.file_path = unicode(recv_path)
        receiverfile.size = recv_size
        receiverfile.status = unicode(status)

        receiverfile.mark = u'not notified'

        store.add(receiverfile)

        return serialize_receiverfile(receiverfile)

    except Exception as excep:
        log.err("Error when saving ReceiverFile %s for '%s': %s" %
                (if_path, receiver_desc['name'], excep.message))
        return []
Example 44
    def handler_time_analysis_end(self):
        """
        If the software is running with the option -S --stats (GLSetting.log_timing_stats)
        then we are doing performance testing, having our mailbox spammed is not important,
        so we just skip to report the anomaly.
        """
        current_run_time = time.time() - self.start_time

        if current_run_time > self.handler_exec_time_threshold:
            error = "Handler [%s] exceeded execution threshold (of %d secs) with an execution time of %.2f seconds" % \
                    (self.name, self.handler_exec_time_threshold, current_run_time)
            log.err(error)

            send_exception_email(error)

        if GLSettings.log_timing_stats:
            TimingStatsHandler.log_measured_timing(self.request.method, self.request.uri, self.start_time, current_run_time)
Example 45
def change_password(old_password_hash, old_password, new_password, salt):
    """
    @param old_password_hash: the stored password hash.
    @param old_password: The user provided old password for password change protection.
    @param new_password: The user provided new password.
    @param salt: The salt to be used for password hashing.

    @return:
        the hex-encoded scrypt hash of the new password
    """
    if not check_password(old_password, old_password_hash, salt):
        log.err("change_password(): Error - provided invalid old_password")
        raise errors.InvalidOldPassword

    check_password_format(new_password)

    return hash_password(new_password, salt)
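Hypothetical usage sketch for Example 45; the user object and the password strings are illustrative only:

# 'user' stands in for an ORM object carrying the stored hex scrypt
# hash ('password') and the per-user 'salt'
user.password = change_password(user.password,
                                old_password='old secret',
                                new_password='new stronger secret',
                                salt=user.salt)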
Example 46
def directory_traversal_check(trusted_absolute_prefix, untrusted_path):
    """
    Check that an 'untrusted_path' matches a 'trusted_absolute_path' prefix

    :param trusted_absolute_prefix: A prefix of the sandbox
    :param untrusted_path: The untrusted path
    """
    untrusted_path = os.path.abspath(untrusted_path)
    trusted_absolute_prefix = os.path.abspath(trusted_absolute_prefix)

    if trusted_absolute_prefix != os.path.commonprefix(
        [trusted_absolute_prefix, untrusted_path]):
        log.err(
            "Blocked file operation for: (prefix, attempted_path) : ('%s', '%s')",
            trusted_absolute_prefix, untrusted_path)

        raise errors.DirectoryTraversalError
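As a usage illustration of the check above (the paths are hypothetical and errors is the same module referenced in the function):

# a path inside the prefix passes silently
directory_traversal_check('/var/globaleaks/files',
                          '/var/globaleaks/files/report.txt')

# a traversal attempt raises, because os.path.abspath collapses the '..'
# segments and the prefix comparison then fails
try:
    directory_traversal_check('/var/globaleaks/files',
                              '/var/globaleaks/files/../../etc/passwd')
except errors.DirectoryTraversalError:
    pass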
Example 47
def login_whistleblower(store, receipt, client_using_tor):
    """
    login_whistleblower returns the WhistleblowerTip.id
    """
    wbtip = db_get_wbtip_by_receipt(store, receipt)
    if not wbtip:
        log.debug("Whistleblower login: Invalid receipt")
        GLSettings.failed_login_attempts += 1
        raise errors.InvalidAuthentication

    if not client_using_tor and not GLSettings.memory_copy.accept_tor2web_access['whistleblower']:
        log.err("Denied login request over clear Web for role 'whistleblower'")
        raise errors.TorNetworkRequired

    log.debug("Whistleblower login: Valid receipt")
    wbtip.last_access = datetime_now()
    return wbtip.id
Example n. 48
    def perform_action(store, csr_fields):
        db_cfg = load_tls_dict(store)

        pkv = tls.PrivKeyValidator()
        ok, err = pkv.validate(db_cfg)
        if not ok or err is not None:
            raise err

        key_pair = db_cfg['ssl_key']
        try:
            csr_txt = tls.gen_x509_csr(key_pair, csr_fields,
                                       GLSettings.csr_sign_bits)
            log.debug("Generated a new CSR")
            return csr_txt
        except Exception as e:
            log.err(e)
            raise errors.ValidationError('CSR gen failed')
Example n. 49
def hash_password(proposed_password, salt_input):
    """
    @param proposed_password: a password; no security constraints are
        enforced here, but an empty string is not accepted.

    @return:
        the scrypt hash of the password, hex-encoded
    """
    proposed_password = proposed_password.encode('utf-8')
    salt = get_salt(salt_input)

    if not len(proposed_password):
        log.err("password string has been not really provided (0 len)")
        raise errors.InvalidInputFormat("Missing password")

    hashed_passwd = scrypt.hash(proposed_password, salt)
    return binascii.b2a_hex(hashed_passwd)
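check_password, referenced by change_password in a previous example, is not shown; a minimal sketch consistent with hash_password above might be:

def check_password(guessed_password, stored_hash, salt_input):
    # re-hash the guess with the same salt and compare; the real
    # implementation may differ (e.g. use a constant-time comparison)
    return hash_password(guessed_password, salt_input) == stored_hash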
Example n. 50
    def epilogue(self):
        """
        Imports the contents of the tor_hs directory into the config table

        NOTE the function does not delete the torhs dir, but instead leaves it
        on disk to ensure that the operator does not lose their HS key.
        """
        config = self.model_to['Config']

        def add_raw_config(session, group, name, customized, value):
            c = config(migrate=True)
            c.var_group = group
            c.var_name = name
            c.customized = customized
            c.value = {'v': value}
            session.add(c)

        hostname, key = '', ''
        pk_path = os.path.join(TOR_DIR, 'private_key')
        hn_path = os.path.join(TOR_DIR, 'hostname')
        if os.path.exists(TOR_DIR) and os.path.exists(pk_path) and os.path.exists(hn_path):
            with open(hn_path, 'r') as f:
                hostname = f.read().strip()
                # TODO assert that the hostname corresponds with the key
                if not re.match(r'[A-Za-z0-9]{16}\.onion', hostname):
                    raise Exception('The hostname format does not match')

            with open(pk_path, 'r') as f:
                r = f.read()
                if not r.startswith('-----BEGIN RSA PRIVATE KEY-----\n'):
                    raise Exception('%s does not have the right format!' % pk_path)
                # Clean and convert the pem encoded key read into the format
                # expected by the ADD_ONION tor control protocol.
                # TODO assert the key passes deeper validation
                key = 'RSA1024:' + ''.join(r.strip().split('\n')[1:-1])

        else:
            log.err('The structure of %s is incorrect. Cannot load onion service keys' % TOR_DIR)

        self.session_new.query(config).filter(config.var_group == u'node', config.var_name == u'onionservice').delete(synchronize_session='fetch')

        add_raw_config(self.session_new, u'node', u'onionservice', True, hostname)
        add_raw_config(self.session_new, u'private', u'tor_onion_key', True, key)

        self.entries_count['Config'] += 1
Example n. 51
    def validate_jmessage(jmessage, message_template):
        """
        Takes a dict or list representing a JSON message and checks whether
        it conforms to the message type it is supposed to be.

        This function may be called recursively to validate sub-parameters
        that are also GLTypes.

        jmessage: the message that should be validated

        message_template: the GLType class it should match.
        """
        if isinstance(message_template, dict):
            valid_jmessage = {}
            for key in message_template.keys():
                if key not in jmessage:
                    log.err('validate_message: key %s not in %s' % (key, jmessage))
                    raise errors.InvalidInputFormat('wrong schema: missing %s' % key)
                else:
                    valid_jmessage[key] = jmessage[key]

            if GLSetting.loglevel == "DEBUG":
                # check if wrong keys are reaching the GLBackend, they are
                # stripped in the previous loop, because valid_jmessage is returned
                for double_k in jmessage.keys():
                    if double_k not in message_template.keys():
                        log.err("[!?] validate_message: key %s not expected" % double_k)

            jmessage = valid_jmessage

            for key, value in message_template.iteritems():
                if not BaseHandler.validate_type(jmessage[key], value):
                    raise errors.InvalidInputFormat("REST integrity check 1, fail in %s" % key)

            for key, value in jmessage.iteritems():
                if not BaseHandler.validate_type(value, message_template[key]):
                    raise errors.InvalidInputFormat("REST integrity check 2, fail in %s" % key)

            return True

        elif isinstance(message_template, list):
            return all(BaseHandler.validate_type(x, message_template[0]) for x in jmessage)

        else:
            raise errors.InvalidInputFormat("invalid json massage: expected dict or list")
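An illustrative call (the template values are hypothetical; in the real codebase the templates are built from GLType definitions):

message_template = {
    'username': unicode,  # hypothetical template entries
    'password': unicode
}

jmessage = {'username': u'admin', 'password': u'secret'}

# returns True, or raises errors.InvalidInputFormat on a schema mismatch
validate_jmessage(jmessage, message_template)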
Example n. 52
    def flush(self, include_footers=False):
        """
        This method is used internally by Cyclone. Cyclone provides the
        on_finish hook, but by that time the request has already been flushed,
        so overriding flush() was the easiest way to perform our collection.

        It is implemented here to support I/O logging, if requested with the
        command line option --io $number_of_request_recorded
        """

        # This is the event tracker, used to keep track of the
        # outcome of the events.
        if not hasattr(self, '_status_code'):
            log.debug("Developer, check this out")
            if GLSetting.devel_mode:
                import pdb; pdb.set_trace()

        for event in outcoming_event_monitored:
            if event['handler_check'](self.request.uri) and \
                    event['method'] == self.request.method and \
                    event['status_checker'](self._status_code):
                EventTrack(event, self.request.request_time())
                # if event['anomaly_management']:
                #    event['anomaly_management'](self.request)

        if hasattr(self, 'globaleaks_io_debug'):
            try:
                content = ("<" * 15)
                content += (" Response %d " % self.globaleaks_io_debug)
                content += ("<" * 15) + "\n\n"
                content += "status code: " + str(self._status_code) + "\n\n"

                content += "headers:\n"
                for k, v in self._headers.iteritems():
                    content += "%s: %s\n" % (k, v)

                if self._write_buffer is not None:
                    content += "\nbody: " + str(self._write_buffer) + "\n"

                self.do_verbose_log(content)
            except Exception as excep:
                log.err("JSON logging fail (flush): %s" % excep.message)
                return

        RequestHandler.flush(self, include_footers)
Example n. 53
    def process_file_upload(self):
        if 'flowFilename' not in self.request.args:
            return None

        total_file_size = int(self.request.args['flowTotalSize'][0])
        flow_identifier = self.request.args['flowIdentifier'][0]

        chunk_size = len(self.request.args['file'][0])
        if ((chunk_size / (1024 * 1024)) >
                self.state.tenant_cache[self.request.tid].maximum_filesize
                or (total_file_size / (1024 * 1024)) >
                self.state.tenant_cache[self.request.tid].maximum_filesize):
            log.err("File upload request rejected: file too big",
                    tid=self.request.tid)
            raise errors.FileTooBig(
                self.state.tenant_cache[self.request.tid].maximum_filesize)

        if flow_identifier not in self.state.TempUploadFiles:
            self.state.TempUploadFiles.set(
                flow_identifier, SecureTemporaryFile(Settings.tmp_path))

        f = self.state.TempUploadFiles[flow_identifier]
        with f.open('w') as f:
            f.write(self.request.args['file'][0])

            if self.request.args['flowChunkNumber'][0] != self.request.args[
                    'flowTotalChunks'][0]:
                return None
            else:
                f.finalize_write()

        mime_type, _ = mimetypes.guess_type(
            self.request.args['flowFilename'][0])
        if mime_type is None:
            mime_type = 'application/octet-stream'

        self.uploaded_file = {
            'date': datetime_now(),
            'name': self.request.args['flowFilename'][0],
            'type': mime_type,
            'size': total_file_size,
            'path': f.filepath,
            'body': f,
            'description': self.request.args.get('description', [''])[0]
        }
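For reference, a sketch of the flow.js-style arguments the handler above reads from self.request.args (the field names come from the code; the values are illustrative):

self.request.args = {
    'flowFilename':    ['report.pdf'],
    'flowIdentifier':  ['1234-report.pdf'],
    'flowTotalSize':   ['81920'],            # bytes, parsed with int()
    'flowChunkNumber': ['1'],
    'flowTotalChunks': ['4'],
    'file':            ['<raw chunk bytes>']
}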
Example n. 54
def MIME_mail_build(source_name, source_mail, receiver_name, receiver_mail,
                    title, txt_body):

    # Override python's weird assumption that utf-8 text should be encoded with
    # base64, and instead use quoted-printable (for both subject and body).  I
    # can't figure out a way to specify QP (quoted-printable) instead of base64 in
    # a way that doesn't modify global state. :-(

    Charset.add_charset('utf-8', Charset.QP, Charset.QP, 'utf-8')

    # This example is of an email with text and html alternatives.
    multipart = MIMEMultipart('alternative')

    # We need to use Header objects here instead of just assigning the strings in
    # order to get our headers properly encoded (with QP).
    # You may want to avoid this if your headers are already ASCII, just so people
    # can read the raw message without getting a headache.
    multipart['Subject'] = Header(title.encode('utf-8'), 'UTF-8').encode()
    multipart['Date'] = rfc822_date()

    multipart['To'] = Header(receiver_name.encode('utf-8'), 'UTF-8').encode() + \
                        " <" + receiver_mail + ">"

    multipart['From'] = Header(source_name.encode('utf-8'), 'UTF-8').encode() + \
                        " <" + source_mail + ">"

    multipart['X-Mailer'] = "fnord"

    # Attach the parts with the given encodings.
    # html = '<html>...</html>'
    # htmlpart = MIMEText(html.encode('utf-8'), 'html', 'UTF-8')
    # multipart.attach(htmlpart)

    textpart = MIMEText(txt_body.encode('utf-8'), 'plain', 'UTF-8')
    multipart.attach(textpart)

    # And here we have to instantiate a Generator object to convert the multipart
    # object to a string (can't use multipart.as_string, because that escapes
    # "From" lines).
    try:
        io = StringIO.StringIO(multipart.as_string())
        return io
    except Exception as excep:
        log.err("Unable to encode and email: %s" % excep)
        return None
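A usage sketch with illustrative values; the function returns a StringIO holding the serialized message, or None on failure:

mail_io = MIME_mail_build(u"GlobaLeaks Node", u"notification@example.com",
                          u"Receiver Name", u"receiver@example.com",
                          u"New submission", u"A new submission is available.")
if mail_io is not None:
    raw_message = mail_io.getvalue()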
Example n. 55
def overwrite_and_remove(absolutefpath, iterations_number=1):
    """
    Overwrite the file with all_zeros, all_ones, random patterns

    Note: At each iteration the original size of the file is altered.
    """
    log.debug("Starting secure deletion of file %s", absolutefpath)

    randomgen = random.SystemRandom()

    try:
        # in the following loop, the file is open and closed on purpose, to trigger flush operations
        all_zeros = "\0\0\0\0" * 1024  # 4kb of zeros

        if sys.version_info[0] == 2:
            all_ones = "FFFFFFFF".decode("hex") * 1024  # 4kb of ones
        else:
            all_ones = "\xFF" * 4096

        for iteration in range(iterations_number):
            OPTIMIZATION_RANDOM_BLOCK = randomgen.randint(4096, 4096 * 2)

            random_pattern = ""
            for _ in range(OPTIMIZATION_RANDOM_BLOCK):
                random_pattern += str(randomgen.randrange(256))

            log.debug("Excecuting rewrite iteration (%d out of %d)", iteration,
                      iterations_number)

            _overwrite(absolutefpath, all_zeros)
            _overwrite(absolutefpath, all_ones)
            _overwrite(absolutefpath, random_pattern)

    except Exception as excep:
        log.err("Unable to perform secure overwrite for file %s: %s",
                absolutefpath, excep)

    finally:
        try:
            os.remove(absolutefpath)
        except OSError as excep:
            log.err("Unable to perform unlink operation on file %s: %s",
                    absolutefpath, excep)

    log.debug("Performed deletion of file: %s", absolutefpath)
Example n. 56
    def post(self):
        if GLSettings.memory_copy.hostname == '':
            raise errors.ValidationError('hostname is not set')

        net_agent = GLSettings.get_agent()

        t = ('http', GLSettings.memory_copy.hostname, 'robots.txt', None, None)
        url = bytes(urlparse.urlunsplit(t))
        try:
            resp = yield net_agent.request('GET', url)
            body = yield readBody(resp)

            server_h = resp.headers.getRawHeaders('Server', [None])[-1].lower()
            if not body.startswith('User-agent: *') or server_h != 'globaleaks':
                raise EnvironmentError('Response unexpected')
        except (EnvironmentError, ConnectError) as e:
            log.err(e)
            raise errors.ExternalResourceError()
Example n. 57
def directory_traversal_check(trusted_absolute_prefix, untrusted_path):
    """
    Check that an 'untrusted_path' falls under the 'trusted_absolute_prefix'
    """
    if not os.path.isabs(trusted_absolute_prefix):
        raise Exception(
            "programming error: trusted_absolute_prefix is not an absolute path: %s"
            % trusted_absolute_prefix)

    untrusted_path = os.path.abspath(untrusted_path)

    if trusted_absolute_prefix != os.path.commonprefix(
        [trusted_absolute_prefix, untrusted_path]):
        log.err(
            "Blocked file operation for: (prefix, attempted_path) : ('%s', '%s')",
            trusted_absolute_prefix, untrusted_path)

        raise errors.DirectoryTraversalError
Example n. 58
    def __iter__(self):
        for f in self.files:

            log.debug("Compressing (%s)" % f['name'])

            if 'path' in f:
                try:
                    for data in self.zip_file(f['path'], f['name']):
                        yield data
                except (OSError, IOError) as excpd:
                    log.err("IOError while adding %s to files collection: %s" %
                            (f['path'], excpd))

            elif 'buf' in f:
                for data in self.zip_buf(f['buf'], f['name']):
                    yield data

        yield self.archive_footer()
Example n. 59
    def __init__(self):
        """
        Every time it is needed, a new keyring is created here.
        """
        try:
            temp_pgproot = os.path.join(GLSettings.pgproot,
                                        "%s" % generateRandomKey(8))
            os.makedirs(temp_pgproot, mode=0700)
            self.gnupg = GPG(gnupghome=temp_pgproot,
                             options=['--trust-model', 'always'])
            self.gnupg.encoding = "UTF-8"
        except OSError as ose:
            log.err("Critical, OS error in operating with GnuPG home: %s" %
                    ose)
            raise
        except Exception as excep:
            log.err("Unable to instantiate PGP object: %s" % excep)
            raise
Example n. 60
    def write(self, data):
        """
        The last action is tracked because the internal state machine
        needs it; see read() below.
        """
        if self.last_action == 'read':
            raise Exception("Error: Write call performed after read")

        self.last_action = 'write'
        try:
            if isinstance(data, unicode):
                data = data.encode('utf-8')

            self.file.write(self.encryptor.update(data))
        except Exception as wer:
            log.err("Unable to write() in GLSecureTemporaryFile: %s" %
                    wer.message)
            raise wer