Esempio n. 1
0
 def get_server_log_entries(self, environ, session_token):
     """Stream this server's pending log entries to the client, then start
     applying the log entries the client uploaded earlier in this session.

     Errors while applying are stored on the session and reported to the
     client later, in 'get_sync_finish'.
     """
     # Initialise before the try block so the error handlers below can
     # never hit a NameError when the session lookup itself fails
     # (same guard as used in 'put_login').
     session = None
     try:
         session = self.sessions[session_token]
         self.ui.set_progress_text("Sending log entries...")
         log_entries = session.database.log_entries_to_sync_for(\
             session.client_info["machine_id"],
             session.client_info["interested_in_old_reps"])
         number_of_entries = session.database.\
             number_of_log_entries_to_sync_for(\
             session.client_info["machine_id"],
             session.client_info["interested_in_old_reps"])
         for buffer in self._stream_log_entries(log_entries,
             number_of_entries):
             yield buffer
     except:
         yield self.handle_error(session, traceback_string())
     # Now that all the data is underway to the client, we can already
     # start applying the client log entries. If there are errors that
     # occur, we save them and communicate them to the client in
     # 'get_sync_finish'.
     try:
         self.ui.set_progress_text("Applying log entries...")
         # First, dump to the science log, so that we can skip over the new
         # logs in case the client uploads them.
         session.database.dump_to_science_log()
         for log_entry in session.client_log:
             session.database.apply_log_entry(log_entry)
         # Skip over the logs that the client promised to upload.
         if session.client_info["upload_science_logs"]:
             session.database.skip_science_log()
     except:
         if session is not None:
             session.apply_error = traceback_string()
Esempio n. 2
0
 def content():
     """Generator yielding the binary file in chunks.

     Relies on closure variables from the enclosing request handler:
     'self', 'session', 'filename' and 'binary_format' — presumably the
     same ones set up in 'get_server_entire_database_binary'.
     """
     try:
         for buffer in self.stream_binary_file(filename):
             yield buffer
         # Only clean up temporary data once the whole file streamed out.
         binary_format.clean_up()
     except:
         # Stream the error report to the client instead of dying silently.
         yield self.handle_error(session, traceback_string())
Esempio n. 3
0
def _deploy_without_asking():
    """Run the deploy command sequence without any confirmation prompts.

    Picks the offline or online command list based on 'env.offline', runs
    each command through 'deploy_checkpoint', mails the admins on failure
    and finalises the release bookkeeping on success.
    """
    if env.offline:
        commands = OFFLINE_DEPLOY_COMMANDS
    else:
        commands = ONLINE_DEPLOY_COMMANDS

    try:
        # 'index' lets deploy_checkpoint resume from a known step;
        # 'func_name' is the Python 2 function-name attribute.
        for index, command in enumerate(commands):
            deploy_checkpoint(index, command.func_name, execute_with_timing,
                              command)
    except PreindexNotFinished:
        # A preindex is still running: notify and abort quietly (no raise).
        mail_admins(
            " You can't deploy to {} yet. There's a preindex in process.".
            format(env.environment),
            ("Preindexing is taking a while, so hold tight "
             "and wait for an email saying it's done. "
             "Thank you for using AWESOME DEPLOY."))
    except Exception:
        execute_with_timing(
            mail_admins, "Deploy to {environment} failed. Try resuming with "
            "fab {environment} deploy:resume=yes.".format(
                environment=env.environment), traceback_string())
        # hopefully bring the server back to life
        silent_services_restart()
        # Re-raise so the caller/fabric sees the failure.
        raise
    else:
        # Success path: switch the 'current' symlink, restart services and
        # record the release before clearing the cached deploy state.
        execute_with_timing(release.update_current)
        silent_services_restart()
        execute_with_timing(release.record_successful_release)
        execute_with_timing(release.record_successful_deploy)
        clear_cached_deploy()
Esempio n. 4
0
 def get_server_entire_database_binary(self, environ, session_token):
     """Return an iterator streaming the entire database file in binary
     form to the client (used for the initial full sync).

     Sets the global 'mnemosyne_content_length' so the WSGI layer can emit
     a correct Content-Length header.
     """
     # Initialise before the try block so the error handler can never hit
     # a NameError when the session lookup itself fails.
     session = None
     try:
         session = self.sessions[session_token]
         self.ui.set_progress_text("Sending entire binary database...")
         binary_format = self.binary_format_for(session)
         filename = binary_format.binary_filename(\
             session.client_info["store_pregenerated_data"],
             session.client_info["interested_in_old_reps"])
         global mnemosyne_content_length
         mnemosyne_content_length = os.path.getsize(filename)
         # Since we want to modify the headers in this function, we cannot
         # use 'yield' directly to stream content, but have to add one layer
         # of indirection: http://www.cherrypy.org/wiki/ReturnVsYield
         #
         # Since we return an iterator, we also need to re-encapsulate our
         # code in a try block.
         def content():
             try:
                 for buffer in self.stream_binary_file(filename):
                     yield buffer
                 binary_format.clean_up()
             except:
                 yield self.handle_error(session, traceback_string())
         return content()
         # This is a full sync, we don't need to apply client log
         # entries here.
     except:
         return self.handle_error(session, traceback_string())
Esempio n. 5
0
 def get_server_binary_file(self, environ, session_token, filename):
     """Return an iterator streaming a single binary file from the data
     directory to the client.

     The filename is sanitised so a malicious client cannot escape the
     media directory; 'mnemosyne_content_length' is set for the
     Content-Length header.
     """
     # Initialise before the try block so the error handler can never hit
     # a NameError when the session lookup itself fails.
     session = None
     try:
         session = self.sessions[session_token]
         global mnemosyne_content_length
         filename = unicode(filename, "utf-8")
         # Make sure a malicious client cannot access anything outside
         # of the media directory.
         filename = filename.replace("../", "").replace("..\\", "")
         filename = filename.replace("/..", "").replace("\\..", "")
         filename = os.path.join(session.database.data_dir(), filename)
         file_size = os.path.getsize(filename)
         mnemosyne_content_length = file_size
         # Since we want to modify the headers in this function, we cannot
         # use 'yield' directly to stream content, but have to add one layer
         # of indirection: http://www.cherrypy.org/wiki/ReturnVsYield
         #
         # We don't have progress bars here, as 'get_server_media_file'
         # gets called too frequently, and this would slow down the UI.
         #
         # Since we return an iterator, we also need to re-encapsulate our
         # code in a try block.
         def content():
             try:
                 for buffer in self.stream_binary_file(\
                     filename, progress_bar=False):
                     yield buffer
             except:
                 yield self.handle_error(session, traceback_string())
         return content()
     except:
         return self.handle_error(session, traceback_string())
Esempio n. 6
0
 def content():
     """Generator yielding the binary file in chunks, without a progress
     bar.

     Relies on closure variables from the enclosing request handler:
     'self', 'session' and 'filename' — presumably the sanitised path set
     up in 'get_server_binary_file'.
     """
     try:
         for buffer in self.stream_binary_file(\
             filename, progress_bar=False):
             yield buffer
     except:
         # Stream the error report to the client instead of dying silently.
         yield self.handle_error(session, traceback_string())
Esempio n. 7
0
 def get_server_media_filenames(self, environ, session_token,
                                redownload_all=False):
     """Return the newline-joined, utf-8 encoded list of media filenames
     the client should download, relative to the data_dir.

     'redownload_all' arrives as a string from the URL, hence the
     comparison against "1"/"True"/"true".
     """
     # Initialise before the try block so the error handler can never hit
     # a NameError when the session lookup itself fails.
     session = None
     try:
         session = self.sessions[session_token]
         global mnemosyne_content_length
         mnemosyne_content_length = 0
         self.ui.set_progress_text("Sending media files...")
         # Send list of filenames in the format <mediadir>/<filename>, i.e.
         # relative to the data_dir. Note we always use / internally.
         subdir = os.path.basename(session.database.media_dir())
         if redownload_all in ["1", "True", "true"]:
             filenames = [subdir + "/" + filename for filename in \
                          session.database.all_media_filenames()]
         else:
             filenames = [subdir + "/" + filename  for filename in \
                          session.database.media_filenames_to_sync_for(\
                              session.client_info["machine_id"])]
         if len(filenames) == 0:
             return ""
         # Total size of all files, so the client can show progress.
         for filename in filenames:
             mnemosyne_content_length += os.path.getsize((os.path.join(\
                     session.database.data_dir(), filename)))
         return "\n".join(filenames).encode("utf-8")
     except:
         return self.handle_error(session, traceback_string())
Esempio n. 8
0
 def get_server_generate_log_entries_for_settings(\
         self, environ, session_token):
     """Ask the server database to emit log entries for its configuration
     settings, so the client can pick them up on the next log fetch.
     """
     # Initialise before the try block so the error handler can never hit
     # a NameError when the session lookup itself fails.
     session = None
     try:
         session = self.sessions[session_token]
         session.database.generate_log_entries_for_settings()
         return self.text_format.repr_message("OK")
     except:
         return self.handle_error(session, traceback_string())
Esempio n. 9
0
 def get_sync_cancel(self, environ, session_token):
     """Cancel the session identified by 'session_token' and acknowledge
     to the client.
     """
     try:
         self.ui.set_progress_text("Sync cancelled!")
         self.cancel_session_with_token(session_token)
         return self.text_format.repr_message("OK")
     except:
         # Use .get(): the token may be unknown (or the session already
         # removed by the cancel above), and an indexing KeyError here
         # would escape the error handler entirely.
         session = self.sessions.get(session_token)
         return self.handle_error(session, traceback_string())
Esempio n. 10
0
 def get_sync_cancel(self, environ, session_token):
     """Cancel the session identified by 'session_token' and acknowledge
     to the client.
     """
     try:
         self.ui.set_progress_text("Sync cancelled!")
         self.cancel_session_with_token(session_token)
         return self.text_format.repr_message("OK")
     except:
         # Use .get(): the token may be unknown (or the session already
         # removed by the cancel above), and an indexing KeyError here
         # would escape the error handler entirely.
         session = self.sessions.get(session_token)
         return self.handle_error(session, traceback_string())
Esempio n. 11
0
 def get_sync_finish(self, environ, session_token):
     """Finish the sync: report any error saved while applying client log
     entries, otherwise close the session and garbage-collect stale ones.
     """
     # Initialise before the try block so the error handler can never hit
     # a NameError when the session lookup itself fails.
     session = None
     try:
         session = self.sessions[session_token]
         if session.apply_error is not None:
             return self.handle_error(session, session.apply_error)
         self.ui.set_progress_text("Sync finished!")
         self.close_session_with_token(session_token)
         # Now is a good time to garbage-collect dangling sessions.
         self.expire_old_sessions()
         return self.text_format.repr_message("OK")
     except:
         return self.handle_error(session, traceback_string())
Esempio n. 12
0
 def get_server_entire_database(self, environ, session_token):
     """Stream every relevant log entry of the server database to the
     client (text-format full sync).
     """
     # Initialise before the try block so the error handler can never hit
     # a NameError when the session lookup itself fails.
     session = None
     try:
         session = self.sessions[session_token]
         self.ui.set_progress_text("Sending entire database...")
         session.database.dump_to_science_log()
         log_entries = session.database.all_log_entries(\
             session.client_info["interested_in_old_reps"])
         number_of_entries = session.database.number_of_log_entries(\
             session.client_info["interested_in_old_reps"])
         for buffer in self._stream_log_entries(log_entries,
             number_of_entries):
             yield buffer
     except:
         yield self.handle_error(session, traceback_string())
Esempio n. 13
0
 def get_server_check_media_files(self, environ, session_token):
     """Refresh the server's media files before syncing them.

     We check if files were updated outside of the program, or if media
     files need to be generated dynamically, e.g. latex. This can
     generate MEDIA_EDITED log entries, so it should be done first.
     """
     # Initialise before the try block so the error handler can never hit
     # a NameError when the session lookup itself fails.
     session = None
     try:
         session = self.sessions[session_token]
         if self.check_for_edited_local_media_files:
             self.ui.set_progress_text("Checking for edited media files...")
             session.database.check_for_edited_media_files()
         # Always create media files, otherwise they are not synced across.
         self.ui.set_progress_text("Dynamically creating media files...")
         session.database.dynamically_create_media_files()
         return self.text_format.repr_message("OK")
     except:
         return self.handle_error(session, traceback_string())
Esempio n. 14
0
 def put_client_binary_file(self, environ, session_token, filename):
     """Receive a single binary file from the client and store it under
     the data directory, after sanitising the filename.
     """
     # Initialise before the try block so the error handler can never hit
     # a NameError when the session lookup itself fails.
     session = None
     try:
         session = self.sessions[session_token]
         size = int(environ["CONTENT_LENGTH"])
         filename = unicode(filename, "utf-8")
         # Make sure a malicious client cannot overwrite anything outside
         # of the media directory.
         filename = filename.replace("../", "").replace("..\\", "")
         filename = filename.replace("/..", "").replace("\\..", "")
         filename = os.path.join(session.database.data_dir(), filename)
         # We don't have progress bars here, as 'put_client_binary_file'
         # gets called too frequently, and this would slow down the UI.
         self.download_binary_file(environ["wsgi.input"], filename, size,
             progress_bar=False)
         return self.text_format.repr_message("OK")
     except:
         return self.handle_error(session, traceback_string())
Esempio n. 15
0
 def put_client_entire_database_binary(self, environ, session_token):
     """Replace the server database with the binary database uploaded by
     the client (client-to-server full sync), then rebuild partnerships.
     """
     # Initialise before the try block so the error handler can never hit
     # a NameError when the session lookup itself fails.
     session = None
     try:
         session = self.sessions[session_token]
         self.ui.set_progress_text("Getting entire binary database...")
         filename = session.database.path()
         session.database.abandon()
         file_size = int(environ["CONTENT_LENGTH"])
         self.download_binary_file(\
             environ["wsgi.input"], filename, file_size)
         session.database.load(filename)
         session.database.change_user_id(session.client_info["user_id"])
         session.database.create_if_needed_partnership_with(\
             session.client_info["machine_id"])
         session.database.remove_partnership_with(self.machine_id)
         # Next sync with a third party should be a full sync too.
         session.database.reset_partnerships()
         return self.text_format.repr_message("OK")
     except:
         return self.handle_error(session, traceback_string())
Esempio n. 16
0
 def get_server_media_filenames(self, environ, session_token,
                                redownload_all=False):
     """Return the newline-joined, utf-8 encoded list of media filenames
     the client should download.

     'redownload_all' arrives as a string from the URL, hence the
     comparison against "1"/"True"/"true".
     """
     # Initialise before the try block so the error handler can never hit
     # a NameError when the session lookup itself fails.
     session = None
     try:
         session = self.sessions[session_token]
         global mnemosyne_content_length
         mnemosyne_content_length = 0
         self.ui.set_progress_text("Sending media files...")
         if redownload_all in ["1", "True", "true"]:
             filenames = list(session.database.all_media_filenames())
         else:
             filenames = list(session.database.media_filenames_to_sync_for(\
                 session.client_info["machine_id"]))
         if len(filenames) == 0:
             return ""
         # Total size of all files, so the client can show progress.
         for filename in filenames:
             mnemosyne_content_length += os.path.getsize(\
                 os.path.join(session.database.media_dir(), filename))
         return "\n".join(filenames).encode("utf-8")
     except:
         return self.handle_error(session, traceback_string())
Esempio n. 17
0
 def put_client_log_entries(self, environ, session_token):
     """Receive a batch of client log entries and, once all have arrived,
     check them against the server's pending entries for conflicts.

     Returns "Continue" while more batches are expected, "Conflict" when
     the same object was modified on both sides, "OK" otherwise.
     """
     # Initialise before the try block so the error handler can never hit
     # a NameError when the session lookup itself fails.
     session = None
     try:
         session = self.sessions[session_token]
         self.ui.set_progress_text("Receiving log entries...")
         socket = environ["wsgi.input"]
         element_loop = self.text_format.parse_log_entries(socket)
         # The first element is the total entry count announced by the
         # client ('.next()' is the Python 2 iterator protocol).
         session.number_of_client_entries = int(element_loop.next())
         if session.number_of_client_entries == 0:
             return self.text_format.repr_message("OK")
         self.ui.set_progress_range(session.number_of_client_entries)
         self.ui.set_progress_update_interval(\
             session.number_of_client_entries/50)
         for log_entry in element_loop:
             session.client_log.append(log_entry)
             if log_entry["type"] not in self.dont_cause_conflict:
                 if "fname" in log_entry:
                     log_entry["o_id"] = log_entry["fname"]
                 session.client_o_ids.append(log_entry["o_id"])
             self.ui.set_progress_value(len(session.client_log))
         # If we haven't downloaded all entries yet, tell the client
         # it's OK to continue.
         if len(session.client_log) < session.number_of_client_entries:
             return self.text_format.repr_message("Continue")
         # Now we have all the data from the client and we can determine
         # whether there are conflicts.
         for log_entry in session.database.log_entries_to_sync_for(\
             session.client_info["machine_id"]):
             if not log_entry:
                 continue  # Irrelevent entry for card-based clients.
             if "fname" in log_entry:
                 log_entry["o_id"] = log_entry["fname"]
             if log_entry["type"] not in self.dont_cause_conflict and \
                 log_entry["o_id"] in session.client_o_ids:
                 return self.text_format.repr_message("Conflict")
         if session.database.is_empty():
             session.database.change_user_id(session.client_info["user_id"])
         return self.text_format.repr_message("OK")
     except:
         return self.handle_error(session, traceback_string())
Esempio n. 18
0
 def get_server_media_filenames(self,
                                environ,
                                session_token,
                                redownload_all=False):
     """Return the newline-joined, utf-8 encoded list of media filenames
     the client should download.

     'redownload_all' arrives as a string from the URL, hence the
     comparison against "1"/"True"/"true".
     """
     # Initialise before the try block so the error handler can never hit
     # a NameError when the session lookup itself fails.
     session = None
     try:
         session = self.sessions[session_token]
         global mnemosyne_content_length
         mnemosyne_content_length = 0
         self.ui.set_progress_text("Sending media files...")
         if redownload_all in ["1", "True", "true"]:
             filenames = list(session.database.all_media_filenames())
         else:
             filenames = list(session.database.media_filenames_to_sync_for(\
                 session.client_info["machine_id"]))
         if len(filenames) == 0:
             return ""
         # Total size of all files, so the client can show progress.
         for filename in filenames:
             mnemosyne_content_length += os.path.getsize(\
                 os.path.join(session.database.media_dir(), filename))
         return "\n".join(filenames).encode("utf-8")
     except:
         return self.handle_error(session, traceback_string())
Esempio n. 19
0
 def get_server_archive_filenames(self, environ, session_token):
     """Return the newline-joined, utf-8 encoded list of archive filenames
     the client should download, relative to the data_dir.
     """
     # Initialise before the try block so the error handler can never hit
     # a NameError when the session lookup itself fails.
     session = None
     try:
         session = self.sessions[session_token]
         global mnemosyne_content_length
         mnemosyne_content_length = 0
         self.ui.set_progress_text("Sending archive files...")
         # Send list of filenames in the format "archive"/<filename>, i.e.
         # relative to the data_dir. Note we always use / internally.
         archive_dir = os.path.join(session.database.data_dir(), "archive")
         if not os.path.exists(archive_dir):
             return ""
         filenames = ["archive/" + filename for filename in \
                      os.listdir(archive_dir) if os.path.isfile\
                      (os.path.join(archive_dir, filename))]
         if len(filenames) == 0:
             return ""
         # Total size of all files, so the client can show progress.
         for filename in filenames:
             mnemosyne_content_length += os.path.getsize(os.path.join(\
                     session.database.data_dir(), filename))
         return "\n".join(filenames).encode("utf-8")
     except:
         return self.handle_error(session, traceback_string())
Esempio n. 20
0
def hotfix_deploy():
    """
    deploy ONLY the code with no extra cleanup or syncing

    for small python-only hotfixes

    """
    # Triple confirmation gate: any "no" aborts the deploy.
    if not console.confirm('Are you sure you want to deploy to {env.environment}?'.format(env=env), default=False) or \
       not console.confirm('Did you run "fab {env.environment} preindex_views"? '.format(env=env), default=False) or \
       not console.confirm('HEY!!!! YOU ARE ONLY DEPLOYING CODE. THIS IS NOT A NORMAL DEPLOY. COOL???', default=False):
        utils.abort('Deployment aborted.')

    _require_target()
    run('echo ping!')  # workaround for delayed console response
    try:
        # Push only the code at the configured deploy ref.
        execute(release.update_code, env.deploy_metadata.deploy_ref, True)
    except Exception:
        execute(mail_admins, "Deploy failed", traceback_string())
        # hopefully bring the server back to life
        silent_services_restart(use_current_release=True)
        raise
    else:
        # Success: restart on the current release and record the deploy.
        silent_services_restart(use_current_release=True)
        execute(release.record_successful_deploy)
Esempio n. 21
0
 def sync(self, server, port, username, password):
     try:
         self.server = socket.gethostbyname(server)
         self.port = port
         if self.do_backup:
             self.ui.set_progress_text("Creating backup...")
             backup_file = self.database.backup()
         # We check if files were edited outside of the program. This can
         # generate EDITED_MEDIA_FILES log entries, so it should be done
         # first.
         if self.check_for_edited_local_media_files:
             self.ui.set_progress_text("Checking for edited media files...")
             self.database.check_for_edited_media_files()
         # Always create media files, otherwise they are not synced across.
         self.ui.set_progress_text("Dynamically creating media files...")
         self.database.dynamically_create_media_files()
         # Set timeout long enough for e.g. a slow NAS waking from
         # hibernation.
         socket.setdefaulttimeout(60)
         self.login(username, password)
         # Generating media files at the server side could take some time,
         # so we update the timeout.
         self.con = None
         socket.setdefaulttimeout(15 * 60)
         self.get_server_check_media_files()
         # Do a full sync after either the client or the server has restored
         # from a backup.
         if self.database.is_sync_reset_needed(\
             self.server_info["machine_id"]) or \
             self.server_info["sync_reset_needed"] == True:
             self.resolve_conflicts(restored_from_backup=True)
         # First sync, fetch database from server.
         elif self.database.is_empty():
             self.get_server_media_files()
             if self.server_info["supports_binary_transfer"]:
                 self.get_server_entire_database_binary()
             else:
                 self.get_server_entire_database()
             self.get_sync_finish()
             # Fetch config settings.
             self.login(username, password)
             self.get_server_generate_log_entries_for_settings()
             self.get_server_log_entries()
             self.get_sync_finish()
         # First sync, put binary database to server if supported.
         elif not self.database.is_empty() and \
                 self.server_info["is_database_empty"] and \
                 self.supports_binary_upload():
             self.put_client_media_files(reupload_all=True)
             self.put_client_entire_database_binary()
             self.get_sync_finish()
             # Upload config settings.
             self.login(username, password)
             self.database.generate_log_entries_for_settings()
             self.put_client_log_entries()
             self.get_server_log_entries()
             self.get_sync_finish()
         else:
             # Upload local changes and check for conflicts.
             result = self.put_client_log_entries()
             if result == "OK":
                 self.put_client_media_files()
                 self.get_server_media_files()
                 self.get_server_log_entries()
                 self.get_sync_finish()
             else:
                 self.resolve_conflicts()
         self.ui.show_information("Sync finished!")
     except Exception, exception:
         self.ui.close_progress()
         serious = True
         if type(exception) == type(socket.gaierror()):
             self.ui.show_error("Could not find server!")
             serious = False
         elif type(exception) == type(socket.error()):
             self.ui.show_error("Could not connect to server!")
             serious = False
         elif type(exception) == type(socket.timeout()):
             self.ui.show_error("Timeout while waiting for server!")
         elif type(exception) == type(SyncError()):
             self.ui.show_error(str(exception))
             serious = False
         elif type(exception) == type(SeriousSyncError()):
             self.ui.show_error(str(exception))
         else:
             self.ui.show_error(traceback_string())
         if serious and self.do_backup:
             # Only serious errors should result in the need for a full
             # sync next time.
             self.ui.show_error("Sync failed, restoring from backup. " + \
                 "The next sync will need to be a full sync.")
             if backup_file:
                 self.database.restore(backup_file)
Esempio n. 22
0
 def sync(self, server, port, username, password):
     try:
         self.server = socket.gethostbyname(server)
         self.port = port
         if self.do_backup:
             self.ui.set_progress_text("Creating backup...")
             backup_file = self.database.backup()
         # We check if files were edited outside of the program. This can
         # generate EDITED_MEDIA_FILES log entries, so it should be done
         # first.
         if self.check_for_edited_local_media_files:
             self.ui.set_progress_text("Checking for edited media files...")
             self.database.check_for_edited_media_files()
         # Always create media files, otherwise they are not synced across.
         self.ui.set_progress_text("Dynamically creating media files...")
         self.database.dynamically_create_media_files()
         # Set timeout long enough for e.g. a slow NAS waking from 
         # hibernation.
         socket.setdefaulttimeout(60)
         self.login(username, password)
         # Generating media files at the server side could take some time,
         # so we update the timeout.
         self.con = None
         socket.setdefaulttimeout(15*60)
         self.get_server_check_media_files()
         # Do a full sync after either the client or the server has restored
         # from a backup.
         if self.database.is_sync_reset_needed(\
             self.server_info["machine_id"]) or \
             self.server_info["sync_reset_needed"] == True:
             self.resolve_conflicts(restored_from_backup=True)
         # First sync, fetch database from server.
         elif self.database.is_empty():
             self.get_server_media_files(redownload_all=True)
             self.get_server_archive_files()
             if self.server_info["supports_binary_transfer"]:
                 self.get_server_entire_database_binary()
             else:
                 self.get_server_entire_database()
             self.get_sync_finish()
             # Fetch config settings.
             self.login(username, password)
             self.get_server_generate_log_entries_for_settings()
             self.get_server_log_entries()
             self.get_sync_finish()
         # First sync, put binary database to server if supported.
         elif not self.database.is_empty() and \
                 self.server_info["is_database_empty"] and \
                 self.supports_binary_upload():
             self.put_client_media_files(reupload_all=True)
             self.put_client_archive_files()
             self.put_client_entire_database_binary()
             self.get_sync_finish()
             # Upload config settings.
             self.login(username, password)
             self.database.generate_log_entries_for_settings()
             self.put_client_log_entries()
             self.get_server_log_entries()
             self.get_sync_finish()
         else:
             # Upload local changes and check for conflicts.
             result = self.put_client_log_entries()
             if result == "OK":
                 self.put_client_media_files()
                 self.get_server_media_files()
                 self.get_server_log_entries()
                 self.get_sync_finish()
             else:
                 self.resolve_conflicts()
         self.ui.show_information("Sync finished!")
     except Exception, exception:
         self.ui.close_progress()
         serious = True
         if type(exception) == type(socket.gaierror()):
             self.ui.show_error("Could not find server!")
             serious = False
         elif type(exception) == type(socket.error()):
             self.ui.show_error("Could not connect to server!")
             serious = False
         elif type(exception) == type(socket.timeout()):
             self.ui.show_error("Timeout while waiting for server!")
         elif type(exception) == type(SyncError()):
             self.ui.show_error(str(exception))
             serious = False
         elif type(exception) == type(SeriousSyncError()):
             self.ui.show_error(str(exception))
         else:
             self.ui.show_error(traceback_string())
         if serious and self.do_backup:
             # Only serious errors should result in the need for a full
             # sync next time.
             self.ui.show_error("Sync failed, restoring from backup. " + \
                 "The next sync will need to be a full sync.")
             if backup_file:
                 self.database.restore(backup_file)
Esempio n. 23
0
 def put_login(self, environ):
     """Handle a client login: authorise, create a session, detect sync
     cycles and copied databases, set up partnerships, and return this
     server's partner info (utf-8 encoded) to the client.
     """
     # Initialised first so the error handler never sees an unbound name.
     session = None
     try:
         self.ui.set_progress_text("Client logging in...")
         client_info_repr = environ["wsgi.input"].readline()
         client_info = self.text_format.parse_partner_info(\
             client_info_repr)
         if not self.authorise(client_info["username"],
             client_info["password"]):
             self.ui.close_progress()
             return self.text_format.repr_message("Access denied")
         # Close old session waiting in vain for client input.
         # This will also close any session which timed out while
         # trying to log in just before, so we need to make sure the
         # client timeout is long enough for e.g. a NAS which is slow to
         # wake from hibernation.
         old_running_session_token = self.session_token_for_user.\
             get(client_info["username"])
         if old_running_session_token:
             self.terminate_session_with_token(old_running_session_token)
         session = self.create_session(client_info)
         # If the client database is empty, perhaps it was reset, and we
         # need to delete the partnership from our side too.
         if session.client_info["is_database_empty"] == True:
             session.database.remove_partnership_with(\
                 session.client_info["machine_id"])
         # Make sure there are no cycles in the sync graph. Don't worry
         # about this if the database is empty, since the previous statement
         # would otherwise cause a spurious error.
         server_in_client_partners = self.machine_id in \
             session.client_info["partners"]
         client_in_server_partners = session.client_info["machine_id"] in \
             session.database.partners()
         # A one-sided partnership means the sync graph would gain a cycle.
         if (server_in_client_partners and not client_in_server_partners)\
            or \
            (client_in_server_partners and not server_in_client_partners):
             if not session.client_info["is_database_empty"]:
                 self.terminate_session_with_token(session.token)
                 self.ui.close_progress()
                 return self.text_format.repr_message("Sync cycle detected")
         # Detect the case where a user has copied the entire mnemosyne
         # directory before syncing.
         if session.client_info["machine_id"] == self.machine_id:
             self.terminate_session_with_token(session.token)
             self.ui.close_progress()
             return self.text_format.repr_message("same machine ids")
         # Create partnerships.
         session.database.create_if_needed_partnership_with(\
             client_info["machine_id"])
         session.database.merge_partners(client_info["partners"])
         # Note that we need to send 'user_id' to the client as well, so
         # that the client can make sure the 'user_id's (used to label the
         # anonymous uploaded logs) are consistent across machines.
         server_info = {"user_id": session.database.user_id(),
             "machine_id": self.machine_id,
             "program_name": self.program_name,
             "program_version": self.program_version,
             "database_version": session.database.version,
             "partners": session.database.partners(),
             "session_token": session.token,
             "supports_binary_transfer": \
                 self.supports_binary_transfer(session),
             "is_database_empty": session.database.is_empty()}
         # Signal if we need a sync reset after restoring from a backup.
         server_info["sync_reset_needed"] = \
             session.database.is_sync_reset_needed(\
             client_info["machine_id"])
         # Add optional program-specific information.
         server_info = \
             session.database.append_to_sync_partner_info(server_info)
         return self.text_format.repr_partner_info(server_info)\
                .encode("utf-8")
     except:
         # We need to be really thorough in our exception handling, so as
         # to always revert the database to its last backup if an error
         # occurs. It is important that this happens as soon as possible,
         # especially if this server is being run as a built-in server in a
         # thread in an SRS desktop application.
         # As mentioned before, the error handling should happen here, at
         # the lowest level, and not in e.g. 'wsgi_app'.
         return self.handle_error(session, traceback_string())