Example #1
def do_export_realm(realm, output_dir, threads=0):
    response = {} # type: Dict[str, Any]

    logging.info("Exporting realm configuration")
    export_realm_data(realm, response)
    logging.info("Exporting core realm data")
    export_with_admin_auth(realm, response)
    export_file = os.path.join(output_dir, "realm.json")
    with open(export_file, "w") as f:
        f.write(ujson.dumps(response, indent=4))

    logging.info("Exporting uploaded files and avatars")
    if not settings.LOCAL_UPLOADS_DIR:
        export_uploads(realm, output_dir)
    else:
        export_uploads_local(realm, output_dir)

    user_profile_ids = set(x["id"] for x in response['zerver_userprofile'] +
                           response['zerver_userprofile_crossrealm'])
    recipient_ids = set(x["id"] for x in response['zerver_recipient'])
    logging.info("Exporting messages")
    export_messages(realm, user_profile_ids, recipient_ids, output_dir=output_dir,
                    threads=threads)
    if threads > 0:
        # Start parallel jobs to export the UserMessage objects
        def run_job(shard):
            subprocess.call(["./manage.py", 'export_usermessage_batch', '--path',
                             output_dir, '--thread', shard])
            return 0
        for (status, job) in run_parallel(run_job, [str(x) for x in range(0, threads)],
                                          threads=threads):
            print("Shard %s finished, status %s" % (job, status))
    logging.info("Finished exporting %s" % (realm.domain))
Example #2
def transfer_emoji_to_s3(processes: int) -> None:
    def _transfer_emoji_to_s3(realm_emoji: RealmEmoji) -> int:
        if not realm_emoji.file_name or not realm_emoji.author:
            return 0  # nocoverage
        emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
            realm_id=realm_emoji.realm.id,
            emoji_file_name=realm_emoji.file_name
        )
        emoji_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", emoji_path) + ".original"
        try:
            with open(emoji_path, 'rb') as f:
                s3backend.upload_emoji_image(f, realm_emoji.file_name, realm_emoji.author)
                logging.info("Uploaded emoji file in path {}".format(emoji_path))
        except FileNotFoundError:  # nocoverage
            pass
        return 0

    realm_emojis = list(RealmEmoji.objects.filter())
    if processes == 1:
        for realm_emoji in realm_emojis:
            _transfer_emoji_to_s3(realm_emoji)
    else:  # nocoverage
        output = []
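        # Close the inherited Django database connection before forking
        # workers so that each child opens its own; a connection shared
        # across fork() is not safe to use.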
        connection.close()
        for status, job in run_parallel(_transfer_emoji_to_s3, realm_emojis, processes):
            output.append(job)
Example #3
def process_uploads(upload_list: List[ZerverFieldsT], upload_dir: str,
                    threads: int) -> List[ZerverFieldsT]:
    """
    This function downloads the uploads and saves them in the realm's upload directory.
    """
    def get_uploads(upload: List[str]) -> int:
        upload_url = upload[0]
        upload_path = upload[1]
        upload_path = os.path.join(upload_dir, upload_path)

        response = requests.get(upload_url, stream=True)
        os.makedirs(os.path.dirname(upload_path), exist_ok=True)
        with open(upload_path, 'wb') as upload_file:
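            # response.raw is the undecoded socket stream; this assumes the
            # server applies no Content-Encoding (e.g. gzip) to these files.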
            shutil.copyfileobj(response.raw, upload_file)
        return 0

    logging.info('######### GETTING ATTACHMENTS #########\n')
    logging.info('DOWNLOADING ATTACHMENTS .......\n')
    upload_url_list = []
    for upload in upload_list:
        upload_url = upload['path']
        upload_s3_path = upload['s3_path']
        upload_url_list.append([upload_url, upload_s3_path])
        upload['path'] = upload_s3_path

    # Run the downloads in parallel
    output = []
    for (status, job) in run_parallel(get_uploads,
                                      upload_url_list,
                                      threads=threads):
        output.append(job)

    logging.info('######### GETTING ATTACHMENTS FINISHED #########\n')
    return upload_list
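A hypothetical invocation (URL and paths assumed for illustration): each record carries the source URL in 'path' and the relative destination in 's3_path', and the returned list has 'path' rewritten to the destination.

uploads = [{'path': 'https://files.example.com/abc/report.pdf',
            's3_path': 'uploads/2/abc/report.pdf'}]
records = process_uploads(uploads, upload_dir='/tmp/converted/uploads', threads=4)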
Example #4
def run_parallel_wrapper(
        f: Callable[[ListJobData], None],
        full_items: List[ListJobData],
        threads: int = 6) -> Iterator[Tuple[int, List[ListJobData]]]:
    logging.info("Distributing %s items across %s threads", len(full_items),
                 threads)

    def wrapping_function(items: List[ListJobData]) -> int:
        count = 0
        for item in items:
            try:
                f(item)
            except Exception:
                logging.exception("Error processing item: %s",
                                  item,
                                  stack_info=True)
            count += 1
            if count % 1000 == 0:
                logging.info("A download thread finished %s items", count)
        return 0

    job_lists: List[List[ListJobData]] = [
        full_items[i::threads] for i in range(threads)
    ]
    return run_parallel(wrapping_function, job_lists, threads=threads)
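The full_items[i::threads] slices stripe the items round-robin across the per-thread lists, so the shards stay within one item of each other in length. A quick illustration:

items = list(range(7))
# With threads = 3:
#   items[0::3] == [0, 3, 6]
#   items[1::3] == [1, 4]
#   items[2::3] == [2, 5]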
Example #5
def transfer_message_files_to_s3(processes: int) -> None:
    def _transfer_message_files_to_s3(attachment: Attachment) -> int:
        file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files",
                                 attachment.path_id)
        try:
            with open(file_path, 'rb') as f:
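                # mimetypes.guess_type() returns (None, None) for unknown
                # extensions, so guessed_type may be None here.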
                guessed_type = guess_type(attachment.file_name)[0]
                upload_image_to_s3(s3backend.uploads_bucket,
                                   attachment.path_id, guessed_type,
                                   attachment.owner, f.read())
                logging.info("Uploaded message file in path %s", file_path)
        except FileNotFoundError:  # nocoverage
            pass
        return 0

    attachments = list(Attachment.objects.all())
    if processes == 1:
        for attachment in attachments:
            _transfer_message_files_to_s3(attachment)
    else:  # nocoverage
        output = []
        connection.close()
        for status, job in run_parallel(_transfer_message_files_to_s3,
                                        attachments, processes):
            output.append(job)
Example #6
def transfer_emoji_to_s3(processes: int) -> None:
    def _transfer_emoji_to_s3(realm_emoji: RealmEmoji) -> int:
        if not realm_emoji.file_name or not realm_emoji.author:
            return 0  # nocoverage
        emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
            realm_id=realm_emoji.realm.id,
            emoji_file_name=realm_emoji.file_name,
        )
        emoji_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars",
                                  emoji_path) + ".original"
        try:
            with open(emoji_path, 'rb') as f:
                s3backend.upload_emoji_image(f, realm_emoji.file_name,
                                             realm_emoji.author)
                logging.info("Uploaded emoji file in path %s", emoji_path)
        except FileNotFoundError:  # nocoverage
            pass
        return 0

    realm_emojis = list(RealmEmoji.objects.filter())
    if processes == 1:
        for realm_emoji in realm_emojis:
            _transfer_emoji_to_s3(realm_emoji)
    else:  # nocoverage
        output = []
        connection.close()
        for status, job in run_parallel(_transfer_emoji_to_s3, realm_emojis,
                                        processes):
            output.append(job)
Example #7
def process_avatars(avatar_list: List[ZerverFieldsT],
                    avatar_dir: str,
                    realm_id: int,
                    threads: int,
                    size_url_suffix: str = '') -> List[ZerverFieldsT]:
    """
    This function downloads the avatar of the user and saves it in the
    user's avatar directory with both the '.png' and '.original' extensions.
    Required parameters:

    1. avatar_list: List of avatars to be mapped in avatars records.json file
    2. avatar_dir: Folder where the downloaded avatars are saved
    3. realm_id: Realm ID.
    """
    def get_avatar(avatar_upload_list: List[str]) -> int:
        avatar_url = avatar_upload_list[0]
        image_path = avatar_upload_list[1]
        original_image_path = avatar_upload_list[2]
        response = requests.get(avatar_url + size_url_suffix, stream=True)
        with open(image_path, 'wb') as image_file:
            shutil.copyfileobj(response.raw, image_file)
        shutil.copy(image_path, original_image_path)
        return 0

    logging.info('######### GETTING AVATARS #########\n')
    logging.info('DOWNLOADING AVATARS .......\n')
    avatar_original_list = []
    avatar_upload_list = []
    for avatar in avatar_list:
        avatar_hash = user_avatar_path_from_ids(avatar['user_profile_id'],
                                                realm_id)
        avatar_url = avatar['path']
        avatar_original = dict(avatar)

        image_path = ('%s/%s.png' % (avatar_dir, avatar_hash))
        original_image_path = ('%s/%s.original' % (avatar_dir, avatar_hash))

        avatar_upload_list.append(
            [avatar_url, image_path, original_image_path])
        # We don't add the size field here in avatar's records.json,
        # since the metadata is not needed on the import end, and we
        # don't have it until we've downloaded the files anyway.
        avatar['path'] = image_path
        avatar['s3_path'] = image_path

        avatar_original['path'] = original_image_path
        avatar_original['s3_path'] = original_image_path
        avatar_original_list.append(avatar_original)

    # Run the downloads in parallel
    output = []
    for (status, job) in run_parallel(get_avatar,
                                      avatar_upload_list,
                                      threads=threads):
        output.append(job)

    logging.info('######### GETTING AVATARS FINISHED #########\n')
    return avatar_list + avatar_original_list
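A hypothetical invocation (values assumed); with a size_url_suffix such as '-512', the downloader fetches avatar['path'] + '-512' while recording local .png and .original paths.

records = process_avatars(
    [{'user_profile_id': 10, 'path': 'https://avatars.example.com/u/10'}],
    avatar_dir='/tmp/converted/avatars',
    realm_id=1,
    threads=4,
    size_url_suffix='-512',
)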
Example #8
def process_avatars(avatar_list: List[ZerverFieldsT], avatar_dir: str, realm_id: int,
                    threads: int, size_url_suffix: str = '') -> List[ZerverFieldsT]:
    """
    This function downloads the avatar of the user and saves it in the
    user's avatar directory with both the '.png' and '.original' extensions.
    Required parameters:

    1. avatar_list: List of avatars to be mapped in avatars records.json file
    2. avatar_dir: Folder where the downloaded avatars are saved
    3. realm_id: Realm ID.

    We use this for Slack and Gitter conversions, where avatars need to be
    downloaded.  For simpler conversions see write_avatar_png.
    """

    def get_avatar(avatar_upload_list: List[str]) -> int:
        avatar_url = avatar_upload_list[0]

        image_path = os.path.join(avatar_dir, avatar_upload_list[1])
        original_image_path = os.path.join(avatar_dir, avatar_upload_list[2])

        response = requests.get(avatar_url + size_url_suffix, stream=True)
        with open(image_path, 'wb') as image_file:
            shutil.copyfileobj(response.raw, image_file)
        shutil.copy(image_path, original_image_path)
        return 0

    logging.info('######### GETTING AVATARS #########\n')
    logging.info('DOWNLOADING AVATARS .......\n')
    avatar_original_list = []
    avatar_upload_list = []
    for avatar in avatar_list:
        avatar_hash = user_avatar_path_from_ids(avatar['user_profile_id'], realm_id)
        avatar_url = avatar['path']
        avatar_original = dict(avatar)

        image_path = ('%s.png' % (avatar_hash))
        original_image_path = ('%s.original' % (avatar_hash))

        avatar_upload_list.append([avatar_url, image_path, original_image_path])
        # We don't add the size field here in avatar's records.json,
        # since the metadata is not needed on the import end, and we
        # don't have it until we've downloaded the files anyway.
        avatar['path'] = image_path
        avatar['s3_path'] = image_path

        avatar_original['path'] = original_image_path
        avatar_original['s3_path'] = original_image_path
        avatar_original_list.append(avatar_original)

    # Run the downloads in parallel
    output = []
    for (status, job) in run_parallel(get_avatar, avatar_upload_list, threads=threads):
        output.append(job)

    logging.info('######### GETTING AVATARS FINISHED #########\n')
    return avatar_list + avatar_original_list
Example #9
def launch_user_message_subprocesses(threads: int, output_dir: Path) -> None:
    logging.info('Launching %d PARALLEL subprocesses to export UserMessage rows', threads)

    def run_job(shard: str) -> int:
        subprocess.call(["./manage.py", 'export_usermessage_batch', '--path',
                         str(output_dir), '--thread', shard])
        return 0

    for (status, job) in run_parallel(run_job,
                                      [str(x) for x in range(0, threads)],
                                      threads=threads):
        print("Shard %s finished, status %s" % (job, status))
Example #10
def process_emojis(zerver_realmemoji: List[ZerverFieldsT], emoji_dir: str,
                   emoji_url_map: ZerverFieldsT,
                   threads: int) -> List[ZerverFieldsT]:
    """
    This function downloads the custom emojis and saves them in the output emoji folder.
    Required parameters:

    1. zerver_realmemoji: List of all RealmEmoji objects to be imported
    2. emoji_dir: Folder where the downloaded emojis are saved
    3. emoji_url_map: Maps emoji name to its url
    """
    def get_emojis(upload: List[str]) -> int:
        emoji_url = upload[0]
        emoji_path = upload[1]
        upload_emoji_path = os.path.join(emoji_dir, emoji_path)

        response = requests.get(emoji_url, stream=True)
        os.makedirs(os.path.dirname(upload_emoji_path), exist_ok=True)
        with open(upload_emoji_path, 'wb') as emoji_file:
            shutil.copyfileobj(response.raw, emoji_file)
        return 0

    emoji_records = []
    upload_emoji_list = []
    logging.info('######### GETTING EMOJIS #########\n')
    logging.info('DOWNLOADING EMOJIS .......\n')
    for emoji in zerver_realmemoji:
        emoji_url = emoji_url_map[emoji['name']]
        emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
            realm_id=emoji['realm'], emoji_file_name=emoji['name'])

        upload_emoji_list.append([emoji_url, emoji_path])

        emoji_record = dict(emoji)
        emoji_record['path'] = emoji_path
        emoji_record['s3_path'] = emoji_path
        emoji_record['realm_id'] = emoji_record['realm']
        emoji_record.pop('realm')

        emoji_records.append(emoji_record)

    # Run the downloads in parallel
    output = []
    for (status, job) in run_parallel(get_emojis,
                                      upload_emoji_list,
                                      threads=threads):
        output.append(job)

    logging.info('######### GETTING EMOJIS FINISHED #########\n')
    return emoji_records
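A hypothetical invocation (emoji name and URL assumed): emoji_url_map is keyed by emoji name, and each zerver_realmemoji entry carries its realm id under 'realm'.

records = process_emojis(
    [{'name': 'zulip', 'realm': 2}],
    emoji_dir='/tmp/converted/emoji',
    emoji_url_map={'zulip': 'https://emoji.example.com/zulip.png'},
    threads=2,
)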
Example #11
def process_avatars(avatar_list: List[ZerverFieldsT], avatar_dir: str,
                    realm_id: int, threads: int) -> List[ZerverFieldsT]:
    """
    This function downloads the avatar at size 512 px and saves it in the
    user's avatar directory with both the '.png' and '.original'
    extensions.
    """
    def get_avatar(avatar_upload_list: List[str]) -> int:
        # get avatar of size 512
        slack_avatar_url = avatar_upload_list[0]
        image_path = avatar_upload_list[1]
        original_image_path = avatar_upload_list[2]
        response = requests.get(slack_avatar_url + '-512', stream=True)
        with open(image_path, 'wb') as image_file:
            shutil.copyfileobj(response.raw, image_file)
        shutil.copy(image_path, original_image_path)
        return 0

    logging.info('######### GETTING AVATARS #########\n')
    logging.info('DOWNLOADING AVATARS .......\n')
    avatar_original_list = []
    avatar_upload_list = []
    for avatar in avatar_list:
        avatar_hash = user_avatar_path_from_ids(avatar['user_profile_id'],
                                                realm_id)
        slack_avatar_url = avatar['path']
        avatar_original = dict(avatar)

        image_path = ('%s/%s.png' % (avatar_dir, avatar_hash))
        original_image_path = ('%s/%s.original' % (avatar_dir, avatar_hash))

        avatar_upload_list.append(
            [slack_avatar_url, image_path, original_image_path])

        avatar['path'] = image_path
        avatar['s3_path'] = image_path

        avatar_original['path'] = original_image_path
        avatar_original['s3_path'] = original_image_path
        avatar_original_list.append(avatar_original)

    # Run the downloads in parallel
    output = []
    for (status, job) in run_parallel(get_avatar,
                                      avatar_upload_list,
                                      threads=threads):
        output.append(job)

    logging.info('######### GETTING AVATARS FINISHED #########\n')
    return avatar_list + avatar_original_list
Example #12
def process_avatars(avatar_list: List[ZerverFieldsT], avatar_dir: str,
                    realm_id: int, threads: int) -> List[ZerverFieldsT]:
    """
    This function downloads the avatar at size 512 px and saves it in the
    user's avatar directory with both the '.png' and '.original'
    extensions.
    """
    def get_avatar(avatar_upload_list: List[str]) -> int:
        # get avatar of size 512
        slack_avatar_url = avatar_upload_list[0]
        image_path = avatar_upload_list[1]
        original_image_path = avatar_upload_list[2]
        response = requests.get(slack_avatar_url + '-512', stream=True)
        with open(image_path, 'wb') as image_file:
            shutil.copyfileobj(response.raw, image_file)
        shutil.copy(image_path, original_image_path)
        return 0

    logging.info('######### GETTING AVATARS #########\n')
    logging.info('DOWNLOADING AVATARS .......\n')
    avatar_original_list = []
    avatar_upload_list = []
    for avatar in avatar_list:
        avatar_hash = user_avatar_path_from_ids(avatar['user_profile_id'], realm_id)
        slack_avatar_url = avatar['path']
        avatar_original = dict(avatar)

        image_path = ('%s/%s.png' % (avatar_dir, avatar_hash))
        original_image_path = ('%s/%s.original' % (avatar_dir, avatar_hash))

        avatar_upload_list.append([slack_avatar_url, image_path, original_image_path])

        # We don't add the size field here in avatar's records.json,
        # since the metadata is not needed on the import end, and we
        # don't have it until we've downloaded the files anyway.
        avatar['path'] = image_path
        avatar['s3_path'] = image_path

        avatar_original['path'] = original_image_path
        avatar_original['s3_path'] = original_image_path
        avatar_original_list.append(avatar_original)

    # Run the downloads in parallel
    output = []
    for (status, job) in run_parallel(get_avatar, avatar_upload_list, threads=threads):
        output.append(job)

    logging.info('######### GETTING AVATARS FINISHED #########\n')
    return avatar_list + avatar_original_list
Example #13
def process_emojis(zerver_realmemoji: List[ZerverFieldsT], emoji_dir: str,
                   emoji_url_map: ZerverFieldsT, threads: int) -> List[ZerverFieldsT]:
    """
    This function downloads the custom emojis and saves them in the output emoji folder.
    Required parameters:

    1. zerver_realmemoji: List of all RealmEmoji objects to be imported
    2. emoji_dir: Folder where the downloaded emojis are saved
    3. emoji_url_map: Maps emoji name to its url
    """
    def get_emojis(upload: List[str]) -> int:
        emoji_url = upload[0]
        emoji_path = upload[1]
        upload_emoji_path = os.path.join(emoji_dir, emoji_path)

        response = requests.get(emoji_url, stream=True)
        os.makedirs(os.path.dirname(upload_emoji_path), exist_ok=True)
        with open(upload_emoji_path, 'wb') as emoji_file:
            shutil.copyfileobj(response.raw, emoji_file)
        return 0

    emoji_records = []
    upload_emoji_list = []
    logging.info('######### GETTING EMOJIS #########\n')
    logging.info('DOWNLOADING EMOJIS .......\n')
    for emoji in zerver_realmemoji:
        emoji_url = emoji_url_map[emoji['name']]
        emoji_path = RealmEmoji.PATH_ID_TEMPLATE.format(
            realm_id=emoji['realm'],
            emoji_file_name=emoji['name'])

        upload_emoji_list.append([emoji_url, emoji_path])

        emoji_record = dict(emoji)
        emoji_record['path'] = emoji_path
        emoji_record['s3_path'] = emoji_path
        emoji_record['realm_id'] = emoji_record['realm']
        emoji_record.pop('realm')

        emoji_records.append(emoji_record)

    # Run the downloads in parallel
    output = []
    for (status, job) in run_parallel(get_emojis, upload_emoji_list, threads=threads):
        output.append(job)

    logging.info('######### GETTING EMOJIS FINISHED #########\n')
    return emoji_records
Example #14
def run_parallel_wrapper(f: Callable[[ListJobData], None], full_items: List[ListJobData],
                         threads: int = 6) -> Iterable[Tuple[int, List[ListJobData]]]:
    logging.info("Distributing %s items across %s threads", len(full_items), threads)

    def wrapping_function(items: List[ListJobData]) -> int:
        count = 0
        for item in items:
            try:
                f(item)
            except Exception:
                logging.info("Error processing item: %s" % (item,))
                traceback.print_exc()
            count += 1
            if count % 1000 == 0:
                logging.info("A download thread finished %s items" % (count,))
        return 0
    job_lists = [full_items[i::threads] for i in range(threads)]  # type: List[List[ListJobData]]
    return run_parallel(wrapping_function, job_lists, threads=threads)
Example #15
def do_export_realm(realm, output_dir, threads):
    # type: (Realm, Path, int) -> None
    response = {} # type: TableData

    # We need at least one thread running to export
    # UserMessage rows.  The management command should
    # enforce this for us.
    assert threads >= 1

    logging.info("Exporting realm configuration")
    export_realm_data(realm, response)
    logging.info("Exporting core realm data")
    export_with_admin_auth(realm, response)
    export_file = os.path.join(output_dir, "realm.json")
    with open(export_file, "w") as f:
        f.write(ujson.dumps(response, indent=4))

    logging.info("Exporting uploaded files and avatars")
    if not settings.LOCAL_UPLOADS_DIR:
        export_uploads(realm, output_dir)
    else:
        export_uploads_local(realm, output_dir)

    user_profile_ids = set(x["id"] for x in response['zerver_userprofile'] +
                           response['zerver_userprofile_crossrealm'])
    recipient_ids = set(x["id"] for x in response['zerver_recipient'])
    logging.info("Exporting messages")
    export_messages(realm, user_profile_ids, recipient_ids, output_dir=output_dir)

    # Start parallel jobs to export the UserMessage objects
    logging.info('Launching %d PARALLEL subprocesses to export UserMessage rows', threads)

    def run_job(shard):
        # type: (str) -> int
        subprocess.call(["./manage.py", 'export_usermessage_batch', '--path',
                         str(output_dir), '--thread', shard])
        return 0

    for (status, job) in run_parallel(run_job,
                                      [str(x) for x in range(0, threads)],
                                      threads=threads):
        print("Shard %s finished, status %s" % (job, status))

    logging.info("Finished exporting %s" % (realm.domain))
Example #16
def transfer_avatars_to_s3(processes: int) -> None:
    def _transfer_avatar_to_s3(user: UserProfile) -> int:
        avatar_path = user_avatar_path(user)
        file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", avatar_path) + ".original"
        try:
            with open(file_path, 'rb') as f:
                s3backend.upload_avatar_image(f, user, user)
                logging.info("Uploaded avatar for %s in realm %s", user.id, user.realm.name)
        except FileNotFoundError:
            pass
        return 0

    users = list(UserProfile.objects.all())
    if processes == 1:
        for user in users:
            _transfer_avatar_to_s3(user)
    else:  # nocoverage
        output = []
        connection.close()
        for (status, job) in run_parallel(_transfer_avatar_to_s3, users, processes):
            output.append(job)
Example #17
def transfer_avatars_to_s3(processes: int) -> None:
    def _transfer_avatar_to_s3(user: UserProfile) -> int:
        avatar_path = user_avatar_path(user)
        file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", avatar_path) + ".original"
        try:
            with open(file_path, 'rb') as f:
                s3backend.upload_avatar_image(f, user, user)
                logging.info("Uploaded avatar for {} in realm {}".format(user.email, user.realm.name))
        except FileNotFoundError:
            pass
        return 0

    users = list(UserProfile.objects.all())
    if processes == 1:
        for user in users:
            _transfer_avatar_to_s3(user)
    else:  # nocoverage
        output = []
        connection.close()
        for (status, job) in run_parallel(_transfer_avatar_to_s3, users, processes):
            output.append(job)
Example #18
def transfer_message_files_to_s3(processes: int) -> None:
    def _transfer_message_files_to_s3(attachment: Attachment) -> int:
        file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", attachment.path_id)
        try:
            with open(file_path, 'rb') as f:
                bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
                guessed_type = guess_type(attachment.file_name)[0]
                upload_image_to_s3(bucket_name, attachment.path_id, guessed_type, attachment.owner, f.read())
                logging.info("Uploaded message file in path {}".format(file_path))
        except FileNotFoundError:  # nocoverage
            pass
        return 0

    attachments = list(Attachment.objects.all())
    if processes == 1:
        for attachment in attachments:
            _transfer_message_files_to_s3(attachment)
    else:  # nocoverage
        output = []
        connection.close()
        for status, job in run_parallel(_transfer_message_files_to_s3, attachments, processes):
            output.append(job)
Example #19
    sys.exit(0)

if options.forward_class_messages and not options.noshard:
    # Needed to get access to zephyr.lib.parallel
    sys.path.append("/home/zulip/zulip")
    if options.on_startup_command is not None:
        subprocess.call([options.on_startup_command])
    from zerver.lib.parallel import run_parallel
    print("Starting parallel zephyr class mirroring bot")
    jobs = list("0123456789abcdef")

    def run_job(shard: str) -> int:
        subprocess.call(args + ["--shard=%s" % (shard, )])
        return 0

    for (status, job) in run_parallel(run_job, jobs, threads=16):
        print("A mirroring shard died!")
    sys.exit(0)

backoff = RandomExponentialBackoff(timeout_success_equivalent=300)
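# Restart the mirroring bot with randomized exponential backoff; judging by
# the timeout_success_equivalent parameter, a run that survives 300 seconds
# counts as a success and resets the backoff delay.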
while backoff.keep_going():
    print("Starting zephyr mirroring bot")
    try:
        subprocess.call(args)
    except Exception:
        traceback.print_exc()
    backoff.fail()

error_message = """
ERROR: The Zephyr mirroring bot is unable to continue mirroring Zephyrs.
This is often caused by failing to maintain unexpired Kerberos tickets
Example #20
    sys.exit(0)

if options.forward_class_messages and not options.noshard:
    # Needed to get access to zephyr.lib.parallel
    sys.path.append("/home/zulip/zulip")
    if options.on_startup_command is not None:
        subprocess.call([options.on_startup_command])
    from zerver.lib.parallel import run_parallel
    print("Starting parallel zephyr class mirroring bot")
    jobs = list("0123456789abcdef")

    def run_job(shard):
        # type: (str) -> int
        subprocess.call(args + ["--shard=%s" % (shard,)])
        return 0
    for (status, job) in run_parallel(run_job, jobs, threads=16):
        print("A mirroring shard died!")
    sys.exit(0)

backoff = RandomExponentialBackoff(timeout_success_equivalent=300)
while backoff.keep_going():
    print("Starting zephyr mirroring bot")
    try:
        subprocess.call(args)
    except Exception:
        traceback.print_exc()
    backoff.fail()


error_message = """