Example no. 1
def analyze_failed_registration_nodes():
    """ If we can just retry the archive, but we can only do that if the
    ORIGINAL node hasn't changed.
    """
    # Get the registrations that are messed up
    failed_registration_nodes = find_failed_registrations()

    # Build up a list of dictionaries with info about these failed nodes
    failed_registration_info = []
    for broken_registration in failed_registration_nodes:
        unacceptable_node_logs_after_date = list(
            broken_registration.registered_from.get_aggregate_logs_queryset(Auth(broken_registration.registered_from.creator))
            .filter(date__gt=broken_registration.registered_date)
            .exclude(action__in=fa.LOG_WHITELIST)
            .exclude(action__in=fa.LOG_GREYLIST)
            .values_list('action', flat=True)
        )

        # Does it have any addons?
        addon_list = [
            addon for addon in ADDONS_REQUESTED
            if broken_registration.registered_from.has_addon(addon)
            and addon not in {'osfstorage', 'wiki'}
        ]
        has_addons = bool(addon_list)

        # Any registrations succeeded after the stuck one?
        # Not sure why broken_registration.registered_from.registrations was always 0 locally...
        succeeded_registrations_after_failed = []
        for other_reg in Registration.objects.filter(
            registered_from=broken_registration.registered_from,
            registered_date__gt=broken_registration.registered_date
        ):
            if other_reg.sanction:
                if other_reg.sanction.is_approved:
                    succeeded_registrations_after_failed.append(other_reg._id)
            else:
                succeeded_registrations_after_failed.append(other_reg._id)

        can_be_reset = fa.verify(broken_registration)
        logger.info('Found broken registration {}'.format(broken_registration._id))
        failed_registration_info.append(
            {
                'registration': broken_registration._id,
                'registered_date': broken_registration.registered_date,
                'original_node': broken_registration.registered_from._id,
                'logs_on_original_after_registration_date': unacceptable_node_logs_after_date,
                'has_addons': has_addons,
                'addon_list': addon_list,
                'succeeded_registrations_after_failed': succeeded_registrations_after_failed,
                'can_be_reset': can_be_reset,
                'registered_from_public': broken_registration.registered_from.is_public,
            }
        )

    return failed_registration_info
Example no. 2
def analyze_failed_registration_nodes():
    """ If we can just retry the archive, but we can only do that if the
    ORIGINAL node hasn't changed.
    """
    # Get the registrations that are messed up
    failed_registration_nodes = Registration.find_failed_registrations()

    # Build up a list of dictionaries with info about these failed nodes
    failed_registration_info = []
    for broken_registration in failed_registration_nodes:
        unacceptable_node_logs_after_date = list(
            broken_registration.registered_from.get_logs_queryset(Auth(broken_registration.registered_from.creator))
            .filter(date__gt=broken_registration.registered_date)
            .exclude(action__in=fa.LOG_WHITELIST)
            .exclude(action__in=fa.LOG_GREYLIST)
            .values_list('action', flat=True)
        )

        # Does it have any addons?
        addon_list = [
            addon for addon in ADDONS_REQUESTED
            if broken_registration.registered_from.has_addon(addon)
            and addon not in {'osfstorage', 'wiki'}
        ]
        has_addons = bool(addon_list)

        # Any registrations succeeded after the stuck one?
        # Not sure why broken_registration.registered_from.registrations was always 0 locally...
        succeeded_registrations_after_failed = []
        for other_reg in Registration.objects.filter(
            registered_from=broken_registration.registered_from,
            registered_date__gt=broken_registration.registered_date
        ):
            if other_reg.sanction:
                if other_reg.sanction.is_approved:
                    succeeded_registrations_after_failed.append(other_reg._id)
            else:
                succeeded_registrations_after_failed.append(other_reg._id)

        can_be_reset = fa.verify(broken_registration)
        logger.info('Found broken registration {}'.format(broken_registration._id))
        failed_registration_info.append(
            {
                'registration': broken_registration._id,
                'registered_date': broken_registration.registered_date,
                'original_node': broken_registration.registered_from._id,
                'logs_on_original_after_registration_date': unacceptable_node_logs_after_date,
                'has_addons': has_addons,
                'addon_list': addon_list,
                'succeeded_registrations_after_failed': succeeded_registrations_after_failed,
                'can_be_reset': can_be_reset,
                'registered_from_public': broken_registration.registered_from.is_public,
            }
        )

    return failed_registration_info
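The function above (Examples 1 and 2 differ only in how the failed registrations and the source node's logs are queried) returns a list of per-registration dictionaries. Below is a minimal, illustrative sketch of consuming that report by dumping it to JSON for triage; the wrapper function, file name, and datetime handling are assumptions and not part of the original script.

import json

def dump_failed_registration_report(path='failed_registrations.json'):
    # Hypothetical helper: serialize the report produced by
    # analyze_failed_registration_nodes() for offline triage.
    report = analyze_failed_registration_nodes()
    with open(path, 'w') as fp:
        # registered_date is a datetime, so fall back to str() during serialization
        json.dump(report, fp, indent=2, default=str)
    resettable = sum(1 for row in report if row['can_be_reset'])
    print('{} broken registrations, {} can be reset'.format(len(report), resettable))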
Example no. 3
    def post(self, request, *args, **kwargs):
        from osf.management.commands.force_archive import archive, verify
        stuck_reg = self.get_object()
        if verify(stuck_reg):
            try:
                archive(stuck_reg)
                messages.success(request, 'Registration archive process has restarted')
            except Exception as exc:
                messages.error(request, 'This registration cannot be unstuck due to {}. '
                                        'If the problem persists, get a developer to fix it.'.format(exc.__class__.__name__))

        else:
            messages.error(request, 'This registration may not technically be stuck.'
                                    ' If the problem persists, get a developer to fix it.')

        return redirect(reverse_node(self.kwargs.get('guid')))
Example no. 4
    def post(self, request, *args, **kwargs):
        from osf.management.commands.force_archive import archive, verify
        stuck_reg = self.get_object()
        if verify(stuck_reg):
            try:
                archive(stuck_reg)
                messages.success(request, 'Registration archive process has restarted')
            except Exception as exc:
                messages.error(request, 'This registration cannot be unstuck due to {}. '
                                        'If the problem persists, get a developer to fix it.'.format(exc.__class__.__name__))

        else:
            messages.error(request, 'This registration may not technically be stuck.'
                                    ' If the problem persists, get a developer to fix it.')

        return redirect(reverse_node(self.kwargs.get('guid')))
Example no. 5
    def post(self, request, *args, **kwargs):
        # Prevents circular imports that cause admin app to hang at startup
        from osf.management.commands.force_archive import archive, verify
        stuck_reg = self.get_object()
        if verify(stuck_reg):
            try:
                archive(stuck_reg)
                messages.success(
                    request, 'Registration archive process has restarted')
            except Exception as exc:
                messages.error(
                    request,
                    f'This registration cannot be unstuck due to {exc.__class__.__name__}. '
                    'If the problem persists, get a developer to fix it.')
        else:
            messages.error(
                request, 'This registration may not technically be stuck.'
                ' If the problem persists, get a developer to fix it.')

        return redirect(self.get_success_url())
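Examples 3-5 are variants of the same admin view: they import verify() and archive() from the force_archive management command, retry the archive when verification passes, and surface the result through Django messages before redirecting. When debugging outside the admin UI, the same two calls can be made directly. The sketch below is illustrative, assuming a Django shell with the OSF apps loaded; 'abcde' is a placeholder GUID.

from osf.models import Registration
from osf.management.commands.force_archive import archive, verify

stuck_reg = Registration.load('abcde')  # placeholder GUID
if stuck_reg and verify(stuck_reg):
    # archive() may raise, which is why the admin view wraps it in try/except
    archive(stuck_reg)
else:
    print('Registration missing, or not safe to force-archive')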
Example no. 6
def tarchive(reg_id):
    """Manually complete a stuck 'github' archive target: download the source
    repository's tarball from GitHub and upload its contents to the
    registration's osfstorage.
    """
    start_time = timezone.now()
    dst = Registration.load(reg_id)
    if not dst or not dst.archiving:
        raise Exception('Registration not found or not currently archiving')
    assert verify(dst), 'Unable to verify registration'
    target = dst.archive_job.get_target('github')
    if not target or target.done:
        raise Exception('Invalid archive job target')
    src = dst.registered_from
    ghns = src.get_addon('github')
    cli = github3.login(token=ghns.external_account.oauth_key)
    cli.set_client_id(github_settings.CLIENT_ID, github_settings.CLIENT_SECRET)
    repo = cli.repository(ghns.user, ghns.repo)
    logger.info('Downloading tarball of repository...')
    assert repo.archive('tarball', TAR_PATH)
    logger.info('Download complete.')
    with tarfile.open(TAR_PATH) as tf:
        logger.info('Extracting tarball to {} ...'.format(EXTRACTED_PATH))
        tf.extractall(EXTRACTED_PATH)
        logger.info('Extraction complete.')
    logger.info('Preparing node for upload...')
    # Remove any leftover archive folder from a previous attempt so the
    # re-upload does not collide with it
    if dst.files.exclude(type='osf.trashedfolder').filter(
            name=ghns.archive_folder_name.replace('/', '-')).exists():
        dst.files.exclude(type='osf.trashedfolder').get(
            name=ghns.archive_folder_name.replace('/', '-')).delete()
    logger.info('Preparing to upload...')
    dst_osfs = dst.get_addon('osfstorage')
    recursive_upload(dst,
                     EXTRACTED_PATH,
                     dst_osfs.get_root(),
                     name=ghns.archive_folder_name)
    logger.info('Archive upload complete\nMarking target as archived...')
    complete_archive_target(dst, 'github')
    if dst.logs.filter(date__gte=start_time).exists():
        logger.info('Cleaning up logs...')
        dst.logs.filter(date__gte=start_time).update(should_hide=True)
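Example 6 is a one-off helper that manually completes the 'github' archive target: it downloads the repository tarball via the GitHub API, extracts it, re-uploads the contents to the registration's osfstorage, marks the target archived, and hides any logs generated along the way. TAR_PATH, EXTRACTED_PATH, recursive_upload and complete_archive_target are assumed to be defined elsewhere in the same script. A minimal, illustrative usage sketch ('abcde' is a placeholder GUID):

reg_id = 'abcde'  # placeholder GUID
dst = Registration.load(reg_id)
target = dst.archive_job.get_target('github') if dst else None
if target and not target.done:
    try:
        tarchive(reg_id)
    except Exception:
        logger.exception('Manual github archive failed for {}'.format(reg_id))
else:
    logger.info('No pending github target for {}'.format(reg_id))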