def handle(self, *args, **options):
    weekly_index = Weekly.objects.get()
    site = Site.objects.get()

    # Move weekly articles into the index
    for article in WeeklyArticle.objects.all():
        print("moving article", article)

        Redirect.objects.create(
            old_path=Redirect.normalise_path(article.relative_url(site)),
            redirect_page=article,
        )

        # Find a slug that isn't taken under the index
        old_slug = article.slug
        article.slug = find_available_slug(weekly_index, article.slug)
        if article.slug != old_slug:
            print("changed article slug", old_slug, "=>", article.slug)
            article.save(update_fields=['slug'])

        article.move(weekly_index, pos='last-child')

    # Convert editions into redirects to the weekly index
    for edition in WeeklyEdition.objects.all():
        print("deleting edition", edition)

        Redirect.objects.create(
            old_path=Redirect.normalise_path(edition.relative_url(site)),
            redirect_page=weekly_index,
        )
        edition.delete()
def test_filtering_by_type(self):
    temp_redirect = Redirect.add_redirect("/from", "/to", False)
    perm_redirect = Redirect.add_redirect("/cat", "/dog", True)

    response = self.get(params={"is_permanent": "True"})

    self.assertContains(response, perm_redirect.old_path)
    self.assertNotContains(response, temp_redirect.old_path)
def test_filtering_by_site(self):
    site = Site.objects.first()
    site_redirect = Redirect.add_redirect("/cat", "/dog")
    site_redirect.site = site
    site_redirect.save()
    nosite_redirect = Redirect.add_redirect("/from", "/to")

    response = self.get(params={"site": site.pk})

    self.assertContains(response, site_redirect.old_path)
    self.assertNotContains(response, nosite_redirect.old_path)
def test_csv_export(self):
    Redirect.add_redirect("/from", "/to", False)

    response = self.get(params={"export": "csv"})
    self.assertEqual(response.status_code, 200)

    csv_data = response.getvalue().decode().split("\n")
    csv_header = csv_data[0]
    csv_entries = csv_data[1:]
    csv_entries = csv_entries[:-1]  # Drop empty last line

    self.assertEqual(csv_header, "From,Site,To,Type\r")
    self.assertEqual(len(csv_entries), 1)
    self.assertEqual(csv_entries[0], "/from,None,/to,temporary\r")
def test_xlsx_export(self):
    Redirect.add_redirect("/from", "/to", True)

    response = self.get(params={"export": "xlsx"})
    self.assertEqual(response.status_code, 200)

    workbook_data = response.getvalue()
    worksheet = load_workbook(filename=BytesIO(workbook_data))["Sheet1"]
    cell_array = [[cell.value for cell in row] for row in worksheet.rows]

    self.assertEqual(cell_array[0], ["From", "Site", "To", "Type"])
    self.assertEqual(len(cell_array), 2)
    self.assertEqual(cell_array[1], ["/from", "None", "/to", "permanent"])
def test_handling_of_existing_redirects(self):
    # the page we'll be triggering the change for here is...
    test_subject = self.event_index
    descendants = test_subject.get_descendants().live()

    # but before we do, let's add some redirects that we'll expect to conflict
    # with ones created by the signal handler
    redirect1 = Redirect.objects.create(
        old_path=Redirect.normalise_path(descendants.first().specific.url),
        site=self.site,
        redirect_link="/some-place",
        automatically_created=False,
    )
    redirect2 = Redirect.objects.create(
        old_path=Redirect.normalise_path(descendants.last().specific.url),
        site=self.site,
        redirect_link="/some-other-place",
        automatically_created=True,
    )

    self.trigger_page_slug_changed_signal(test_subject)

    # pre-existing manually-created redirects should be preserved
    from_db = Redirect.objects.get(id=redirect1.id)
    self.assertEqual(
        (
            redirect1.old_path,
            redirect1.site_id,
            redirect1.is_permanent,
            redirect1.redirect_link,
            redirect1.redirect_page,
        ),
        (
            from_db.old_path,
            from_db.site_id,
            from_db.is_permanent,
            from_db.redirect_link,
            from_db.redirect_page,
        ),
    )

    # pre-existing automatically-created redirects should be replaced completely
    self.assertFalse(Redirect.objects.filter(pk=redirect2.pk).exists())
    self.assertTrue(
        Redirect.objects.filter(
            old_path=redirect2.old_path,
            site_id=redirect2.site_id,
        ).exists()
    )
def save(self, *args, **kwargs):
    super(PublicationPage, self).save(*args, **kwargs)

    old_path = '/%s' % self.slug
    redirect = Redirect.objects.filter(old_path=old_path).first()
    if not redirect:
        Redirect(old_path=old_path, redirect_page=self).save()
def handle(self, path, **options):
    pages = list(Page.objects.filter(url_path__startswith=path))
    if not pages:
        self.stdout.write("No pages match the given URL path prefix")
        return

    self.stdout.write(f"This will create redirects for {len(pages)} pages. Continue? [y/n]")
    should_continue = input()
    if should_continue != 'y':
        self.stdout.write("Quitting.")
        return

    for page in pages:
        site_id, site_root, path = page.get_url_parts()
        self.stdout.write(f"Saving redirect '{path}' to '{page.title}'")
        Redirect.objects.update_or_create(
            old_path=Redirect.normalise_path(path),
            site_id=site_id,
            defaults={
                'redirect_page': page,
            },
        )
def read_and_save_data(self, file_contents, site):
    errors = ErrorList()

    try:
        book = xlrd.open_workbook(file_contents=file_contents.read())
    except IOError:
        errors.append(_("Something went wrong while reading the file."))
        return errors

    sheet = book.sheets()[0]

    with transaction.atomic():
        for row_id in range(0, sheet.nrows):
            data = sheet.row_values(row_id)
            old_path, redirect_link = data
            if old_path and redirect_link:
                if old_path.startswith('/') and redirect_link.startswith('/'):
                    # Based on the wagtail.contrib.redirects form validation
                    # https://github.com/wagtail/wagtail/blob/master/wagtail/contrib/redirects/forms.py#L34
                    _old_path = Redirect.normalise_path(old_path)
                    duplicates = Redirect.objects.filter(old_path=_old_path, site=site)
                    if duplicates:
                        errors.append(
                            _(
                                "Row: {} - Skipped import: the old path is "
                                "a duplicate of an earlier record."
                            ).format(row_id + 1)
                        )
                    else:
                        Redirect.objects.create(
                            old_path=old_path,
                            redirect_link=redirect_link,
                            site=site,
                        )
                else:
                    errors.append(
                        _("Row: {} - The old path and new path must both start with /").format(row_id + 1)
                    )
            else:
                errors.append(
                    _("Row: {} - The old path and new path must both be filled in.").format(row_id + 1)
                )

    return errors
def create_redirect_object_if_slug_changed(sender, **kwargs):
    instance = kwargs['instance']

    # The main part is getting the old URL from which the redirect is required.
    # Wagtail keeps a record of every page change in the form of revisions,
    # which makes it possible to track every change made to a page, including
    # the page slug. The next part is determining whether a revision is for a
    # draft or a published page. For example, an admin user starts editing a
    # page (with slug /original), changes the URL (/original-changed) and saves
    # it as a draft. On the next edit, the user changes the URL again
    # (/original-desired) and then publishes the page. In this case, the
    # redirect should be created from /original to /original-desired. A page
    # object whose has_unpublished_changes value is True is a draft revision.
    # Interestingly, when an admin user edits a page, they are editing the page
    # object created from JSON, and the value is stored as JSON in the revision.
    page_revisions = instance.revisions.order_by('-created_at', '-id')
    for revision in page_revisions:
        page_obj = revision.page.specific_class.from_json(revision.content_json)
        # The first revision's page object that has has_unpublished_changes
        # set to False is the last published page.
        if not page_obj.has_unpublished_changes:
            # Only create a redirect if the slug changed
            if instance.url != page_obj.url:
                old_path = Redirect.normalise_path(page_obj.url)
                Redirect.objects.update_or_create(
                    old_path=old_path,
                    defaults={'redirect_page': instance})
                # Also create redirect objects for children of this Page
                create_redirect_objects_for_children(old_path, page_obj)
            break
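# A minimal sketch (not part of the snippet above) of how a handler like
# create_redirect_object_if_slug_changed might be wired up, assuming it is
# meant to run on Wagtail's page_published signal, which passes sender,
# instance and revision keyword arguments. The app label and module paths
# below are hypothetical, for illustration only.
from django.apps import AppConfig


class RedirectAutomationConfig(AppConfig):
    name = 'redirect_automation'  # hypothetical app name

    def ready(self):
        # Import inside ready() so models are loaded before registration.
        from wagtail.core.signals import page_published  # wagtail.signals in Wagtail 3+
        from .signal_handlers import create_redirect_object_if_slug_changed  # hypothetical module

        page_published.connect(create_redirect_object_if_slug_changed)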
def test_redirect_is_detected(self):
    redirect = Redirect.add_redirect("/random/", "https://wagtail.io")

    response = self.client.get(
        reverse("nextjs:redirect_by_path:detail"),
        {"html_path": "/random/"},
    )

    self.assertEqual(response.status_code, 200)
    data = response.json()
    self.assertEqual(data["destination"], "https://wagtail.io")
def create_redirect_objects_for_children(parent_old_slug, parent):
    if not parent.get_children():
        return
    else:
        for child_page in parent.get_children():
            old_path = Redirect.normalise_path(parent_old_slug + '/' + child_page.slug)
            Redirect.objects.update_or_create(
                old_path=old_path,
                defaults={'redirect_page': child_page})
            create_redirect_objects_for_children(old_path, child_page)
def create_redirect_object_after_page_move(sender, **kwargs):
    if kwargs['url_path_before'] == kwargs['url_path_after']:
        return

    page_after = kwargs['instance']
    parent_page_before_url = kwargs['parent_page_before'].get_url()
    page_before_url = Redirect.normalise_path(parent_page_before_url + page_after.slug)

    Redirect.objects.update_or_create(
        old_path=page_before_url,
        defaults={'redirect_page': page_after})

    create_redirect_objects_for_children(page_before_url, page_after)
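# A minimal sketch, assuming create_redirect_object_after_page_move is meant
# as a receiver for Wagtail's post_page_move signal, which supplies the
# instance, parent_page_before, url_path_before and url_path_after keyword
# arguments read above. The import path shown is the Wagtail 2.x one and may
# differ in later releases (wagtail.signals).
from wagtail.core.signals import post_page_move

post_page_move.connect(create_redirect_object_after_page_move)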
def after_move_page_hook(request, instance):
    old_path = Redirect.normalise_path(instance.url)
    instance.refresh_from_db()

    if supports_automatic_redirect(instance) and instance.live:
        Redirect.objects.update_or_create(
            old_path=old_path,
            defaults={
                'redirect_page': instance,
            },
        )
        create_redirect_objects_for_children(old_path, instance)
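# A minimal sketch of registering a function like after_move_page_hook,
# assuming it is intended for Wagtail's 'after_move_page' hook and lives in
# the app's wagtail_hooks.py. The hooks import path shown is the Wagtail 2.x
# one (later releases use `from wagtail import hooks`).
from wagtail.core import hooks

hooks.register('after_move_page', after_move_page_hook)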
def get_object(self):
    path = self.request.GET.get("html_path", None)
    if path is None:
        raise ValidationError({"html_path": "Missing value"})

    if not path.startswith("/"):
        path = "/" + path

    path = Redirect.normalise_path(path)

    redirect = get_redirect(self.request, path)
    if not redirect:
        raise Http404
    return redirect
def run(*args):
    if not args:
        logger.error("error. Use --script-args [PATH] to specify the " +
                     "location of the redirects csv.")
    else:
        redirects_file = args[0]
        dupes = []
        successes = 0
        deletes = 0

        with open(redirects_file, "r") as csv_file:
            redirect_list = csv.reader(csv_file, delimiter=',')
            for [from_url, to_id] in redirect_list:
                with transaction.atomic():
                    # If conflicting redirects exist for this from_url,
                    # delete them
                    existing_redirects = Redirect.objects.filter(
                        old_path__iexact=Redirect.normalise_path(from_url))
                    if len(existing_redirects) > 0:
                        dupes.append(from_url)
                        num, _ = existing_redirects.delete()
                        deletes += num
                        logger.debug(f"Removed duplicate redirect: {from_url}")

                    # Add the desired redirect
                    page = Page.objects.get(id=to_id)
                    Redirect.add_redirect(from_url, redirect_to=page, is_permanent=True)
                    logger.info(f"Added redirect: {from_url} -> {page.title}")
                    successes += 1

        logger.info(f"Done! Added {successes} redirects")
        if len(dupes) > 0:
            logger.debug(f"Redirects already existed for these urls: {dupes}")
            logger.info(f"Replaced {deletes} redirects with updated ones")
def save(self, *args, **kwargs):
    old_path = '/%s' % self.uuid
    # using Redirect to enforce uuid uniqueness, as using a unique field is
    # prone to validation errors on page revisions
    existing_redirect = Redirect.objects.filter(old_path=old_path).first()
    if existing_redirect and existing_redirect.redirect_page.id == self.id:
        super(UUIDMixin, self).save(*args, **kwargs)
    else:
        self.uuid = uid()
        super(UUIDMixin, self).save(*args, **kwargs)

        old_path = '/%s' % self.uuid
        redirect = Redirect.objects.filter(old_path=old_path).first()
        if not redirect:
            Redirect(old_path=old_path, redirect_page=self).save()
        else:
            self.save(*args, **kwargs)
def create_redirect_objects_for_children(parent_old_slug, parent):
    if not parent.get_children():
        return
    else:
        for child_page in parent.get_children().specific():
            old_path = Redirect.normalise_path(
                parent_old_slug + '/' + child_page.slug)
            if supports_automatic_redirect(child_page) and child_page.live:
                Redirect.objects.update_or_create(
                    old_path=old_path,
                    defaults={
                        'redirect_page': child_page
                    }
                )
            create_redirect_objects_for_children(old_path, child_page)
def handle(self, *args, **kwargs):
    for src, dest in NON_WAGTAIL_REDIRECTS:
        r = WebRedirect(source_path=src, destination_path=dest, is_permanent=True)
        r.normalise_paths()
        WebRedirect.objects.get_or_create(
            source_path=r.source_path,
            destination_path=r.destination_path,
            is_permanent=True,
        )

    for old_path, dest_slug in WAGTAIL_REDIRECTS:
        try:
            page = Page.objects.get(slug=dest_slug)
        except Page.DoesNotExist:
            print("ERROR:", dest_slug)
            continue

        Redirect.objects.get_or_create(
            old_path=Redirect.normalise_path(old_path),
            defaults={"redirect_page": page, "is_permanent": True},
        )
def clean(self):
    """
    The unique_together condition on the model is ignored if site is None,
    so we need to check for duplicates manually.
    """
    cleaned_data = super().clean()

    if cleaned_data.get('site') is None:
        old_path = cleaned_data.get('old_path')
        if old_path is None:
            # cleaned_data['old_path'] is empty because it has already failed
            # validation, so don't bother with our duplicate test
            return

        old_path = Redirect.normalise_path(old_path)
        duplicates = Redirect.objects.filter(old_path=old_path, site__isnull=True)
        if self.instance.pk:
            duplicates = duplicates.exclude(id=self.instance.pk)

        if duplicates:
            raise forms.ValidationError(_("A redirect with this path already exists."))
def _new_get_redirect(request, path):
    if hasattr(request, 'LANGUAGE_CODE'):
        # If this path has an i18n_patterns locale prefix, remove it.
        locale_prefix = f'/{request.LANGUAGE_CODE}/'
        if path.startswith(locale_prefix):
            path = path.replace(locale_prefix, '/', 1)

    # Then hand off processing to the original redirect logic.
    redirect = _original_get_redirect(request, path)

    # Wagtail currently does not forward query arguments, so for
    # any URL with stripped query arguments we make a new, on-the-fly
    # redirect with the same path/site bindings, but an updated link
    # with the query arguments restored.
    #
    # See https://github.com/wagtail/wagtail/issues/7339 for more details.
    if redirect and request.GET and "?" not in redirect.link:
        redirect = Redirect(
            old_path=redirect.old_path,
            site=redirect.site,
            redirect_link=f'{redirect.link}?{urlencode(request.GET)}',
        )

    return redirect
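# A minimal sketch of installing _new_get_redirect, assuming the goal is to
# monkey-patch the module-level get_redirect() helper used by Wagtail's
# redirects middleware. Whether get_redirect is exposed at exactly this
# location can vary between Wagtail versions, so treat the attribute name as
# an assumption.
from wagtail.contrib.redirects import middleware as redirects_middleware

# Keep a reference to the original so _new_get_redirect can delegate to it.
_original_get_redirect = redirects_middleware.get_redirect
redirects_middleware.get_redirect = _new_get_redirect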
def release_html(request, site_name):
    try:
        major_frontend_version = None
        site_setting = SiteSettings.objects.get(uid=site_name)
        current_release = get_latest_live_release(site_setting.site.pk)
        site_id = site_setting.site.id
        release_id = request.GET.get('id')
        if release_id:
            release = Release.objects.get(uuid=release_id)
        else:
            if 'is_preview' in request.GET:
                release = get_latest_release(site_id)
            else:
                if current_release:
                    release = current_release
                else:
                    old_site_setting = SiteSettings.objects.get(
                        uid='{}old'.format(site_name))
                    release = get_latest_live_release(old_site_setting.site.pk)
                    site_id = old_site_setting.site.id
    except ObjectDoesNotExist:
        return HttpResponse('Page Not Found', status=404)

    if getattr(request, 'path', None):
        # This redirection doesn't support multisite on different domains
        url = urlparse(request.get_full_path())
        path = url.path[:-1] if url.path[-1] == '/' else url.path
        site_redirects = Redirect.get_for_site(site_id)
        wagtail_redirect = site_redirects.filter(old_path=path).first()
        if wagtail_redirect:
            if wagtail_redirect.redirect_page:
                url = url._replace(
                    path=wagtail_redirect.redirect_page.specific.link_url)
                redirect_path = urlunparse(url)
            else:
                # Redirect is to a link
                redirect_path = wagtail_redirect.redirect_link
            return redirect(redirect_path, permanent=wagtail_redirect.is_permanent)

    frontend_name = release.get_frontend_id_display()
    matchObj = re.match(r'V([0-9]+)\..* - .*', frontend_name.replace('\n', ''), re.I | re.M)
    if matchObj:
        try:
            major_frontend_version = int(matchObj.group(1))
        except ValueError:
            pass

    if release:
        frontend_id = release.frontend_id
        uuid = release.uuid
    else:
        # In this sc
        frontend_id = FrontendVersion.get_current_version()
        uuid = 'current'

    host = request.META['HTTP_HOST']
    if settings.CONTENT_STORE_ENDPOINT:
        content_store_endpoint = settings.CONTENT_STORE_ENDPOINT
    else:
        content_store_endpoint = get_protocol() + host + '/api'

    if major_frontend_version and major_frontend_version <= 1:
        # legacy: render the frontend index.html from before multisite was implemented
        index = FrontendVersion.get_html_for_version(frontend_id)
        substituted_index = index.replace(
            "/static/css/",
            "/{}/version/css/{}/?file_name=".format(site_name, frontend_id))
        substituted_index = substituted_index.replace(
            "/static/js/",
            "/{}/version/js/{}/?file_name=".format(site_name, frontend_id))
        substituted_index = substituted_index.replace(
            "/manifest", "/{}/public/manifest".format(site_name))
        substituted_index = substituted_index.replace(
            "/favicon", "/{}/public/{}/favicon".format(site_name, frontend_id))
        substituted_index = substituted_index.replace(
            "/webtrends.min.js",
            "/{}/public/{}/webtrends.min.js".format(site_name, frontend_id))

        host = request.META['HTTP_HOST']
        if settings.CONTENT_STORE_ENDPOINT:
            content_store_endpoint = settings.CONTENT_STORE_ENDPOINT
        else:
            content_store_endpoint = get_protocol() + host + "/api"

        substituted_index = substituted_index.replace("%apiurl%", content_store_endpoint)
        substituted_index = substituted_index.replace("%releaseid%", uuid)
        substituted_index = substituted_index.replace(
            "%adobe_tracking_url%", settings.ADOBE_TRACKING_URL)
        http_response = HttpResponse(substituted_index)
    else:
        template = Template(FrontendVersion.get_html_for_version(frontend_id))
        context = Context({
            'site_setting': site_setting,
            'api_url': content_store_endpoint,
            'release_id': uuid,
            'public_url': '/{}/public/{}'.format(site_name, frontend_id),
            'css_path': '/{}/version/css/{}/?file_name='.format(site_name, frontend_id),
            'js_path': '/{}/version/js/{}/?file_name='.format(site_name, frontend_id),
        })
        if settings.ENV == 'local':
            context['public_url'] = '/static'
        http_response = HttpResponse(template.render(context))

    if release and release.content_status == 1:
        http_response['Cache-Control'] = 'max-age=900'

    return http_response
def test_listing_contains_redirect(self):
    redirect = Redirect.add_redirect("/from", "/to", False)

    response = self.get()
    self.assertEqual(response.status_code, 200)
    self.assertContains(response, redirect.old_path)
def setUp(self):
    redirect = Redirect(old_path='/test', redirect_link='/')
    redirect.save()