# Example #1
# 0
def edit(request, url_param="everipedia-blank-page-template"):
    """Render the article editor, and service its AJAX submit/verify calls.

    Three modes, selected by GET flags:
      * ``submission=1``   -- sanitize the POSTed article HTML, pin it to the
        local IPFS daemon, cache the blob, and return the new/old/grandparent
        IPFS hashes as JSON.
      * ``verification=1`` -- check that the proposal for ``newIPFS`` exists
        on the EOS chain, then update the cached article row and record an
        EditProposal. Returns the same hash triple as JSON.
      * neither            -- render the TinyMCE editing page.

    Args:
        request: Django HttpRequest.
        url_param: Article slug; defaults to the blank-page template.

    Returns:
        JsonResponse for the AJAX modes, otherwise an HttpResponse with the
        rendered edit page.
    """
    # Fetch the article object from the URL parameter
    cleanedParamList = getTheArticleObject(url_param)
    articleObject = cleanedParamList[1]

    # Pull the article HTML from the cache
    hashObject = HashCache.objects.get(
        ipfs_hash=articleObject.ipfs_hash_current)

    # Check if the article is being submitted
    if request.GET.get('submission') == '1':
        # Get the POSTed article HTML
        innerHTMLBlock = request.POST.get("blurbHTML")

        # Clean up and canonicalize the submitted HTML.
        # NOTE(review): the [5:] slice strips a fixed-length prefix emitted by
        # the sanitizer -- confirm against entireArticleHTMLSanitizer before
        # changing it.
        innerHTMLBlock = entireArticleHTMLSanitizer(innerHTMLBlock)[5:]

        # Remove temporary HTML elements that were injected into the TinyMCE
        # in order to make the page more interactive
        theSoup = BeautifulSoup(innerHTMLBlock, "html.parser")
        try:
            badClasses = [
                'add-row-btn',
                'button-wrap',
                'add-new-ibox',
                'add-heading-wrap',
            ]
            for badClass in badClasses:
                listOfBads = theSoup.findAll(class_=badClass)
                for item in listOfBads:
                    item.extract()
        except Exception:
            # Best-effort cleanup: a malformed fragment must not block the edit
            pass

        # Convert the BeautifulSoup object back into a string
        innerHTMLBlock = unicode(theSoup)

        # Render the article HTML and its wrapper as a string and save it to
        # the variable
        resultHTML = render_to_string('enterlink/blockchain_article_wrap.html',
                                      {
                                          'innerHTMLBlock': innerHTMLBlock,
                                      })

        # Connect to the local IPFS daemon and add the article HTML
        api = ipfsapi.connect('127.0.0.1', 5001)
        ipfs_hash_new = api.add_str(resultHTML)
        print("THE OFFICIAL NEW HASH IS: %s" % ipfs_hash_new)

        # Cache the article HTML. A failed create() means the hash (and
        # therefore the content) already exists, i.e. nothing changed.
        try:
            hashTable = HashCache.objects.create(
                ipfs_hash=ipfs_hash_new,
                timestamp=datetime.datetime.now(tz=pytz.utc),
                html_blob=resultHTML,
                articletable=articleObject)
        except Exception:
            return HttpResponse("NO CHANGES DETECTED!")

        # Remember the previous hashes for the response
        ipfs_hash_old = articleObject.ipfs_hash_current
        ipfs_hash_grandparent = articleObject.ipfs_hash_parent

        # Need to switch this later. MAKE SURE TO FIX BLURB COMPARE TOO
        return JsonResponse({
            "newIPFS": ipfs_hash_new,
            "oldIPFS": ipfs_hash_old,
            "grandparentIPFS": ipfs_hash_grandparent
        })

    # Verify that submission was actually recorded on the EOS chain
    if request.GET.get('verification') == '1':
        # Get the IPFS hash
        ipfs_hash_new = request.GET.get('newIPFS')

        # Because the EOS get tables command does not allow string lookups,
        # convert the IPFS hash to a 64-bit unsigned integer
        proposal_id = ipfs_to_uint64_trunc(ipfs_hash_new)
        proposalID = int(proposal_id)
        proposalIDPlusOne = proposalID + 1

        # Prepare the JSON for the get table API call
        jsonDict = {
            "scope": "eparticlectr",
            "code": "eparticlectr",
            "table": "propstbl",
            "key_type": "i64",
            "index_position": "3",
            "lower_bound": proposalID,
            "upper_bound": proposalIDPlusOne,
            "json": "true"
        }

        # Make the API request and parse the JSON into a variable
        page = requests.post(
            'https://nodes.get-scatter.com:443/v1/chain/get_table_rows',
            headers=REQUEST_HEADER,
            timeout=10,
            verify=False,
            json=jsonDict)

        json_data = json.loads(page.text)

        # Make sure the proposal is actually recorded on-chain.
        # BUGFIX: the row lookup used to run *before* this try block, so a
        # missing proposal raised an unhandled IndexError/KeyError instead of
        # producing the "NO PROPOSAL FOUND" response below.
        try:
            # Get the status of the proposal
            proposalStatus = json_data['rows'][0]['status']
            if proposalStatus != 0:
                # Possibly delete the IPFS entry to prevent spamming
                pass
        except (IndexError, KeyError, TypeError):
            return JsonResponse(
                {
                    'status': 'false',
                    'message': "NO PROPOSAL FOUND"
                },
                status=500)

        # Parse some variables from the JSON
        proposer = json_data['rows'][0]['proposer']
        currentTime = json_data['rows'][0]['starttime']
        endTime = json_data['rows'][0]['endtime']

        # Get the cached article HTML and parse it
        hashTable = HashCache.objects.get(ipfs_hash=ipfs_hash_new)
        parsedDict = parseBlockchainHTML(hashTable.html_blob)

        # Remember the previous hashes before rotating them below
        ipfs_hash_old = articleObject.ipfs_hash_current
        ipfs_hash_grandparent = articleObject.ipfs_hash_parent

        # Update the articleObject cache with data from the article HTML file
        # (from the cache)
        articleObject.ipfs_hash_parent = articleObject.ipfs_hash_current
        articleObject.ipfs_hash_current = ipfs_hash_new
        articleObject.blurb_snippet = parsedDict["BLURB"]
        articleObject.page_type = parsedDict["PAGEMETADATA"]["page_type"]
        articleObject.page_title = parsedDict["PAGETITLE"]
        articleObject.lastmod_timestamp = timezone.now()
        articleObject.is_removed = parsedDict["PAGEMETADATA"]["is_removed"]
        articleObject.is_removed_from_index = False
        articleObject.is_adult_content = parsedDict["PAGEMETADATA"][
            "is_adult_content"]
        articleObject.save()

        # Record the edit proposal internally. This should match all the
        # proposals that are on-chain.
        EditProposal.objects.create(
            id=ipfs_to_uint64_trunc(ipfs_hash_new),
            proposed_article_hash=ipfs_hash_new,
            old_article_hash=ipfs_hash_old,
            grandparent_hash=ipfs_hash_grandparent,
            proposer=proposer,
            proposer_64t=encodeNameSwappedEndian(proposer),
            starttime=currentTime,
            endtime=endTime,
            status=0,
            article=articleObject)

        # Need to switch this later. MAKE SURE TO FIX BLURB COMPARE TOO
        return JsonResponse({
            "newIPFS": ipfs_hash_new,
            "oldIPFS": ipfs_hash_old,
            "grandparentIPFS": ipfs_hash_grandparent
        })

    # Overlay a saved draft on top of the cached blob if one was requested
    if 'draft' in request.GET:
        account_name = request.GET.get('draft')
        draft = SavedDraft.objects.get(article_slug=articleObject.slug,
                                       account_name=account_name)
        hashObject.html_blob = draft.html_blob

    # Update the Django templating dictionary for the edit page
    contextDictionary = {}
    contextDictionary.update({"ARTICLE_BLOB": hashObject.html_blob})
    contextDictionary.update({"ARTICLE_NAME": articleObject.page_title})
    contextDictionary.update({"ARTICLE_SLUG": articleObject.slug})
    contextDictionary.update({"ARTICLE_SLUG_ALT": articleObject.slug_alt})
    contextDictionary.update({"ARTICLE_IS_REMOVED": articleObject.is_removed})
    contextDictionary.update({"ARTICLE_PHOTO_URL": articleObject.photo_url})
    contextDictionary.update(
        {"ARTICLE_THUMB_URL": articleObject.photo_thumb_url})
    contextDictionary.update({"ARTICLE_PAGE_TYPE": articleObject.page_type})
    contextDictionary.update({"newlinkfileform": NewlinkFileForm()})
    contextDictionary.update({"linkform": LinkForm()})
    contextDictionary.update({
        "pagemetaform":
        PageMetaForm(
            initial={
                'page_type': articleObject.page_type,
                'sub_page_type': articleObject.page_sub_type,
                'is_removed': articleObject.is_removed,
                'is_adult_content': articleObject.is_adult_content
            })
    })

    # Return the HTML for the editing page
    return render(request, 'enterlink/edit.html', contextDictionary)
# Example #2
# 0
def AJAX_Hoverlink(request, url_param):
    """Serve the citation-preview ("hoverlink") bubble for a link in an article.

    Looks up the requested ``target_url`` among the article's parsed citations
    (or, failing that, its media items — matched by exact URL or by YouTube
    video id) and renders either the desktop/tablet lightbox iframe or the
    mobile AMP hover bubble.

    Args:
        request: Django HttpRequest; reads ``lightbox`` and ``target_url``
            from the query string.
        url_param: Article slug.

    Returns:
        Rendered HttpResponse, or a redirect to /error/ for removed articles.
    """
    # Get the article from the url parameter
    cleanedParamList = getTheArticleObject(url_param)
    articleObject = cleanedParamList[1]

    # Fail if the article has been removed
    try:
        if articleObject.is_removed:
            return HttpResponseRedirect('/error/')
    except Exception:
        pass

    # Use the lightbox for desktop/tablet, the hover bubble for mobile AMP
    useLightBox = (request.GET.get('lightbox') == "1")

    # Get the link to use. (A dead try/except around
    # request.GET.get('media_type') and its never-used result were removed.)
    try:
        linkURL = urllib.unquote_plus(request.GET['target_url'])
    except Exception:
        linkURL = ""
        print("LinkURL not found")

    # Get the cached HTML for the article
    cacheObject = HashCache.objects.get(ipfs_hash=articleObject.ipfs_hash_current)

    # Parse the HTML
    resultDictionary = parseBlockchainHTML(cacheObject.html_blob)

    # Check for YouTube
    youtubeResult = getYouTubeIdIfPresent(linkURL)

    # Loop through the article's citations until the requested one is found
    # (exact URL match first, then YouTube-id containment)
    citationObject = ""
    for citation in resultDictionary["CITATION_OBJECTS"]:
        if citation["url"] == linkURL:
            citationObject = citation
            break
        if youtubeResult and youtubeResult in citation["url"]:
            citationObject = citation
            break

    # Fill the Django template context with relevant data from the article...
    contextDictionary = {}
    contextDictionary.update({"ARTICLE_NAME": articleObject.page_title})
    contextDictionary.update({"ARTICLE_SLUG": articleObject.slug})
    contextDictionary.update({"ARTICLE_SLUG_ALT": articleObject.slug_alt})
    contextDictionary.update({"ARTICLE_IS_REMOVED": articleObject.is_removed})
    contextDictionary.update({"ARTICLE_PHOTO_URL": articleObject.photo_url})
    contextDictionary.update({"ARTICLE_THUMB_URL": articleObject.photo_thumb_url})
    contextDictionary.update({"ARTICLE_PAGE_TYPE": articleObject.page_type})
    contextDictionary.update({"BLURB_SNIPPET": articleObject.blurb_snippet})

    # ... and the citation JSON
    try:
        # Try the main citations first. When no citation matched above,
        # citationObject is "" and the subscript raises TypeError, which
        # deliberately drops us into the media fallback below.
        contextDictionary.update({"CITATION_DESCRIPTION": citationObject["description"]})
        contextDictionary.update({"CITATION_TIMESTAMP": citationObject["timestamp"]})
        contextDictionary.update({"CITATION_URL": citationObject["url"]})
        contextDictionary.update({"CITATION_THUMB": citationObject["thumb"]})
        contextDictionary.update({"CITATION_MIME": citationObject["mime"]})
        contextDictionary.update({"CITATION_CATEGORY": citationObject["category"]})
        contextDictionary.update({"CITATION_YOUTUBE_ID": youtubeResult})
    except Exception:
        # Otherwise try the media items (images etc.), matched by URL or thumb
        mediaObject = ""
        for mediaItem in resultDictionary["MEDIA_OBJECTS"]:
            if mediaItem["url"] == linkURL or mediaItem["thumb"] == linkURL:
                mediaObject = mediaItem
                break
            if youtubeResult and youtubeResult in mediaItem["url"]:
                mediaObject = mediaItem
                break
        contextDictionary.update({"CITATION_DESCRIPTION": mediaObject["caption"]})
        contextDictionary.update({"CITATION_TIMESTAMP": mediaObject["timestamp"]})
        contextDictionary.update({"CITATION_URL": mediaObject["url"]})
        contextDictionary.update({"CITATION_THUMB": mediaObject["thumb"]})
        contextDictionary.update({"CITATION_MIME": mediaObject["mime"]})
        contextDictionary.update({"CITATION_CATEGORY": mediaObject["class"]})
        contextDictionary.update({"CITATION_YOUTUBE_ID": youtubeResult})

    # Render the hoverlink bubble appropriately
    if useLightBox:
        # Desktop and Tablet
        return render(request, "enterlink/hoverlink_iframe_blockchain.html", contextDictionary)
    else:
        # Mobile
        return render(request, 'enterlink/hoverlink_ajax_blockchain.html', contextDictionary)
# Example #3
# 0
def template_handler_blockchain(request, url_param='url_param'):
    """Serve the styled (desktop or AMP-mobile) page for a blockchain article.

    Fast path: when the article has not been modified since the styled page
    was last cached, stream the pre-rendered, gzipped copy straight from
    Azure blob storage. Slow path: regenerate the styled page from the raw
    article HTML, upload it back to the blob cache, refresh the article row's
    parsed metadata, and return the freshly rendered response.

    Args:
        request: Django HttpRequest; ``request.mobile`` / ``request.tablet``
            and an '/amp/' path segment select the mobile template.
        url_param: Article slug ('url_param' default is a placeholder).

    Returns:
        HttpResponse with the styled article HTML, an empty response for
        blank ("/None") requests, or a redirect to /error/ for removed
        articles.
    """
    # Handle blank requests
    if ("/None" in url_param):
        return HttpResponse("")

    # Get the article object from the url parameter
    cleanedParamList = getTheArticleObject(url_param)
    articleObject = cleanedParamList[1]

    if(articleObject.is_removed):
        return HttpResponseRedirect('/error/')

    # See if the request is from mobile or tablet
    useMobile = False
    if (request.mobile or '/amp/' in request.path) and not request.tablet :
        useMobile = True

    # Get the last activity time
    lastActivityTime = articleObject.lastmod_timestamp

    # Get the relevant cache depending on the request type
    if useMobile:
        styledHTMLCacheTime = articleObject.mobile_cache_timestamp
    else:
        styledHTMLCacheTime = articleObject.desktop_cache_timestamp

    # Assume an old cache time if it is NULL, since comparing a datetime with NULL gives errors
    # NOTE(review): dateTimeImportClass appears to be an alias for a datetime
    # constructor -- confirm at the import site.
    if (styledHTMLCacheTime is None):
        styledHTMLCacheTime = dateTimeImportClass(2000, 1, 1, 1, 1, 1, tzinfo=pytz.timezone('UTC'))
    useStyledHTMLCache = (lastActivityTime <= styledHTMLCacheTime)

    # Get the hash cache time
    try:
        hashCacheHTMLTime = HashCache.objects.get(ipfs_hash=articleObject.ipfs_hash_current).timestamp

        # Assume an old cache time if it is NULL, since comparing a datetime with NULL gives errors
        if (hashCacheHTMLTime is None):
            hashCacheHTMLTime = dateTimeImportClass(2000, 1, 1, 1, 1, 1, tzinfo=pytz.timezone('UTC'))
    except:
        # No HashCache row for the current hash: fall back to the same "very
        # old" sentinel time
        hashCacheHTMLTime = dateTimeImportClass(2000, 1, 1, 1, 1, 1, tzinfo=pytz.timezone('UTC'))

    # Detect the incoming language (restored at the end of the slow path)
    incomingLanguage = translation.get_language()

    # Determine whether to pull the cached CSS-stylized pages from Azure blob storage, or to generate new ones
    if useStyledHTMLCache == True:
        # Determing to pull either the mobile or desktop / tablet pages
        if useMobile:
            fetchURL = 'https://epcdndisk.blob.core.windows.net/mobile-template-blockchain/%s.html.gz' % (articleObject.ipfs_hash_current)
        else:
            fetchURL = 'https://epcdndisk.blob.core.windows.net/desktop-template-blockchain/%s.html.gz' % (articleObject.ipfs_hash_current)

        # Fetch the cached page
        response = urllib2.urlopen(fetchURL)

        # Read the page and unzip it
        responseNugget = response.read()
        content = gzip.GzipFile(fileobj=StringIO.StringIO(responseNugget)).read()

        # Return the HTML
        response = HttpResponse(content)
        return response
    else:
        # Set the user as anonymous so the page is rendered without
        # user-specific state before being cached for everyone
        request.user = AnonymousUser()

        # Fetch the language that the article was created in
        renderLang = articleObject.page_lang

        # Set the session language to the page language. This is temporary so the page is generated with the correct language
        request = nonPOSTSetLanguage(request, renderLang)

        # Fetch the site notice
        try:
            # Try the language specific version
            theNotice = SiteNotice.objects.get(id=2, lang=renderLang)
        except:
            # Default to English if that fails
            theNotice = SiteNotice.objects.get(id=2, lang="en")



        # Get the raw, unstyled HTML for the page
        unstyledHTML = get_article_raw_html(ipfs_hash=articleObject.ipfs_hash_current, lastactivity=lastActivityTime, articletable=articleObject )
        if useMobile:
            # Parse the HTML for relevant data
            newDictionary = parseBlockchainHTML(unstyledHTML, useAMP=True)

            # Set some variables
            newDictionary.update({"CURRENT_IPFS_HASH": articleObject.ipfs_hash_current})
            newDictionary.update({"LANG_OVERRIDE": renderLang})
            newDictionary.update({'SITE_NOTICE': theNotice.mobile_html})
            newDictionary.update({'IS_REMOVED_FROM_INDEX': articleObject.is_removed_from_index})
            newDictionary.update({'CURRENT_PAGEVIEWS': articleObject.pageviews})

            # Generate the CSS-styled page from the template, filling in variables parsed from the unstyled/raw HTML
            styledHTMLResponse =  render(request, 'enterlink/template_blockchain_styled_amp.html', newDictionary)

            # Store the CSS-styled mobile page to Azure blob
            refreshTemplateCacheBlockchain(articleObject.ipfs_hash_current, styledHTMLResponse.content, 'mobile-template-blockchain')

            # Set the cache timestamp
            articleObject.mobile_cache_timestamp = timezone.now()
            articleObject.save()
        else:
            # Parse the HTML for relevant data
            newDictionary = parseBlockchainHTML(unstyledHTML, useAMP=False)
            newDictionary.update({'SITE_NOTICE': theNotice.desktop_html})

            # Set some variables
            newDictionary.update({"CURRENT_IPFS_HASH": articleObject.ipfs_hash_current})
            newDictionary.update({"LANG_OVERRIDE": renderLang})
            newDictionary.update({'IS_REMOVED_FROM_INDEX': articleObject.is_removed_from_index})
            newDictionary.update({'CURRENT_PAGEVIEWS': articleObject.pageviews})
            newDictionary.update({'isTemplatePage': True})

            # Generate the CSS-styled page from the template, filling in variables parsed from the unstyled/raw HTML
            styledHTMLResponse = render(request, 'enterlink/template_blockchain_styled_desktop.html', newDictionary)

            # Store the CSS-styled desktop/tablet page to Azure blob
            refreshTemplateCacheBlockchain(articleObject.ipfs_hash_current, styledHTMLResponse.content, 'desktop-template-blockchain')

            # Set the cache timestamp
            articleObject.desktop_cache_timestamp = timezone.now()
            articleObject.save()

        # Construct a blurb snippet
        miniBlurb = blurbSplitter(newDictionary["BLURB"], truncateLimit=2048, miniblurb=True)[0]
        miniBlurb = whiteSpaceStripper(miniBlurb)

        # Update the article object with info from the HTML
        ArticleTable.objects.filter(slug__iexact=articleObject.slug).update(
            page_title=newDictionary["PAGETITLE"],
            blurb_snippet=miniBlurb,
            photo_url=newDictionary["PHOTOOBJECT"]["url"],
            photo_thumb_url=newDictionary["PHOTOOBJECT"]["thumb"],
            page_type=newDictionary["PAGEMETADATA"]["page_type"],
            page_sub_type=newDictionary["PAGEMETADATA"]["sub_page_type"],
            is_removed=newDictionary["PAGEMETADATA"]["is_removed"],
            is_removed_from_index=newDictionary["PAGEMETADATA"]["is_indexed"],
            bing_index_override=newDictionary["PAGEMETADATA"]["bing_index_override"],
            is_adult_content=newDictionary["PAGEMETADATA"]["is_adult_content"]
        )

        # Set the pageviews (only when the row has none yet)
        if articleObject.pageviews == None:
            articleObject.pageviews = newDictionary["PAGEMETADATA"]["pageviews"]
            articleObject.save()

        # Set the language back to what it was before
        nonPOSTSetLanguage(request, incomingLanguage)

        # Return the HTML
        return styledHTMLResponse
# Example #4
# 0
def edit(request, url_param="everipedia-blank-page-template", lang_param=""):
    """Render the article editor (merge-aware), plus its AJAX submit/verify.

    Three modes, selected by GET flags:
      * ``submission=1``   -- sanitize the POSTed article HTML, stamp the
        last-modified time, pin the result to the local IPFS daemon, cache
        the blob, and return the new/old/grandparent hashes as JSON.
      * ``verification=1`` -- finalize the edit: optionally retire the merge
        source, update the cached article row, reindex, and record an
        EditProposal. Returns the same hash triple as JSON.
      * neither            -- render the TinyMCE editing page, optionally as
        a merge preview (``from_hash``/``to_hash``) or a saved draft.

    Args:
        request: Django HttpRequest.
        url_param: Article slug; defaults to the blank-page template.
        lang_param: Language code used for slug lookup and editor structure.

    Returns:
        JsonResponse for the AJAX modes, otherwise an HttpResponse with the
        rendered edit page (or a redirect that canonicalizes the URL).
    """
    MERGED_FROM_HASH = ""

    # Pull the article HTML to edit: either a merge preview of two hashes,
    # or the current cached blob for the slug.
    if "from_hash" in request.GET:
        fromHash = request.GET.get("from_hash")
        toHash = request.GET.get("to_hash")

        if toHash == "":
            return HttpResponseRedirect(u"/wiki/lang_%s/%s/edit/" %
                                        (lang_param, url_param))

        MERGED_FROM_HASH = fromHash

        # Fetch the article object from the target hash
        cleanedParamList = getTheArticleObject(toHash)
        articleObject = cleanedParamList[1]

        # Canonicalize the URL if the slug does not match the merge target
        if articleObject.slug != url_param and articleObject.slug_alt != url_param:
            return HttpResponseRedirect(
                u"/wiki/lang_%s/%s/edit/?from_hash=%s&to_hash=%s" %
                (articleObject.page_lang, articleObject.slug, fromHash,
                 toHash))

        blobHTML = merge_page(fromHash, toHash)

    else:
        # Fetch the article object from the URL parameter
        cleanedParamList = getTheArticleObject(url_param,
                                               passedLang=lang_param)
        articleObject = cleanedParamList[1]
        hashObject = HashCache.objects.get(
            ipfs_hash=articleObject.ipfs_hash_current)
        blobHTML = hashObject.html_blob

    # Check if the article is being submitted
    if request.GET.get('submission') == '1':
        # Get the POSTed article HTML
        innerHTMLBlock = request.POST.get("blurbHTML")

        # Clean up and canonicalize the submitted HTML.
        innerHTMLBlock = entireArticleHTMLSanitizer(innerHTMLBlock)

        # Remove temporary HTML elements that were injected into the TinyMCE
        # in order to make the page more interactive
        theSoup = BeautifulSoup(innerHTMLBlock, "html.parser")
        try:
            badClasses = [
                'add-row-btn',
                'button-wrap',
                'add-new-ibox',
                'add-heading-wrap',
            ]
            for badClass in badClasses:
                listOfBads = theSoup.findAll(class_=badClass)
                for item in listOfBads:
                    item.extract()
        except Exception:
            # Best-effort cleanup: a malformed fragment must not block the edit
            pass

        # Stamp the last-modified time into the page's metadata table
        theTimeStamp = unicode(datetime.datetime.now(tz=pytz.utc))
        modTimeParent = theSoup.find_all("tr",
                                         attrs={"data-key": "last_modified"})
        modTimeTds = modTimeParent[0].find_all("td")
        modTimeTds[1].string = theTimeStamp

        # Convert the BeautifulSoup object back into a string
        innerHTMLBlock = unicode(theSoup)

        # Render the article HTML and its wrapper as a string and save it to
        # the variable
        resultHTML = render_to_string('enterlink/blockchain_article_wrap.html',
                                      {
                                          'innerHTMLBlock': innerHTMLBlock,
                                      })

        # Connect to the local IPFS daemon and add the article HTML
        api = ipfsapi.connect('127.0.0.1', 5001)
        ipfs_hash_new = api.add_str(resultHTML)
        print("THE OFFICIAL NEW HASH IS: %s" % ipfs_hash_new)

        # Cache the article HTML. A failed create() means the hash (and
        # therefore the content) already exists, i.e. nothing changed.
        try:
            hashTable = HashCache.objects.create(ipfs_hash=ipfs_hash_new,
                                                 timestamp=theTimeStamp,
                                                 html_blob=resultHTML,
                                                 articletable=articleObject)
        except Exception:
            return HttpResponse("NO CHANGES DETECTED!")

        # Remember the previous hashes for the response
        ipfs_hash_old = articleObject.ipfs_hash_current
        ipfs_hash_grandparent = articleObject.ipfs_hash_parent

        # Need to switch this later. MAKE SURE TO FIX BLURB COMPARE TOO
        return JsonResponse({
            "newIPFS": ipfs_hash_new,
            "oldIPFS": ipfs_hash_old,
            "grandparentIPFS": ipfs_hash_grandparent
        })

    # Verify that submission was actually recorded on the EOS chain
    if request.GET.get('verification') == '1':

        # Get the IPFS hash
        ipfs_hash_new = request.GET.get('newIPFS')

        # If this edit was a merge, mark the merge-source article as removed
        # and point it at the merge target.
        # BUGFIX: treat a missing 'merged_from_hash' parameter (None) the same
        # as "" -- the old `!= ""` check let None through and crashed the
        # ArticleTable lookup below.
        MERGED_FROM_HASH = request.GET.get('merged_from_hash')
        if MERGED_FROM_HASH:
            mergeSourceArticle = ArticleTable.objects.get(
                ipfs_hash_current=MERGED_FROM_HASH)
            mergeSourceArticle.is_removed = 1
            mergeSourceArticle.redirect_page_id = articleObject.id
            mergeSourceArticle.save()

        # On-chain verification via eparticlectr get_table_rows is currently
        # disabled: "This errors out more often than it prevents bad
        # submissions" - Kedar. (See VCS history for the removed round-trip.)
        # The proposer and proposal window are taken from the request instead.
        proposer = request.GET.get('proposer')
        currentTime = int(time.time())
        endTime = currentTime + 6 * 3600

        # Get the cached article HTML and parse it
        hashTable = HashCache.objects.get(ipfs_hash=ipfs_hash_new)
        parsedDict = parseBlockchainHTML(hashTable.html_blob,
                                         articleObj=articleObject)

        # Remember the previous hashes before rotating them below
        ipfs_hash_old = articleObject.ipfs_hash_current
        ipfs_hash_grandparent = articleObject.ipfs_hash_parent

        # Construct a blurb snippet
        miniBlurb = blurbSplitter(parsedDict["BLURB"], 2048,
                                  minimizeHTML=True)[0]
        miniBlurb = whiteSpaceStripper(miniBlurb)

        # Update the articleObject cache with data from the article HTML file
        # (from the cache)
        articleObject.ipfs_hash_parent = articleObject.ipfs_hash_current
        articleObject.ipfs_hash_current = ipfs_hash_new
        articleObject.blurb_snippet = miniBlurb
        articleObject.page_type = (
            None if parsedDict["PAGEMETADATA"]["page_type"] == "None" else
            parsedDict["PAGEMETADATA"]["page_type"])
        articleObject.page_title = parsedDict["PAGETITLE"]
        articleObject.lastmod_timestamp = timezone.now()
        articleObject.is_removed = parsedDict["PAGEMETADATA"]["is_removed"]
        articleObject.is_removed_from_index = False
        articleObject.is_adult_content = parsedDict["PAGEMETADATA"][
            "is_adult_content"]
        articleObject.page_lang = parsedDict["PAGEMETADATA"]["page_lang"]
        articleObject.save()

        # Update the index
        updateElasticsearch(articleObject, u"PAGE_UPDATED_OR_CREATED")

        # Record the edit proposal internally. This should match all the
        # proposals that are on-chain.
        EditProposal.objects.create(
            id=ipfs_to_uint64_trunc(ipfs_hash_new),
            proposed_article_hash=ipfs_hash_new,
            old_article_hash=ipfs_hash_old,
            grandparent_hash=ipfs_hash_grandparent,
            proposer=proposer,
            proposer_64t=encodeNameSwappedEndian(proposer),
            starttime=currentTime,
            endtime=endTime,
            status=0,
            article=articleObject)

        # Need to switch this later. MAKE SURE TO FIX BLURB COMPARE TOO
        return JsonResponse({
            "newIPFS": ipfs_hash_new,
            "oldIPFS": ipfs_hash_old,
            "grandparentIPFS": ipfs_hash_grandparent
        })

    # Overlay a saved draft on the blob if one was requested.
    # BUGFIX: this used to assign to hashObject.html_blob, which (a) is not
    # what gets rendered (blobHTML is) and (b) is undefined on the merge
    # (from_hash) path, causing a NameError.
    if 'draft' in request.GET:
        account_name = request.GET.get('draft')
        draft = SavedDraft.objects.get(article_slug=articleObject.slug,
                                       account_name=account_name)
        blobHTML = draft.html_blob

    # Prepare the blob for the editor.
    # BUGFIX: the prettified HTML is now fed into editorStructureCorrector;
    # previously its result was computed and immediately discarded.
    formattedArticleBlob = prettifyCorrector(blobHTML)
    formattedArticleBlob = editorStructureCorrector(formattedArticleBlob,
                                                    passedLang=lang_param)

    # Update the Django templating dictionary for the edit page
    contextDictionary = {}
    contextDictionary.update({"ARTICLE_BLOB": formattedArticleBlob})
    contextDictionary.update({"ARTICLE_NAME": articleObject.page_title})
    contextDictionary.update({"PAGE_LANG": articleObject.page_lang})
    contextDictionary.update({"ARTICLE_SLUG": articleObject.slug})
    contextDictionary.update({"ARTICLE_SLUG_ALT": articleObject.slug_alt})
    contextDictionary.update({"ARTICLE_IS_REMOVED": articleObject.is_removed})
    contextDictionary.update({"ARTICLE_PHOTO_URL": articleObject.photo_url})
    contextDictionary.update(
        {"ARTICLE_THUMB_URL": articleObject.photo_thumb_url})
    contextDictionary.update({"ARTICLE_PAGE_TYPE": articleObject.page_type})
    contextDictionary.update({"ARTICLE_LANG": articleObject.page_lang})
    contextDictionary.update(
        {"ARTICLE_CURRENT_HASH": articleObject.ipfs_hash_current})
    contextDictionary.update({"MERGED_FROM_HASH": MERGED_FROM_HASH})
    contextDictionary.update({"newlinkfileform": NewlinkFileForm()})
    contextDictionary.update({"linkform": LinkForm()})
    contextDictionary.update({
        "pagemetaform":
        PageMetaForm(
            initial={
                'page_type': articleObject.page_type,
                'sub_page_type': articleObject.page_sub_type,
                'is_removed': articleObject.is_removed,
                'is_adult_content': articleObject.is_adult_content,
                'page_lang': articleObject.page_lang
            })
    })

    # Return the HTML for the editing page
    return render(request, 'enterlink/edit.html', contextDictionary)