Example #1
def run_flush_extract_cache(opts, **kwargs):
    """
    Flush the extraction cache.
    """
    # The cache classes live in newslynx.models; `echo` and `opts`
    # come from the surrounding CLI module.
    from newslynx.models import URLCache, ExtractCache, ThumbnailCache

    URLCache.flush()
    ExtractCache.flush()
    ThumbnailCache.flush()
    echo('Extraction caches flushed.', no_color=opts.no_color)
Example #2
def run_flush_extract_cache(opts, **kwargs):
    """
    Flush the extraction cache.
    """
    from newslynx.models import URLCache, ExtractCache, ThumbnailCache

    # `log` is the module-level logger used by the surrounding CLI module.
    URLCache.flush()
    ExtractCache.flush()
    ThumbnailCache.flush()
    log.info('Extraction caches flushed.')
Example #3
from psycopg2 import IntegrityError

from newslynx.core import gen_session
from newslynx.models import ExtractCache
from newslynx.models import (ContentItem, Tag, Recipe, Author)
from newslynx.models.util import (get_table_columns, fetch_by_id_or_field)
from newslynx.views.util import validate_content_item_types
from newslynx.exc import RequestError
from newslynx.tasks import ingest_util

extract_cache = ExtractCache()


def ingest(obj,
           org_id,
           url_fields=['body'],
           requires=['url', 'type'],
           extract=True,
           kill_session=True):
    """
    Ingest an Event.
    """

    # distinct session for this eventlet.
    session = gen_session()

    # check required fields
    ingest_util.check_requires(obj, requires, type='Content Item')

    # validate type
    validate_content_item_types(obj['type'])
Example #4
import unittest
import re
from faker import Faker
from random import choice

from newslynx.client import API
from newslynx.models import ExtractCache
from newslynx.constants import CONTENT_ITEM_FACETS

# flush the cache to ensure fresh results.
ExtractCache.flush()

fake = Faker()


class TestContentAPI(unittest.TestCase):
    org = 1
    api = API(org=1)

    def test_create_content_item_manual_extract(self):
        c = {
            'url': 'https://projects.propublica.org/killing-the-colorado/story/wasting-water-out-west-use-it-or-lose-it',
            'type': 'article',
            'tag_ids': [13, 14]
        }
        c = self.api.content.create(extract=True, **c)
        assert len(c['subject_tag_ids']) == 2
        assert len(c['authors']) == 1
        assert c['provenance'] == 'manual'
Example #5
def flush_work_cache():
    # The cache classes live in newslynx.models.
    from newslynx.models import URLCache, ExtractCache, ThumbnailCache

    URLCache.flush()
    ExtractCache.flush()
    ThumbnailCache.flush()
Example #6
import unittest
import re
from faker import Faker
from random import choice

from newslynx.client import API
from newslynx.models import ExtractCache
from newslynx.constants import CONTENT_ITEM_FACETS

# flush the cache to ensure fresh results.
ExtractCache.flush()


fake = Faker()


class TestContentAPI(unittest.TestCase):
    org = 1
    api = API(org=1)

    def test_create_content_item_manual_extract(self):
        c = {
            'url': 'https://projects.propublica.org/killing-the-colorado/story/wasting-water-out-west-use-it-or-lose-it',
            'type': 'article',
            'tag_ids': [13, 14]
        }
        c = self.api.content.create(extract=True, **c)
        assert len(c['subject_tag_ids']) == 2
        assert len(c['authors']) == 1
        assert c['provenance'] == 'manual'
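
The module-level ExtractCache.flush() calls in Examples #4 and #6 run as soon as the test module is imported. Below is a minimal sketch of an alternative placement, assuming only the flush() classmethod and API client shown above; the class name and the setUpClass placement are illustrative, not part of the newslynx test suite.

import unittest

from newslynx.client import API
from newslynx.models import ExtractCache


class TestContentAPIFreshCache(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        # Flush the extraction cache once before this class's tests run,
        # so extraction results are not served from a stale cache.
        ExtractCache.flush()
        cls.api = API(org=1)

This ties the flush to the tests that actually depend on fresh extraction results, rather than to module import.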