def run_flush_extract_cache(opts, **kwargs):
    """
    Flush the extraction cache.

    Clears the URL, extract, and thumbnail caches so subsequent
    extractions start from a clean slate, then reports success.
    """
    # Flush each cache layer in order.
    for cache in (URLCache, ExtractCache, ThumbnailCache):
        cache.flush()
    echo('Extraction caches flushed.', no_color=opts.no_color)
def run_flush_extract_cache(opts, **kwargs):
    """
    Flush the extraction cache.

    Clears the URL, extract, and thumbnail caches and logs the result.
    """
    # Imported lazily, as in the original, so the model layer is only
    # loaded when this command actually runs.
    from newslynx.models import URLCache, ExtractCache, ThumbnailCache

    for cache in (URLCache, ExtractCache, ThumbnailCache):
        cache.flush()
    log.info('Extraction caches flushed.')
import unittest
import re
from faker import Faker
from random import choice

from newslynx.client import API
from newslynx.models import ExtractCache
from newslynx.constants import CONTENT_ITEM_FACETS

# flush the cache to ensure fresh results.
ExtractCache.flush()

fake = Faker()


class TestContentAPI(unittest.TestCase):
    """Integration tests for the content-item API (org 1)."""

    org = 1
    api = API(org=1)

    def test_create_content_item_manual_extract(self):
        """Creating an item with extract=True should pull subject tags and
        authors from the page and mark provenance as 'manual'."""
        c = {
            'url': 'https://projects.propublica.org/killing-the-colorado/story/wasting-water-out-west-use-it-or-lose-it',
            'type': 'article',
            'tag_ids': [13, 14]
        }
        c = self.api.content.create(extract=True, **c)
        # Use unittest assertion methods instead of bare `assert`:
        # bare asserts are stripped under `python -O` and give no
        # diagnostic output on failure.
        self.assertEqual(len(c['subject_tag_ids']), 2)
        self.assertEqual(len(c['authors']), 1)
        self.assertEqual(c['provenance'], 'manual')
def flush_work_cache():
    """Empty the URL, extract, and thumbnail caches."""
    # Same three flushes as the original, driven by a loop.
    for cache_cls in (URLCache, ExtractCache, ThumbnailCache):
        cache_cls.flush()
import unittest
import re
from faker import Faker
from random import choice

from newslynx.client import API
from newslynx.models import ExtractCache
from newslynx.constants import CONTENT_ITEM_FACETS

# flush the cache to ensure fresh results.
ExtractCache.flush()

fake = Faker()


class TestContentAPI(unittest.TestCase):
    """Integration tests for the content-item API (org 1)."""

    org = 1
    api = API(org=1)

    def test_create_content_item_manual_extract(self):
        """Creating an item with extract=True should pull subject tags and
        authors from the page and mark provenance as 'manual'."""
        c = {
            'url': 'https://projects.propublica.org/killing-the-colorado/story/wasting-water-out-west-use-it-or-lose-it',
            'type': 'article',
            'tag_ids': [13, 14]
        }
        c = self.api.content.create(extract=True, **c)
        # Use unittest assertion methods instead of bare `assert`:
        # bare asserts are stripped under `python -O` and give no
        # diagnostic output on failure.
        self.assertEqual(len(c['subject_tag_ids']), 2)
        self.assertEqual(len(c['authors']), 1)
        self.assertEqual(c['provenance'], 'manual')