Example #1
    def construct_channel(self, **kwargs):
        """
        Create ChannelNode and build topic tree.
        """
        # create channel
        channel = self.get_channel()

        # TODO: This is the wrong license
        license = licenses.CC_BY_SALicense(copyright_holder="GITTA elml.org")
        logging.basicConfig(level=logging.INFO)

        with tempfile.TemporaryDirectory() as extract_path:
            imscp_dict = extract_from_zip(
                    'examples/gitta_ims.zip', license, extract_path)
            for topic_dict in imscp_dict['organizations']:
                topic_tree = make_topic_tree(license, topic_dict,
                        extract_path, scraper_class=DefaultScraper,
                        temp_dir=kwargs.get('temp_dir'))
                print('Adding topic tree to channel:', topic_tree)
                channel.add_child(topic_tree)

        print('--- metadata ---')
        import json
        print(json.dumps(imscp_dict['metadata'], indent=4))

        return channel
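
For context, here is a minimal sketch of how a construct_channel method like this one usually sits inside a ricecooker SushiChef subclass and gets run. The class name and the channel_info values are illustrative assumptions, not part of the original example.

from ricecooker.chefs import SushiChef

class GittaChef(SushiChef):  # hypothetical class name
    channel_info = {
        'CHANNEL_SOURCE_DOMAIN': 'elml.org',   # assumed from the copyright holder
        'CHANNEL_SOURCE_ID': 'gitta-imscp',    # assumed unique identifier
        'CHANNEL_TITLE': 'GITTA',              # assumed title
        'CHANNEL_LANGUAGE': 'en',              # assumed language
    }

    # construct_channel(**kwargs) as shown in the example above

if __name__ == '__main__':
    GittaChef().main()  # parses command-line args, builds and uploads the channel
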
Example #2
    def construct_channel(self, **kwargs):
        """
        Create ChannelNode and build topic tree.
        """
        # create channel
        channel = self.get_channel()

        license = licenses.CC_BY_SALicense(copyright_holder="CeDeC")
        logging.basicConfig(level=logging.INFO)

        with tempfile.TemporaryDirectory() as extract_path:
            imscp_dict = extract_from_zip(os.path.join(script_dir, 'eventos.zip'), license,
                    extract_path)
            for topic_dict in imscp_dict['organizations']:
                topic_tree = make_topic_tree(license, topic_dict, extract_path)
                print('Adding topic tree to channel:', topic_tree)
                channel.add_child(topic_tree)

        return channel
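
The snippet above references script_dir without defining it. A common way to define it (an assumption here, not shown in the original chef) is relative to the chef script's own location:

import os

# Assumed definition: the directory containing the chef script, so that
# 'eventos.zip' is resolved next to the source file regardless of the
# current working directory.
script_dir = os.path.dirname(os.path.realpath(__file__))
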
Example #3
################################################################################
# Folder to store pdfs of images
DOCUMENT_DOWNLOAD_DIR = 'documents'
if not os.path.exists(DOCUMENT_DOWNLOAD_DIR):
    os.makedirs(DOCUMENT_DOWNLOAD_DIR)

# Main page collection brandfolder
ENGLISH_COLLECTION_URL = "https://brandfolder.com/digitalmedic/covid-19"
ENGLISH_ASSETS_URL = "https://brandfolder.com/api/v4/collections/{collection}/sections/{section}/assets?sort_by=position&order=ASC&search=&fast_jsonapi=true"
EXCLUDED_TOPIC_IDS = [262354, 261412]
FILE_STORAGE_URL = "https://brandfolder.com/api/v4/assets/{id}/attachments?fields=url,thumbnail_url"

# Multi-language content constants
SLIDESHOWS_URL = "https://brandfolder.com/digitalmedic/covid-19-multiple-languages"
SLIDESHOW_ASSETS_URL = "https://brandfolder.com/api/v4/collections/{collection}/sections/{section}/assets?sort_by=position&order=ASC&strict_search=false&fast_jsonapi=true"
LICENSE = licenses.CC_BY_SALicense(
    copyright_holder="Stanford Center for Health Education")

LANGUAGE_MAP = {
    'Afrikaans': 'af',
    'Arabic': 'ar',
    'English': 'en',
    'French': 'fr',
    'Hindi': 'hi',
    'isiXhosa': 'xh',
    'isiZulu': 'zul',
    'Kiswahili': 'sw',
    'Mandarin Chinese - simple': 'zh-CN',
    'Mandarin Chinese - Traditional': 'zh-Hant',
    'Portuguese': 'pt',
    'Setswana': 'tn',
    'Spanish': 'es',
}
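
A minimal sketch of how the URL templates above might be filled in and fetched. The helper name, the bare requests.get call, and the JSON:API-style 'data' key (suggested by fast_jsonapi=true) are assumptions rather than code from the original chef, and the real Brandfolder API may additionally require an API key header.

import requests

def fetch_section_assets(collection_id, section_id):  # hypothetical helper
    url = ENGLISH_ASSETS_URL.format(collection=collection_id, section=section_id)
    response = requests.get(url)
    response.raise_for_status()
    # Assumes a JSON:API-style payload; assets whose topic id appears in
    # EXCLUDED_TOPIC_IDS would be skipped, and LANGUAGE_MAP translates
    # Brandfolder language names (e.g. 'Kiswahili') into codes ('sw').
    return response.json().get('data', [])
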
Example #4
sess = requests.Session()
cache = FileCache('.webcache')
forever_adapter = CacheControlAdapter(heuristic=CacheForeverHeuristic(),
                                      cache=cache)

sess.mount('http://', forever_adapter)
sess.mount('https://', forever_adapter)

ydl = youtube_dl.YoutubeDL({
    'no_warnings': True,
    'writesubtitles': True,
    'allsubtitles': True,
    'ignoreerrors': True,  # Skip over deleted videos in a playlist
})

LICENSE = licenses.CC_BY_SALicense(
    copyright_holder='Open Osmosis (open.osmosis.org)')

# Map between assessment topic name and YouTube playlist name
QUESTION_VIDEO_MAP = {
    "Genetics": "Genetics pathology",
    "Dermatology": "Dermatology pathology",
    "Heme/Onc": "Hematology pathology",
    "Neurology": "Neurological pathology",
    "Renal": "Renal pathology",
    "Cardiology": "Cardiovascular pathology",
    "GI": "Gastrointestinal pathology",
    "Endocrine": "Endocrine pathology",
    "Reproduction": "Reproductive pathology",
    "MSK": "Musculoskeletal pathology",
    "Pediatrics": "Pediatrics videos",
    "Pulmonology": "Respiratory pathology",
Example #5
from ricecooker.chefs import SushiChef
from ricecooker.classes import nodes, questions, licenses

from .khan import get_khan_topic_tree

LICENSE_MAPPING = {
    "CC BY":
    licenses.CC_BYLicense(copyright_holder="Khan Academy"),
    "CC BY-NC":
    licenses.CC_BY_NCLicense(copyright_holder="Khan Academy"),
    "CC BY-NC-ND":
    licenses.CC_BY_NC_NDLicense(copyright_holder="Khan Academy"),
    "CC BY-NC-SA (KA default)":
    licenses.CC_BY_NC_SALicense(copyright_holder="Khan Academy"),
    "CC BY-SA":
    licenses.CC_BY_SALicense(copyright_holder="Khan Academy"),
    "Non-commercial/non-Creative Commons (College Board)":
    licenses.SpecialPermissionsLicense(
        copyright_holder="Khan Academy",
        description="Non-commercial/non-Creative Commons (College Board)"),
    # "Standard Youtube": licenses.ALL_RIGHTS_RESERVED,
}
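
# How the mapping above is typically consumed (a sketch, not code from the
# original chef): each Khan Academy item carries a license name string, which
# is translated into a ricecooker license object, falling back to the KA
# default license when the name is unknown.
def get_license_for(ka_license_name):
    return LICENSE_MAPPING.get(
        ka_license_name, LICENSE_MAPPING["CC BY-NC-SA (KA default)"])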

SLUG_BLACKLIST = ["new-and-noteworthy", "talks-and-interviews",
                  "coach-res"]  # not relevant
SLUG_BLACKLIST += ["cs", "towers-of-hanoi"]  # not (yet) compatible
# SLUG_BLACKLIST += ["cc-third-grade-math", "cc-fourth-grade-math", "cc-fifth-grade-math", "cc-sixth-grade-math",
#                    "cc-seventh-grade-math", "cc-eighth-grade-math"]  # common core
SLUG_BLACKLIST += [
    "MoMA", "getty-museum", "stanford-medicine", "crash-course1", "mit-k12",
    "hour-of-code", "metropolitan-museum", "bitcoin", "tate", "crash-course1",