def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))

    log.log('Removing migration files...')

    os.system(
        'find . -path "*/migrations/*.py" -not -name "__init__.py" -delete'
        ' && find . -path "*/migrations/*.pyc" -delete'
        ' && git pull')

    call_command('makemigrations')
    call_command('migrate')
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))

    log.log('Building files with fragile data... Please be patient as this can take some time.')

    if pathlib.Path(SAVE_DIR).exists():
        # Make sure the directory is empty, as we don't want to keep any old
        # fragile data files around
        for file in pathlib.Path(SAVE_DIR).glob('*'):
            file.unlink()

    if not pathlib.Path(SAVE_DIR).exists():
        pathlib.Path(SAVE_DIR).mkdir(parents=True)

    for cat in data:
        dataset = data[cat]['model'].objects.all()
        if not dataset.count():
            log.warning(data[cat]['model']._meta.object_name +
                        ' has no objects. No file will be written.')
        else:
            d = serializers.serialize(
                'yaml',
                dataset,
                fields=data[cat]['fields'],
                use_natural_primary_keys=data[cat]['use_natural_primary_keys'],
                use_natural_foreign_keys=data[cat]['use_natural_foreign_keys'])
            with open(data[cat]['data_file'], 'w+') as f:
                f.write(d)
            log.log(f'Saved {data[cat]["model"]._meta.object_name} fragile data in: {data[cat]["data_file"]}')
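# For orientation: Django's YAML serializer, as called above, writes entries
# roughly shaped like the following (the model label and fields are
# illustrative and depend on the models configured in `data`):
#
# - model: workshop.workshop
#   fields:
#     name: Intro to Python
#     views: 42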
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))
    input = Input(path=__file__)

    data = AUTO_SNIPPETS

    for identifier, snippetdata in data.items():
        snippet, created = Snippet.objects.get_or_create(
            identifier=identifier)
        if not created and not options.get('force'):
            choice = input.ask(
                f'Snippet `{identifier}` already exists. Update with new definition? [y/N]')
            if choice.lower() != 'y':
                continue
        Snippet.objects.filter(identifier=identifier).update(
            snippet=PARSER.convert(snippetdata))

    log.log('Added/updated snippets: ' + ', '.join(data))

    if (log._save(data='ingestsnippets', name='warnings.md', warnings=True)
            or log._save(data='ingestsnippets', name='logs.md', warnings=False, logs=True)
            or log._save(data='ingestsnippets', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)
def _is_expired(path, age_checker=TEST_AGES['ROOT'],
                force_download=FORCE_DOWNLOAD) -> bool:
    """Checks the age of any path against a set expiration date (a timedelta)"""
    if isinstance(path, str):
        path = pathlib.Path(path)

    log = Logger(name='cache-age-check')

    if not path.exists() or force_download:
        return True

    file_mod_time = datetime.datetime.fromtimestamp(path.stat().st_ctime)
    now = datetime.datetime.today()

    if now - file_mod_time > age_checker:
        log.warning(f'Cache has expired for {path} - older than {age_checker}...')
        return True

    if CACHE_VERBOSE:
        log.log(f'Cache is OK for {path} - not older than {age_checker}...',
                force=True)

    return False
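# A minimal usage sketch for _is_expired, assuming it and the settings it
# relies on are importable; the path and timedelta are illustrative only.
import datetime

if _is_expired('cache/web/example.json',
               age_checker=datetime.timedelta(days=14),
               force_download=False):
    pass  # the cache is stale or missing: re-download and rewrite the file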
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))

    log.log('Building insight files... Please be patient as this can take some time.')

    if not pathlib.Path(SAVE_DIR).exists():
        pathlib.Path(SAVE_DIR).mkdir(parents=True)

    loader = InsightCache(repository=INSIGHT_REPO[0],
                          branch=INSIGHT_REPO[1],
                          log=log)

    insights = list()
    for insight_data in loader.data:
        insights.append(insight_data)

    # Save all data
    with open(f'{SAVE_DIR}/{DATA_FILE}', 'w+') as file:
        file.write(yaml.dump(insights))

    log.log(f'Saved insights data file: {SAVE_DIR}/{DATA_FILE}')

    if (log._save(data='buildinsights', name='warnings.md', warnings=True)
            or log._save(data='buildinsights', name='logs.md', warnings=False, logs=True)
            or log._save(data='buildinsights', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))

    if options.get('all'):
        options['name'] = [x[0] for x in settings.AUTO_REPOS]

    if not options.get('name'):
        log.error(
            'No workshop names provided. Use any of the following settings:\n    --name [repository name]\n    --all')

    branch = options.get('branch') or 'v2.0'

    log.log('Building workshop files... Please be patient as this can take some time.')

    for workshop in options.get('name'):
        SAVE_DIR = f'{settings.BUILD_DIR}_workshops/{workshop}'
        DATA_FILE = f'{workshop}.yml'

        if not options.get('force'):
            check_for_cancel(SAVE_DIR, workshop, log=log)

        if not pathlib.Path(SAVE_DIR).exists():
            pathlib.Path(SAVE_DIR).mkdir(parents=True)

        # branch = 'v2.0' # TODO: #467 fix this...
        loader = WorkshopCache(repository=workshop, branch=branch, log=log)
        data = loader.data
        del data['raw']
        data['sections'] = loader.sections
        data['parent_branch'] = loader.branch
        data['parent_repo'] = workshop
        data['parent_backend'] = 'Github'

        # Save all data
        with open(f'{SAVE_DIR}/{DATA_FILE}', 'w+') as file:
            file.write(yaml.dump(data))

        log.log(f'Saved workshop datafile: `{SAVE_DIR}/{DATA_FILE}`')

        if (log._save(data=workshop, name='warnings.md', warnings=True)
                or log._save(data=workshop, name='logs.md', warnings=False, logs=True)
                or log._save(data=workshop, name='info.md', warnings=False, logs=False, info=True)):
            log.log(
                f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
                force=True)
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))

    log.log('Building glossary... Please be patient as this can take some time.')

    # TODO: import the repository and branch from settings here
    loader = GlossaryCache(repository='glossary', branch='v2.0', log=log)

    glossary = list()
    for term_data in loader.data:
        glossary.append({
            'term': term_data['term'],
            'explication': term_data['explication'],
            'readings': term_data['readings'],
            'tutorials': term_data['tutorials'],
            'cheat_sheets': term_data['cheat_sheets'],
        })

    if not pathlib.Path(SAVE_DIR).exists():
        pathlib.Path(SAVE_DIR).mkdir(parents=True)

    with open(f'{SAVE_DIR}/{DATA_FILE}', 'w+') as file:
        file.write(yaml.dump(glossary))

    log.log(f'Saved glossary datafile: {SAVE_DIR}/{DATA_FILE}.')

    if (log._save(data='buildglossary', name='warnings.md', warnings=True)
            or log._save(data='buildglossary', name='logs.md', warnings=False, logs=True)
            or log._save(data='buildglossary', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))

    files = {
        x: y['data_file']
        for x, y in built_data.items() if os.path.exists(y['data_file'])
    }
    raw = get_settings(files)

    for cat, data in raw.items():
        model = built_data[cat]['model']
        if model == Workshop:
            for obj in data:
                Workshop.objects.filter(name=obj['fields']['name']).update(
                    views=obj['fields']['views'])
            log.log(f'Loaded Workshop fragile data ({len(data)} objects).')
        elif model == Progress:
            for obj in data:
                profile, created = Profile.objects.get_or_create(
                    user__first_name=obj['fields']['profile'][0],
                    user__last_name=obj['fields']['profile'][1])
                workshop = Workshop.objects.get_by_natural_key(
                    obj['fields']['workshop'])
                Progress.objects.update_or_create(
                    profile=profile,
                    workshop=workshop,
                    defaults={
                        'page': obj['fields']['page'],
                        'modified': obj['fields']['modified']
                    })
            log.log(f'Loaded Progress fragile data ({len(data)} objects).')
        elif model == Issue:
            for obj in data:
                lesson = Lesson.objects.get_by_natural_key(
                    obj['fields']['lesson'])
                user = User.objects.get(username=obj['fields']['user'][0])
                issue, created = Issue.objects.get_or_create(
                    # assumption: the issue's workshop is derived from its
                    # lesson (the original read a variable left over from the
                    # Progress loop above)
                    workshop=lesson.workshop,
                    lesson=lesson,
                    user=user,
                    website=obj['fields']['website'],
                    # assumption: the serialized `open` field was intended
                    # here (the original repeated `website`)
                    open=obj['fields']['open'],
                    comment=obj['fields']['comment'])
            log.log(f'Loaded Issue fragile data ({len(data)} objects).')
        else:
            log.error(
                'Could not process some of the fragile data. This likely means that you have created a way to save fragile data without building a way to ingest it back into the database. Revisit the code for the ingestfragile command (backend.management.commands.ingestfragile) and make sure every model saved by buildfragile is handled there.')

    # Delete all files with fragile data
    for file in files.values():
        pathlib.Path(file).unlink()

    log.log(
        f'Ingested all the fragile data back into the database ({len(files)} files processed).')
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))

    log.log('Building blurbs... Please be patient as this can take some time.')

    for cat in list(settings.AUTO_USERS.keys()):
        for u in settings.AUTO_USERS[cat]:
            if u.get('blurb'):
                blurb = u.get('blurb', {'text': None, 'workshop': None})
                text = blurb.get('text')
                workshop = blurb.get('workshop')
                if text and workshop:
                    SAVE_DIR = f'{settings.BUILD_DIR}_workshops/{workshop}'
                    if find_dir(workshop):
                        with open(f'{SAVE_DIR}/{DATA_FILE}', 'w+') as file:
                            file.write(
                                yaml.dump({
                                    'workshop': workshop,
                                    'user': u.get('username'),
                                    'text': PARSER.fix_html(text)
                                }))
                        log.log(f'Saved blurb datafile: {SAVE_DIR}/{DATA_FILE}.')
                    else:
                        log.error(
                            f'No directory available for `{workshop}` ({SAVE_DIR}). Did you run `python manage.py build --repo {workshop}` before running this script?',
                            kill=True)

    if (log._save(data='buildblurbs', name='warnings.md', warnings=True)
            or log._save(data='buildblurbs', name='logs.md', warnings=False, logs=True)
            or log._save(data='buildblurbs', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))

    log.log('Building group files... Please be patient as this can take some time.')

    if not pathlib.Path(SAVE_DIR).exists():
        pathlib.Path(SAVE_DIR).mkdir(parents=True)

    permissions = {}
    for group_name in settings.AUTO_GROUPS:
        permissions[group_name] = list()
        for model_cls in settings.AUTO_GROUPS[group_name]:
            for perm_name in settings.AUTO_GROUPS[group_name][model_cls]:
                # Generate the permission codename as Django would generate it
                codename = perm_name + '_' + model_cls._meta.model_name
                permissions[group_name].append(codename)

    # Save all data
    with open(f'{SAVE_DIR}/{DATA_FILE}', 'w+') as file:
        file.write(yaml.dump(permissions))

    log.log(f'Saved groups data file: {SAVE_DIR}/{DATA_FILE}')

    if (log._save(data='buildgroups', name='warnings.md', warnings=True)
            or log._save(data='buildgroups', name='logs.md', warnings=False, logs=True)
            or log._save(data='buildgroups', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)
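# For reference, Django's default model permissions follow the codename
# pattern `<action>_<model_name>`; for a hypothetical `Workshop` model the
# loop above would therefore collect entries such as:
#
#     add_workshop, change_workshop, delete_workshop, view_workshop
#
# and the resulting YAML file maps each group name to its list of codenames.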
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))
    input = Input(path=__file__)

    test_for_required_files(REQUIRED_PATHS=REQUIRED_PATHS, log=log)
    data = get_yaml(FULL_PATH, log=log)

    for termdata in data:
        try:
            term, created = Term.objects.get_or_create(
                term=termdata.get('term'))
        except IntegrityError:
            try:
                term = Term.objects.get(
                    slug=dhri_slugify(termdata.get('term')))
            except:
                log.error('An unknown error occurred.')
            created = False  # the term already existed
            term.term = termdata.get('term')
            term.explication = termdata.get('explication')
            term.save()

        if not created and not options.get('force'):
            choice = input.ask(
                f'Term `{termdata.get("term")}` already exists. Update with new definition? [y/N]')
            if choice.lower() != 'y':
                continue

        Term.objects.filter(term=termdata.get('term')).update(
            explication=termdata.get('explication'))
        term.refresh_from_db()

        for cat in ['tutorials', 'readings', 'cheat_sheets']:
            if termdata.get(cat):
                category, add_field = None, None
                if cat == 'tutorials':
                    category = Resource.TUTORIAL
                    add_field = term.tutorials
                elif cat == 'readings':
                    category = Resource.READING
                    add_field = term.readings
                elif cat == 'cheat_sheets':
                    category = Resource.CHEATSHEET
                    add_field = term.cheat_sheets

                for point in termdata.get(cat):
                    if not add_field or not category:
                        log.error(
                            f'Cannot interpret category `{cat}`. Make sure the script is correct and corresponds with the database structure.')
                    try:
                        obj, created = Resource.objects.update_or_create(
                            category=category,
                            title=point.get('linked_text'),
                            url=point.get('url'),
                            annotation=point.get('annotation'))
                        if obj not in add_field.all():
                            add_field.add(obj)
                    except IntegrityError:
                        obj = Resource.objects.get(
                            category=category,
                            title=point.get('linked_text'),
                            url=point.get('url'))
                        obj.annotation = point.get('annotation')
                        obj.save()  # persist the updated annotation
                        if obj not in add_field.all():
                            add_field.add(obj)
                        log.info(
                            f'Another resource with the same URL, title, and category already existed, so it was updated with a new annotation: **{point.get("linked_text")} (old)**\n{point.get("annotation")}\n-------\n**{obj.title} (new)**\n{obj.annotation}')

    log.log('Added/updated terms: ' +
            ', '.join([x.get('term') for x in data]))

    if (log._save(data='ingestglossary', name='warnings.md', warnings=True)
            or log._save(data='ingestglossary', name='logs.md', warnings=False, logs=True)
            or log._save(data='ingestglossary', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)
            SETUP[file] = yaml.safe_load(f)
    except FileNotFoundError:
        exit(
            f'Required settings file {file} could not be found in the expected path ({path}). Before the script can run, the correct settings must be in place.')
    except UnicodeDecodeError as e:
        SETUP[file] = {}
        raise RuntimeError(
            f'ASCII codec could not decode the data in {path}: {e}')
    return SETUP


SETUP = get_settings()

# Initiate logger
log = Logger(path=__name__)

# Corrections
# 1. Set up tuples for AUTO_REPOS, GLOSSARY_REPO, INSTALL_REPO, INSIGHT_REPO
AUTO_REPOS = [(x['repo'], x['branch'])
              for x in SETUP['repositories.yml']['workshops']]
GLOSSARY_REPO = (SETUP['repositories.yml']['meta']['glossary']['repo'],
                 SETUP['repositories.yml']['meta']['glossary']['branch'])
INSTALL_REPO = (SETUP['repositories.yml']['meta']['install']['repo'],
                SETUP['repositories.yml']['meta']['install']['branch'])
INSIGHT_REPO = (SETUP['repositories.yml']['meta']['insight']['repo'],
                SETUP['repositories.yml']['meta']['insight']['branch'])

# 2. Make sure all types are correct
SETUP['backend.yml']['STATIC_IMAGES'] = {
import datetime  # needed by _is_expired below
import json
import pathlib
import os
import re  # needed by quote_converter below

import smartypants
from backend.logger import Logger
from backend.settings import (AUTO_REPOS, CACHE_DIRS, FORCE_DOWNLOAD,
                              TEST_AGES, CACHE_VERBOSE, GITHUB_TOKEN)
from django.utils.text import slugify
from django.urls import reverse
from django.urls.exceptions import NoReverseMatch
from collections import OrderedDict
from github import Github
from bs4 import BeautifulSoup

log = Logger(name='github-parser')


def _is_expired(path, age_checker=TEST_AGES['ROOT'],
                force_download=FORCE_DOWNLOAD) -> bool:
    """Checks the age of any path against a set expiration date (a timedelta)"""
    if isinstance(path, str):
        path = pathlib.Path(path)

    log = Logger(name='cache-age-check')

    if not path.exists() or force_download:
        return True

    file_mod_time = datetime.datetime.fromtimestamp(path.stat().st_ctime)
    now = datetime.datetime.today()
class GitHubParser():
    def __init__(self, string: str = None, log=None):
        if log is None:
            self.log = Logger(name='github-parser')
        else:
            self.log = log

    def convert(self, string):
        c = GitHubParserCache(string=string)
        return c.data.get('markdown', '').strip()

    def strip_from_p(self, html):
        soup = BeautifulSoup(html, 'lxml')
        if soup.p:
            return ''.join([str(x) for x in soup.p.children])
        return html

    def _fix_link(self, tag):
        def find_workshop(elements):
            if elements[-1] == 'DHRI-Curriculum':
                return '{GH_CURRICULUM}'
            for element in elements:
                for workshop in [x[0] for x in AUTO_REPOS]:
                    if workshop == element:
                        return workshop
            return ''

        elements = tag['href'].split('/')

        if 'http:' in elements or 'https:' in elements:
            link_type = 'absolute'
        elif elements[0].startswith('#'):
            link_type = 'local'
        else:
            link_type = 'relative'

        raw_file = False  # currently unused

        if link_type == 'absolute':
            if 'DHRI-Curriculum' in elements:
                if 'glossary' in elements and 'terms' in elements:
                    term = elements[-1].replace('.md', '')
                    self.log.info(
                        f'Found link to a **glossary term**; adding shortcut link: curriculum.dhinstitutes.org/shortcuts/term/{term}')
                    tag['href'] = f'https://curriculum.dhinstitutes.org/shortcuts/term/{term}'
                elif 'insights' in elements and 'pages' in elements:
                    insight = elements[-1].replace('.md', '')
                    self.log.info(
                        f'Found link to an **insight**; adding shortcut link: curriculum.dhinstitutes.org/shortcuts/insight/{insight}')
                    tag['href'] = f'https://curriculum.dhinstitutes.org/shortcuts/insight/{insight}'
                elif 'install' in elements and 'guides' in elements:
                    install = elements[-1].replace('.md', '')
                    self.log.info(
                        f'Found link to an **installation guide**; adding shortcut link: curriculum.dhinstitutes.org/shortcuts/install/{install}')
                    tag['href'] = f'https://curriculum.dhinstitutes.org/shortcuts/install/{install}'
                elif 'raw.githubusercontent.com' in elements:
                    raw_link = '/'.join(elements)
                    self.log.info(
                        f'Found link to a **raw file** and will not change the link: {raw_link}')
                else:
                    workshop = find_workshop(elements)
                    if workshop == '{GH_CURRICULUM}':
                        gh_link = '/'.join(elements)
                        self.log.info(
                            f'Found link to **the DHRI Curriculum on GitHub**; linking to it: {gh_link}')
                    elif workshop == '':
                        gh_link = '/'.join(elements)
                        self.log.warning(
                            f'Found link to a workshop that is not currently loaded into the website; it will therefore redirect to the **workshop on GitHub**: {gh_link}')
                    else:
                        self.log.info(
                            f'Found link to a **workshop** that (will) exist(s) on the website, so changing to that: curriculum.dhinstitutes.org/workshops/{workshop}')
                        tag['href'] = f'https://curriculum.dhinstitutes.org/shortcuts/workshop/{workshop}'
        else:
            pass  # print(tag['href'])

        return tag

    def fix_html(self, text):
        def has_children(tag):
            children = []
            try:
                tag.children
                children = [x for x in tag.children]
            except:
                pass
            return children

        if not text:
            return ''

        multiline = '\n' in text

        # Make text into HTML...
        text = self.convert(text)
        text = smartypants.smartypants(text)  # curly-quote it

        soup = BeautifulSoup(text, 'lxml')
        for tag in soup.descendants:
            if tag.name == 'a':
                # if element.text == None: # TODO: Drop links that have no text
                tag = self._fix_link(tag)

        if not multiline:
            if len([x for x in soup.body.children]) == 1 and soup.body.p:
                # We only have one paragraph, so return the _text only_ from the p
                return ''.join([str(x) for x in soup.body.p.children])
            # We have multiline content after all
            html_string = ''.join([str(x) for x in soup.html.body.children])
        else:
            html_string = ''.join([str(x) for x in soup.html.body.children])

        return html_string

    def quote_converter(self, string, reverse=False):
        """Takes a string and returns it with dumb quotes, single and double,
        replaced by smart quotes. Accounts for the possibility of HTML tags
        within the string."""
        if string is None:
            return None

        if not isinstance(string, str):
            print('Not a string:')
            print(string)
            exit()

        if string == '':
            return string

        if reverse:
            string = string.replace('“', '"').replace('”', '"')
            string = string.replace('‘', "'").replace('’', "'")
            return string

        # Find dumb double quotes coming directly after letters or punctuation,
        # and replace them with right double quotes.
        string = re.sub(r'([a-zA-Z0-9.,?!;:\'\"])"', r'\1”', string)
        # Find any remaining dumb double quotes and replace them with
        # left double quotes.
        string = string.replace('"', '“')

        # Follow the same process with dumb/smart single quotes
        string = re.sub(r"([a-zA-Z0-9.,?!;:\"\'])'", r'\1’', string)
        string = string.replace("'", '‘')

        return string
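# A minimal sketch of quote_converter in use (assuming the module above is
# importable); the sample strings are illustrative only.
parser = GitHubParser()
print(parser.quote_converter('She said "hi" and didn\'t wait.'))
# -> She said “hi” and didn’t wait.
print(parser.quote_converter('She said “hi” and didn’t wait.', reverse=True))
# -> She said "hi" and didn't wait.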
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))

    log.log('Building user files... Please be patient as this can take some time.')

    users = list()

    if not pathlib.Path(SAVE_DIR).exists():
        pathlib.Path(SAVE_DIR).mkdir(parents=True)

    if not pathlib.Path(SAVE_DIR_IMG).exists():
        pathlib.Path(SAVE_DIR_IMG).mkdir(parents=True)

    all_categories = list(settings.AUTO_USERS.keys())
    for cat in all_categories:
        all_users = settings.AUTO_USERS[cat]
        log.BAR(all_users, max_value=len(all_users))
        for i, u in enumerate(all_users):
            log.BAR.update(i)

            is_staff = cat == 'STAFF'
            is_super = cat == 'SUPER'
            if is_super:
                is_staff = True

            user = {
                'username': u.get('username'),
                'password': u.get('password', ''),
                'first_name': u.get('first_name', ''),
                'last_name': u.get('last_name', ''),
                'email': u.get('email', ''),
                'profile': {
                    'image': '',
                    'bio': '',
                    'pronouns': u.get('pronouns'),
                    'links': []
                },
                'superuser': is_super,
                'staff': is_staff,
                'groups': u.get('groups', [])
            }

            if u.get('bio'):
                user['profile']['bio'] = PARSER.fix_html(u.get('bio'))

            if u.get('img'):
                if options.get('nocrop'):
                    filename = u['img'].split('/')[-1]
                    user['profile']['image'] = f'{SAVE_DIR_IMG}/{filename}'
                    copyfile(u['img'], user['profile']['image'])
                else:
                    filename = u['img'].split('/')[-1].split('.')[0]
                    user['profile']['image'] = f'{SAVE_DIR_IMG}/{filename}.jpg'
                    crop_and_save(u['img'], user['profile']['image'], MAX_SIZE)
            else:
                log.warning(
                    f'User `{u.get("username")}` does not have an image assigned and will be given the default picture. To make sure this user has a profile picture, add a filepath to an existing file in your datafile (`{SAVE_DIR}/{DATA_FILE}`) or follow the steps in the documentation for adding user images. Then rerun `python manage.py buildusers` or `python manage.py build`.')

            for link in u.get('links', []):
                user['profile']['links'].append({
                    'label': link.get('text'),
                    'url': link.get('url'),
                    'cat': link.get('cat')
                })

            users.append(user)
        log.BAR.finish()

    # Save all data
    with open(f'{SAVE_DIR}/{DATA_FILE}', 'w+') as file:
        file.write(
            yaml.dump({
                'users': users,
                'default': settings.AUTO_USER_DEFAULT
            }))

    log.log(f'Saved user datafile: {SAVE_DIR}/{DATA_FILE}.')

    if (log._save(data='buildusers', name='warnings.md', warnings=True)
            or log._save(data='buildusers', name='logs.md', warnings=False, logs=True)
            or log._save(data='buildusers', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)
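# For orientation, the users datafile written above takes roughly this shape
# (all values illustrative; the `default` key matches the one read by
# ingestusers):
#
# users:
# - username: jdoe
#   first_name: Jane
#   last_name: Doe
#   email: jdoe@example.com
#   profile:
#     image: ''
#     bio: ''
#     pronouns: she/her
#     links:
#     - label: Website
#       url: https://example.com
#       cat: personal
#   superuser: false
#   staff: false
#   groups: []
# default: backend/setup/profile-pictures/default.jpg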
from django.utils.text import slugify
from urllib import parse as urlparser
from backend.logger import Logger
from backend.settings import (CACHE_DIRS, DO_NOT_DOWNLOAD, FORCE_DOWNLOAD,
                              TEST_AGES)
import requests
import json
import datetime
from requests.exceptions import ProxyError
from pathlib import Path
from bs4 import BeautifulSoup

log = Logger(name='webcache')


class WebCache():
    def _get_path_from_url(self, url):
        parsed = urlparser.urlparse(url)
        elem = parsed.netloc.split('.')
        elem.reverse()
        json_path = parsed.path
        if json_path.endswith('/'):
            json_path = json_path[:-1]
        if json_path.startswith('/'):
            json_path = json_path[1:]
        json_path = json_path.replace('/', '-')
        slugified = slugify(json_path) or 'none'
        return CACHE_DIRS['WEB'] / ('/'.join(
            [x for x in elem if x != 'www']) + '/' + slugified + '.json')

    def _valid_url(self):
from django.core.management import BaseCommand, execute_from_command_line
from backend.logger import Logger
from django.conf import settings

log = Logger(name='localserver')


class Command(BaseCommand):
    help = 'Runserver on localhost'

    def handle(self, *args, **options):
        if '*' not in settings.ALLOWED_HOSTS:
            log.warning(
                "Adding '*' to ALLOWED_HOSTS. You might want to change ALLOWED_HOSTS in app.settings to include '*'.")
            settings.ALLOWED_HOSTS.append('*')
        # First element is an argv[0] placeholder required by Django
        args = ['name', 'runserver', '0.0.0.0:80']
        execute_from_command_line(args)
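# Usage sketch: with this command installed in a Django project,
#
#     python manage.py localserver
#
# appends '*' to ALLOWED_HOSTS (with a warning) and serves the site on
# 0.0.0.0:80.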
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))
    input = Input(path=__file__)

    workshops = get_all_existing_workshops()
    if options.get('name'):
        workshops = get_all_existing_workshops(options.get('name'))

    for name, path in workshops:
        DATAFILE = f'{path}/blurb.yml'

        try:
            data = get_yaml(DATAFILE, log=log, catch_error=True)
        except Exception:
            log.warning(
                f'Found no blurb for workshop `{name}`. Skipping and moving ahead...')
            continue

        if not data.get('user'):
            log.error(
                f'No username was defined for the blurb for workshop `{name}`. Check the datafile {DATAFILE} to verify the username attributed to the blurb.')

        if not data.get('workshop'):
            log.warning(
                f'The blurb had no workshop assigned, but the blurb\'s parent folder ({name}) will be assumed to be the workshop. To fix this warning, you can try running python manage.py buildblurbs before running ingestblurbs.')
            data['workshop'] = name

        if not data.get('text'):
            log.error(
                f'The blurb has no text assigned, and thus could not be ingested. Check the datafile {DATAFILE} to verify the text attributed to the blurb.')

        try:
            user = User.objects.get(username=data.get('user'))
        except:
            log.error(
                f'The user attributed to the blurb ({data.get("user")}) was not found in the database. Did you try running python manage.py ingestusers before running ingestblurbs?')

        try:
            workshop = Workshop.objects.get(slug=data.get('workshop'))
        except:
            log.error(
                f'The blurb\'s attached workshop ({data.get("workshop")}) was not found in the database. Did you try running python manage.py ingestworkshop --name {data.get("workshop")} before running ingestblurbs?')

        blurb, created = Blurb.objects.get_or_create(
            user=user,
            workshop=workshop,
            defaults={'text': PARSER.fix_html(data.get('text'))})

        if not created and not options.get('force'):
            choice = input.ask(
                f'A blurb for workshop `{workshop}` already exists. Update with new content? [y/N]')
            if choice.lower() != 'y':
                continue

        blurb.text = data.get('text')
        blurb.save()

    log.log('Added/updated blurbs for workshops: ' +
            ', '.join([x[0] for x in workshops]))

    if (log._save(data='ingestblurbs', name='warnings.md', warnings=True)
            or log._save(data='ingestblurbs', name='logs.md', warnings=False, logs=True)
            or log._save(data='ingestblurbs', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))
    input = Input(path=__file__)

    test_for_required_files(REQUIRED_PATHS=REQUIRED_PATHS, log=log)
    data = get_yaml(FULL_PATH, log=log)

    for insightdata in data:
        # TODO: Insights and Software are also connected in a database table
        # (insight_insight_software) but this relationship is not developed yet.
        insight, created = Insight.objects.update_or_create(
            title=insightdata.get('insight'),
            defaults={
                'text': insightdata.get('introduction'),
                'image_alt': insightdata.get('image').get('alt')
            })

        original_file = insightdata.get('image').get('url')
        if original_file:
            if insight_image_exists(original_file) and filecmp.cmp(
                    original_file,
                    get_insight_image_path(original_file),
                    shallow=False):
                log.log(
                    f'Insight image already exists. Connecting the existing path to the database: `{get_insight_image_path(original_file)}`')
                insight.image.name = get_insight_image_path(
                    original_file, True)
                insight.save()
            else:
                with open(original_file, 'rb') as f:
                    insight.image = File(f, name=os.path.basename(f.name))
                    insight.save()
                if filecmp.cmp(original_file,
                               get_insight_image_path(original_file),
                               shallow=False):
                    log.info(
                        f'Insight image has been updated and was copied to the media path: `{get_insight_image_path(original_file)}`')
                else:
                    log.info(
                        f'Insight image was not found and has been copied to the media path: `{get_insight_image_path(original_file)}`')
        else:
            log.warning(
                f'An image for `{insight}` does not exist. A default image will be saved instead. If you want a particular image for the insight, follow the documentation.')
            insight.image.name = get_default_insight_image()
            insight.save()

        for title, sectiondata in insightdata.get('sections', {}).items():
            section, created = Section.objects.update_or_create(
                insight=insight,
                title=title,
                defaults={
                    'order': sectiondata.get('order'),
                    'text': sectiondata.get('content')
                })

        for operating_system, osdata in insightdata.get('os_specific', {}).items():
            related_section = Section.objects.get(
                title=osdata.get('related_section'))
            OperatingSystemSpecificSection.objects.update_or_create(
                section=related_section,
                operating_system=operating_system,
                defaults={'text': osdata.get('content')})

    log.log('Added/updated insights: ' +
            ', '.join([x.get('insight') for x in data]))

    if (log._save(data='ingestinsights', name='warnings.md', warnings=True)
            or log._save(data='ingestinsights', name='logs.md', warnings=False, logs=True)
            or log._save(data='ingestinsights', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))
    input = Input(path=__file__)

    workshops = get_all_existing_workshops()
    if options.get('name'):
        workshops = get_all_existing_workshops(options.get('name'))

    for slug, path in workshops:
        DATAFILE = f'{path}/{slug}.yml'
        d = get_yaml(DATAFILE, log=log)

        # Separate out data
        imagedata = d.get('image')
        frontmatterdata = d.get('sections').get('frontmatter')
        praxisdata = d.get('sections').get('theory-to-practice')
        lessondata = d.get('sections').get('lessons')
        full_name = d.get('name')
        parent_backend = d.get('parent_backend')
        parent_branch = d.get('parent_branch')
        parent_repo = d.get('parent_repo')

        # 1. ENTER WORKSHOP
        workshop, created = Workshop.objects.update_or_create(
            name=full_name,
            slug=dhri_slugify(full_name),
            defaults={
                'parent_backend': parent_backend,
                'parent_branch': parent_branch,
                'parent_repo': parent_repo,
                'image_alt': imagedata['alt']
            })

        def _get_valid_name(filename):
            # TODO: is there a Django built-in for this?
            return filename.replace('@', '')

        def _get_media_path(valid_filename):
            return settings.MEDIA_ROOT + '/' + Workshop.image.field.upload_to + valid_filename

        def _get_media_url(valid_filename):
            return Workshop.image.field.upload_to + valid_filename

        def _image_exists(valid_filename):
            media_path = _get_media_path(valid_filename)
            return os.path.exists(media_path)

        def _get_default_image():
            return Workshop.image.field.default

        if imagedata:
            source_file = imagedata['url']
            valid_filename = _get_valid_name(
                slug + '-' + os.path.basename(imagedata['url']))
            if not _image_exists(valid_filename) or not filecmp.cmp(
                    source_file, _get_media_path(valid_filename),
                    shallow=False):
                try:
                    with open(source_file, 'rb') as f:
                        workshop.image = File(f, name=valid_filename)
                        workshop.save()
                except FileNotFoundError:
                    log.error(
                        f'File `{source_file}` could not be found. Did you run `python manage.py buildworkshop` before you ran this command?')
            workshop.image.name = _get_media_url(valid_filename)
            workshop.save()
        else:
            log.warning(
                f'Workshop {workshop.name} does not have an image assigned to it. Add a filepath to an existing file in your datafile ({DATAFILE}) if you want to update this workshop. The default workshop image (`{os.path.basename(_get_default_image())}`) will be assigned.')
            workshop.image.name = Workshop.image.field.default
            workshop.save()
            if not _image_exists(
                    _get_valid_name(os.path.basename(_get_default_image()))):
                log.warning(
                    f'The default workshop image does not exist. You will want to add it manually to the correct folder: {_get_media_path("")}')

        # Save the slug in a format that matches the GitHub repositories
        # (special method `save_slug`)
        workshop.slug = slug
        workshop.save_slug()

        # 2. ENTER FRONTMATTER
        frontmatter, created = Frontmatter.objects.update_or_create(
            workshop=workshop,
            defaults={
                'abstract': frontmatterdata.get('abstract'),
                'estimated_time': frontmatterdata.get('estimated_time')
            })

        if frontmatterdata.get('ethical_considerations'):
            for point in frontmatterdata.get('ethical_considerations'):
                _, created = EthicalConsideration.objects.update_or_create(
                    frontmatter=frontmatter, label=point.get('annotation'))

        if frontmatterdata.get('learning_objectives'):
            for point in frontmatterdata.get('learning_objectives'):
                _, created = LearningObjective.objects.update_or_create(
                    frontmatter=frontmatter, label=point.get('annotation'))

        for cat in ['projects', 'readings', 'cheat_sheets', 'datasets']:
            if frontmatterdata.get(cat):
                category, add_field = None, None
                if cat == 'projects':
                    category = Resource.PROJECT
                    add_field = frontmatter.projects
                elif cat == 'readings':
                    category = Resource.READING
                    add_field = frontmatter.readings
                elif cat == 'cheat_sheets':
                    category = Resource.CHEATSHEET
                    add_field = frontmatter.cheat_sheets
                elif cat == 'datasets':
                    category = Resource.DATASET
                    add_field = frontmatter.datasets

                for point in frontmatterdata.get(cat):
                    if not add_field or not category:
                        log.error(
                            f'Cannot interpret category `{cat}`. Make sure the script is correct and corresponds with the database structure.')
                    obj, created = Resource.objects.update_or_create(
                        category=category,
                        title=point.get('linked_text'),
                        url=point.get('url'),
                        annotation=point.get('annotation'))
                    if obj not in add_field.all():
                        add_field.add(obj)

        if frontmatterdata.get('contributors'):
            for point in frontmatterdata.get('contributors'):
                profile = None
                try:
                    profile = Profile.objects.get(
                        user__first_name=point.get('first_name'),
                        user__last_name=point.get('last_name'))
                except:
                    for p in Profile.objects.all():
                        if f'{p.user.first_name} {p.user.last_name}' == point.get('full_name'):
                            profile = p
                            log.info(
                                f'An in-depth search revealed a profile matching the full name for `{workshop.name}` contributor `{point.get("first_name")} {point.get("last_name")}`. It may or may not be the correct person, so make sure you verify it manually.')
                    if not profile:
                        log.info(
                            f'Could not find a user profile on the curriculum website for contributor `{point.get("full_name")}` (searching by first name `{point.get("first_name")}` and last name `{point.get("last_name")}`).')
                contributor, created = Contributor.objects.update_or_create(
                    first_name=point.get('first_name'),
                    last_name=point.get('last_name'),
                    defaults={
                        'url': point.get('link'),
                        'profile': profile
                    })
                collaboration, created = Collaboration.objects.update_or_create(
                    frontmatter=frontmatter,
                    contributor=contributor,
                    defaults={
                        'current': point.get('current'),
                        'role': point.get('role')
                    })

        # 3. ENTER PRAXIS
        praxis, created = Praxis.objects.update_or_create(
            workshop=workshop,
            defaults={
                'intro': praxisdata.get('intro'),
            })

        for cat in ['discussion_questions', 'next_steps']:
            if praxisdata.get(cat):
                obj = None
                if cat == 'discussion_questions':
                    obj = DiscussionQuestion
                elif cat == 'next_steps':
                    obj = NextStep
                # TODO: Should we pull out order manually here? Not necessary, right?
                for order, point in enumerate(praxisdata[cat], start=1):
                    obj.objects.update_or_create(
                        praxis=praxis,
                        label=point.get('annotation'),
                        defaults={'order': order})

        for cat in ['further_readings', 'further_projects', 'tutorials']:
            if praxisdata.get(cat):
                category, add_field = None, None
                if cat == 'further_readings':
                    category = Resource.READING
                    add_field = praxis.further_readings
                elif cat == 'further_projects':
                    category = Resource.PROJECT
                    add_field = praxis.further_projects
                elif cat == 'tutorials':
                    category = Resource.TUTORIAL
                    add_field = praxis.tutorials

                for point in praxisdata.get(cat):
                    if not add_field or not category:
                        log.error(
                            f'Cannot interpret category `{cat}`. Make sure the script is correct and corresponds with the database structure.')
                    try:
                        obj, created = Resource.objects.update_or_create(
                            category=category,
                            title=point.get('linked_text'),
                            url=point.get('url'),
                            annotation=point.get('annotation'))
                        if obj not in add_field.all():
                            add_field.add(obj)
                    except IntegrityError:
                        obj = Resource.objects.get(
                            category=category,
                            title=point.get('linked_text'),
                            url=point.get('url'))
                        obj.annotation = point.get('annotation')
                        obj.save()  # persist the updated annotation
                        if obj not in add_field.all():
                            add_field.add(obj)
                        log.info(
                            f'Another resource with the same URL, title, and category already existed, so it was updated with a new annotation: **{point.get("linked_text")} (old)**\n{point.get("annotation")}\n-------\n**{obj.title} (new)**\n{obj.annotation}')

        # 4. ENTER LESSONS
        for lessoninfo in lessondata:
            lesson, created = Lesson.objects.update_or_create(
                workshop=workshop,
                title=lessoninfo.get('header'),
                defaults={
                    'order': lessoninfo.get('order'),
                    'text': lessoninfo.get('content'),
                })

            for image in lessoninfo.get('lesson_images'):
                LessonImage.objects.update_or_create(
                    url=image.get('path'), lesson=lesson, alt=image.get('alt'))

            if not lessoninfo.get('challenge') and lessoninfo.get('solution'):
                log.error(
                    f'Lesson `{lesson.title}` (in workshop {workshop}) has a solution but no challenge. Correct the files on GitHub, rerun the buildworkshop command, and then re-attempt the ingestworkshop command. Alternatively, you can change the datafile content manually.')

            if lessoninfo.get('challenge'):
                challenge, created = Challenge.objects.update_or_create(
                    lesson=lesson,
                    title=lessoninfo['challenge'].get('header'),
                    defaults={'text': lessoninfo['challenge'].get('content')})

                if lessoninfo.get('solution'):
                    solution, created = Solution.objects.update_or_create(
                        challenge=challenge,
                        title=lessoninfo['solution'].get('header'),
                        defaults={'text': lessoninfo['solution'].get('content')})

            if lessoninfo.get('evaluation'):
                evaluation, created = Evaluation.objects.get_or_create(
                    lesson=lesson)
                for point in lessoninfo['evaluation'].get('content'):
                    question, created = Question.objects.update_or_create(
                        evaluation=evaluation, label=point.get('question'))
                    for is_correct, answers in point.get('answers').items():
                        is_correct = is_correct == 'correct'
                        for answertext in answers:
                            answer, created = Answer.objects.update_or_create(
                                question=question,
                                label=answertext,
                                defaults={'is_correct': is_correct})

            if lessoninfo.get('keywords'):
                # lessoninfo['keywords'].get('header')
                # TODO: not doing anything with the keyword header yet
                for keyword in lessoninfo['keywords'].get('content'):
                    terms = Term.objects.filter(term__iexact=keyword)
                    if terms.count() == 1:
                        lesson.terms.add(terms[0])
                    elif terms.count() == 0:
                        log.warning(
                            f'Keyword `{keyword}` (used in lesson `{lesson.title}`, workshop `{workshop}`) cannot be found in the existing glossary. Are you sure it is in the glossary and synchronized with the database? Make sure the data file for the glossary is available ({GLOSSARY_FILE}) and that the term is defined in the file. Then run python manage.py ingestglossary.')
                    else:
                        log.error(
                            f'Multiple definitions of `{keyword}` exist in the database. Try resetting the glossary and rerun python manage.py ingestglossary before you run the ingestworkshop command again.')

    log.log('Added/updated workshops: ' + ', '.join([x[0] for x in workshops]))

    if not options.get('no_reminder'):
        log.log(
            'Do not forget to run `ingestprerequisites` after running the `ingestworkshop` command (without the --name flag).',
            color='yellow')

    if (log._save(data='ingestworkshop', name='warnings.md', warnings=True)
            or log._save(data='ingestworkshop', name='logs.md', warnings=False, logs=True)
            or log._save(data='ingestworkshop', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))
    input = Input(path=__file__)

    test_for_required_files(REQUIRED_PATHS=REQUIRED_PATHS, log=log)
    data = get_yaml(FULL_PATH, log=log)

    for userdata in data.get('users', []):
        if not userdata.get('username'):
            log.error(
                f'Username is required. Check the datafile ({FULL_PATH}) to make sure that every user in the file is assigned a username.')

        finder = User.objects.filter(username=userdata.get('username'))
        if finder.count():
            finder.update(first_name=userdata.get('first_name'),
                          last_name=userdata.get('last_name'),
                          email=userdata.get('email'),
                          is_staff=userdata.get('staff'))
            user = User.objects.get(username=userdata.get('username'))
        else:
            func = User.objects.create_user
            if userdata.get('superuser'):
                func = User.objects.create_superuser
            user = func(username=userdata.get('username'),
                        first_name=userdata.get('first_name'),
                        last_name=userdata.get('last_name'),
                        email=userdata.get('email'),
                        is_staff=userdata.get('staff'))
            user.refresh_from_db()

        # If None, set_password sets an unusable password, see
        # https://docs.djangoproject.com/en/3.1/ref/contrib/auth/#django.contrib.auth.models.User.set_password
        if userdata.get('password'):
            user.set_password(userdata['password'])
        else:
            if options.get('nopass'):
                user.set_unusable_password()
            else:
                _password = input.ask(
                    f'Password for `{userdata.get("username")}`?')
                user.set_password(_password)
        user.save()

        if not userdata.get('profile'):
            log.error(
                f'User {userdata.get("username")} does not have profile information (bio, image, links, and/or pronouns). Make sure you add all of this information for each user in the datafile ({FULL_PATH}) before running this command.')

        profile, created = Profile.objects.update_or_create(
            user=user,
            defaults={
                'bio': userdata.get('profile', {}).get('bio'),
                'pronouns': userdata.get('profile', {}).get('pronouns')
            })

        if userdata.get('profile', {}).get('image'):
            profile_pic = userdata.get('profile', {}).get('image')
            if profile_picture_exists(profile_pic) and filecmp.cmp(
                    profile_pic, get_profile_picture_path(profile_pic)):
                profile.image.name = get_profile_picture_path(
                    profile_pic, True)
                profile.save()
            else:
                with open(profile_pic, 'rb') as f:
                    profile.image = File(f, name=os.path.basename(f.name))
                    profile.save()
        else:
            profile.image.name = get_default_profile_picture()
            profile.save()

        if userdata.get('profile', {}).get('links'):
            for link in userdata.get('profile', {}).get('links'):
                if link.get('cat') == 'personal':
                    link['cat'] = ProfileLink.PERSONAL
                elif link.get('cat') == 'project':
                    link['cat'] = ProfileLink.PROJECT
                else:
                    log.error(
                        f'Link {link.get("url")} is assigned a category that has no correspondence in the database model: {link.get("cat")}. Please set the category to either `personal` or `project`.')
                _, _ = ProfileLink.objects.update_or_create(
                    profile=profile,
                    url=link.get('url'),
                    defaults={
                        'cat': link.get('cat'),
                        'label': link.get('label')
                    })

    if not profile_picture_exists(get_default_profile_picture(full_path=True)):
        if data.get('default', False) and os.path.exists(data.get('default')):
            from shutil import copyfile
            copyfile(data.get('default'),
                     get_default_profile_picture(full_path=True))
            log.log('Default profile picture added to the /media/ directory.')
        elif not data.get('default'):
            log.error(
                f'No default profile picture was defined in your datafile (`{FULL_PATH}`). Add the file, and then add the path to the file (relative to the `django-app` directory) in a `default` dictionary in your `users.yml` file, like this:\n'
                + '`default: backend/setup/profile-pictures/default.jpg`')
        elif not os.path.exists(data.get('default')):
            log.error(
                f'The default profile picture (`{data.get("default")}`) in your datafile (`{FULL_PATH}`) does not exist in its expected directory (`{os.path.dirname(data.get("default"))}`). Add the file to that directory, or update the datafile accordingly, before running this command.')

    log.log('Added/updated users: ' +
            ', '.join([x.get('username') for x in data.get('users')]))

    if (log._save(data='ingestusers', name='warnings.md', warnings=True)
            or log._save(data='ingestusers', name='logs.md', warnings=False, logs=True)
            or log._save(data='ingestusers', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))

    if options.get('reset'):
        if options.get('force'):
            i = get_or_default(
                'Warning: This script is about to remove ALL OF THE OBJECTS from the database. Are you sure you want to continue?',
                color='red',
                default_variable='N')
            if i.lower() != 'y':
                log.error('User opted to stop.')

        for model in all_models:
            name = model.__name__.replace('_', ' ')
            if not options.get('force'):
                i = get_or_default(
                    f'Warning: This will remove all the `{name}` objects. Are you sure you want to continue?',
                    color='red',
                    default_variable='N')
                if i.lower() != 'y':
                    continue
            model.objects.all().delete()
            log.log(f'Removed all `{name}` objects.')

    if options.get('resetusers'):
        if options.get('force'):
            i = get_or_default(
                'Warning: This script is about to remove ALL OF THE USERS from the database. Are you sure you want to continue?',
                color='red',
                default_variable='N')
            if i.lower() != 'y':
                log.error('User opted to stop.')
        User.objects.all().delete()
        log.log('Removed all users.')

    call_command('ingestgroups', force=True,
                 silent=options.get('silent'), verbose=options.get('verbose'))
    call_command('ingestusers', force=True,
                 silent=options.get('silent'), verbose=options.get('verbose'))
    call_command('ingestglossary', force=True,
                 silent=options.get('silent'), verbose=options.get('verbose'))
    call_command('ingestinstalls', force=True,
                 silent=options.get('silent'), verbose=options.get('verbose'))
    call_command('ingestinsights', force=True,
                 silent=options.get('silent'), verbose=options.get('verbose'))
    call_command('ingestworkshop', force=True,
                 silent=options.get('silent'), verbose=options.get('verbose'),
                 no_reminder=True)
    call_command('ingestsnippets', force=True,
                 silent=options.get('silent'), verbose=options.get('verbose'))
    call_command('ingestblurbs', force=True,
                 silent=options.get('silent'), verbose=options.get('verbose'))
    call_command('ingestprerequisites', force=True,
                 silent=options.get('silent'), verbose=options.get('verbose'))
    call_command('ingestfragile', force=True,
                 silent=options.get('silent'), verbose=options.get('verbose'))
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))
    input = Input(path=__file__)

    test_for_required_files(REQUIRED_PATHS=REQUIRED_PATHS, log=log)
    data = get_yaml(FULL_PATH, log=log)

    for installdata in data:
        for operating_system in installdata.get('instructions'):
            software, created = Software.objects.get_or_create(
                operating_system=operating_system,
                software=installdata.get('software'))

            instruction, created = Instruction.objects.update_or_create(
                software=software,
                defaults={
                    'what': installdata.get('what'),
                    'why': installdata.get('why')
                })

            original_file = installdata.get('image')
            if original_file:
                if instruction_image_exists(original_file) and filecmp.cmp(
                        original_file,
                        get_instruction_image_path(original_file),
                        shallow=False):
                    log.log(
                        f'Instruction image already exists. Ensuring the path is in the database: `{get_instruction_image_path(original_file)}`')
                    instruction.image.name = get_instruction_image_path(
                        original_file, True)
                    instruction.save()
                else:
                    with open(original_file, 'rb') as f:
                        instruction.image = File(
                            f, name=os.path.basename(f.name))
                        instruction.save()
                    if filecmp.cmp(original_file,
                                   get_instruction_image_path(original_file)):
                        log.info(
                            f'Instruction image has been updated, so it was copied to the media path: `{get_instruction_image_path(original_file)}`')
                    else:
                        log.info(
                            f'Instruction image has been copied to the media path: `{get_instruction_image_path(original_file)}`')
            else:
                log.warning(
                    f'An image for `{software}` does not exist. A default image will be saved instead. If you want a particular image for the installation instructions, follow the documentation.')
                instruction.image.name = get_default_instruction_image()
                instruction.save()

            for stepdata in installdata.get('instructions').get(operating_system):
                step, created = Step.objects.update_or_create(
                    instruction=instruction,
                    order=stepdata.get('step'),
                    defaults={
                        'header': stepdata.get('header'),
                        'text': stepdata.get('html')
                    })

                for order, d in enumerate(stepdata.get('screenshots'), start=1):
                    path = d['path']
                    alt_text = d['alt']
                    if os.path.exists(get_screenshot_media_path(path)) and \
                            filecmp.cmp(path,
                                        get_screenshot_media_path(path),
                                        shallow=False):
                        s, _ = Screenshot.objects.get_or_create(
                            step=step, alt_text=alt_text, order=order)
                        s.image = get_screenshot_media_path(
                            path, relative_to_upload_field=True)
                        s.save()
                        log.log(
                            f'Screenshot already exists: `{get_screenshot_media_path(path)}`')
                    else:
                        s, _ = Screenshot.objects.get_or_create(
                            step=step, alt_text=alt_text, order=order)
                        with open(path, 'rb') as f:
                            s.image = File(f, name=os.path.basename(f.name))
                            s.save()
                        if not filecmp.cmp(path,
                                           get_screenshot_media_path(path),
                                           shallow=False):
                            log.log(
                                f'Screenshot was updated, so it was re-saved: `{get_screenshot_media_path(path)}`')
                        else:
                            log.log(
                                f'New screenshot saved: `{get_screenshot_media_path(path)}`')

    log.log('Added/updated installation instructions: ' +
            ', '.join([f'{x["software"]}' for x in data]))

    if (log._save(data='ingestinstalls', name='warnings.md', warnings=True)
            or log._save(data='ingestinstalls', name='logs.md', warnings=False, logs=True)
            or log._save(data='ingestinstalls', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))

    workshops = get_all_existing_workshops()
    if options.get('name'):
        workshops = get_all_existing_workshops(options.get('name'))

    for slug, path in workshops:
        workshop, frontmatter = None, None
        DATAFILE = f'{path}/{slug}.yml'
        superdata = get_yaml(DATAFILE, log=log)

        # Separate out data
        frontmatterdata = superdata.get('sections').get('frontmatter')
        name = superdata.get('name')

        # 1. FIND WORKSHOP
        try:
            workshop = Workshop.objects.get(name=name)
        except:
            log.error(
                f'The workshop `{slug}` could not be found. Make sure you ran python manage.py ingestworkshop --name {slug} before running this command.')

        # 2. FIND FRONTMATTER
        try:
            frontmatter = Frontmatter.objects.get(workshop=workshop)
        except:
            log.error(
                f'Frontmatter for the workshop `{slug}` could not be found. Make sure you ran python manage.py ingestworkshop --name {slug} before running this command.')

        for prereqdata in frontmatterdata.get('prerequisites'):
            linked_workshop, linked_installs, linked_insight = None, None, None
            url = prereqdata.get('url')
            category = Prerequisite.EXTERNAL_LINK

            if prereqdata.get('type') == 'workshop':
                linked_workshop = search_workshop(
                    prereqdata.get('potential_name'), name, log, DATAFILE)
                q = f'Prerequisite workshop `{linked_workshop.name}`'
                category = Prerequisite.WORKSHOP
                log.log(
                    f'Linking workshop prerequisite for `{name}`: {linked_workshop.name}')
            elif prereqdata.get('type') == 'install':
                # Currently not using prereqdata.get('potential_slug_fragment')
                # - might be something we want to do in the future
                linked_installs = search_install(
                    prereqdata.get('potential_name'), name, log, DATAFILE)
                q = 'Prerequisite installations ' + \
                    ', '.join([f'`{x.software}`' for x in linked_installs])
                category = Prerequisite.INSTALL
                log.log(
                    f'Linking installation prerequisite for `{name}`: {[x.software for x in linked_installs]}')
            elif prereqdata.get('type') == 'insight':
                linked_insight = search_insight(
                    prereqdata.get('potential_name'),
                    prereqdata.get('potential_slug_fragment'), name, log,
                    DATAFILE)
                q = f'Prerequisite insight `{linked_insight.title}`'
                category = Prerequisite.INSIGHT
                log.log(
                    f'Linking insight prerequisite for `{name}`: {linked_insight.title}')

            if category == Prerequisite.EXTERNAL_LINK:
                label = prereqdata.get('url_text')
            else:
                label = ''

            clean_up(category, linked_workshop, linked_insight, url)

            prerequisite, created = Prerequisite.objects.update_or_create(
                category=category,
                linked_workshop=linked_workshop,
                linked_insight=linked_insight,
                url=url,
                defaults={
                    'text': prereqdata.get('text', ''),
                    'required': prereqdata.get('required'),
                    'recommended': prereqdata.get('recommended'),
                    'label': label
                })

            if linked_installs:
                for software in linked_installs:
                    through = PrerequisiteSoftware(
                        prerequisite=prerequisite,
                        software=software,
                        required=prereqdata.get('required'),
                        recommended=prereqdata.get('recommended'))
                    through.save()

            frontmatter.prerequisites.add(prerequisite)

    log.log('Added/updated requirements for workshops: ' +
            ', '.join([x[0] for x in workshops]))

    if (log._save(data='ingestprerequisites', name='warnings.md', warnings=True)
            or log._save(data='ingestprerequisites', name='logs.md', warnings=False, logs=True)
            or log._save(data='ingestprerequisites', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)
def handle(self, *args, **options):
    log = Logger(path=__file__,
                 force_verbose=options.get('verbose'),
                 force_silent=options.get('silent'))
    input = Input(path=__file__)

    test_for_required_files(REQUIRED_PATHS=REQUIRED_PATHS, log=log)
    data = get_yaml(FULL_PATH, log=log)

    for group_name, permission_set in data.items():
        group, created = Group.objects.get_or_create(name=group_name)

        if not created and not options.get('force'):
            choice = input.ask(
                f'Group `{group_name}` already exists. Update with new information? [y/N]')
            if choice.lower() != 'y':
                continue

        for codename in permission_set:
            try:
                # Find the permission object and add it to the group
                perm = Permission.objects.get(codename=codename)
                group.permissions.add(perm)
                log.log(f'Adding {codename} to group {group}.')
            except Permission.DoesNotExist:
                log.error(f'{codename} not found.')

    if (log._save(data='ingestgroups', name='warnings.md', warnings=True)
            or log._save(data='ingestgroups', name='logs.md', warnings=False, logs=True)
            or log._save(data='ingestgroups', name='info.md', warnings=False, logs=False, info=True)):
        log.log(
            f'Log files with any warnings and logging information are now available in: `{log.LOG_DIR}`',
            force=True)