def create_production(build_dir, backups, script_dir):
    """Put the staging version to production hosted at register.geostandaarden.nl"""
    print "Building production..."
    logging.info("Building production...")

    deploy = OSFS('..')

    if deploy.exists(backups) == False:
        deploy.makedir(backups)

    deploy.copydir('%s/%s' % (script_dir, build_dir), 'register-new', overwrite=True)

    if deploy.exists('register') == True:
        # server refuses to recursively remove register/staging
        # hence we explicitly remove symbolic link to staging
        try:
            deploy.remove('register/staging/staging')
        except ResourceNotFoundError:
            print "Warning, register/staging/staging not found..."

        try:
            deploy.removedir('register/staging')
        except ResourceNotFoundError:
            print "Warning, register/staging not found..."

        backup_dir = time.strftime('%Y-%m-%d-%H-%M-%S')
        # if deploy.exists('backups/%s' % backup_dir):
        #     deploy.removedir('backups/%s' % backup_dir, force=True)
        deploy.copydir('register', 'backups/%s' % backup_dir, overwrite=True)

    try:
        deploy.movedir('register', 'register-old', overwrite=True)
    except ResourceNotFoundError:
        pass

    deploy.movedir('register-new', 'register', overwrite=True)

    # create symbolic link to standalone staging directory
    # fails if production is built first...
    deploy.makedir('register/staging')
    call('cd ../register/staging; ln -s ../../staging', shell=True)
    call('cd ../register; ln -s ../%s/log.txt' % script_dir, shell=True)

    try:
        deploy.removedir('register-old', force=True)
    except ResourceNotFoundError:
        pass

    call('chmod -R a+rx ../register', shell=True)

    print "Done building production..."
    logging.info("Production built successfully!")
def cmd_makepreviews(self, *params, **options):
    PREVIEW_ICON_SIZE = 32
    WIDTH_COUNT = 11
    BORDER = 5
    ICON_DIMENSIONS = (BORDER * 2 + PREVIEW_ICON_SIZE)
    preview_width = ICON_DIMENSIONS * WIDTH_COUNT

    media_fs = OSFS(settings.MEDIA_ROOT)
    media_fs.makedir('iconsetpreviews', allow_recreate=True)
    previews_fs = media_fs.opendir('iconsetpreviews')

    for catalog in IconCatalog.objects.all():
        for category in catalog.get_categories():
            filename = "%s.%s.jpg" % (catalog.name, category)
            icons = catalog.icon_set.filter(category=category).order_by('name')
            num_icons = icons.count()

            icons_height_count = (num_icons + WIDTH_COUNT - 1) // WIDTH_COUNT
            preview_height = icons_height_count * ICON_DIMENSIONS

            preview_img = Image.new('RGB', (preview_width, preview_height), (255, 255, 255))
            print preview_width, preview_height

            for i, icon in enumerate(icons):
                y, x = divmod(i, WIDTH_COUNT)
                pth = icon.path.replace('[SIZE]', str(PREVIEW_ICON_SIZE))
                icon_pth = media_fs.getsyspath(pth)
                img = Image.open(icon_pth)
                if img.size[0] != img.size[1]:
                    img = img.crop((0, 0, PREVIEW_ICON_SIZE, PREVIEW_ICON_SIZE))
                try:
                    preview_img.paste(img, (x * ICON_DIMENSIONS + BORDER, y * ICON_DIMENSIONS + BORDER), img)
                except ValueError:
                    preview_img.paste(img, (x * ICON_DIMENSIONS + BORDER, y * ICON_DIMENSIONS + BORDER))

            sys_filename = previews_fs.getsyspath(filename)
            print sys_filename
            preview_img.save(previews_fs.getsyspath(filename), quality=75)
def course_descriptor_inheritance_check(self, descriptor, from_date_string, unicorn_color, course_run=RUN):
    """
    Checks to make sure that metadata inheritance on a course descriptor is respected.
    """
    # pylint: disable=protected-access
    print((descriptor, descriptor._field_data))
    self.assertEqual(descriptor.due, ImportTestCase.date.from_json(from_date_string))

    # Check that the child inherits due correctly
    child = descriptor.get_children()[0]
    self.assertEqual(child.due, ImportTestCase.date.from_json(from_date_string))
    # need to convert v to canonical json b4 comparing
    self.assertEqual(
        ImportTestCase.date.to_json(ImportTestCase.date.from_json(from_date_string)),
        child.xblock_kvs.inherited_settings['due']
    )

    # Now export and check things
    file_system = OSFS(mkdtemp())
    descriptor.runtime.export_fs = file_system.makedir(u'course', recreate=True)
    node = etree.Element('unknown')
    descriptor.add_xml_to_node(node)

    # Check that the exported xml is just a pointer
    print(("Exported xml:", etree.tostring(node)))
    self.assertTrue(is_pointer_tag(node))
    # but it's a special case course pointer
    self.assertEqual(node.attrib['course'], COURSE)
    self.assertEqual(node.attrib['org'], ORG)

    # Does the course still have unicorns?
    with descriptor.runtime.export_fs.open(u'course/{course_run}.xml'.format(course_run=course_run)) as f:
        course_xml = etree.fromstring(f.read())

    self.assertEqual(course_xml.attrib['unicorn'], unicorn_color)

    # the course and org tags should be _only_ in the pointer
    self.assertNotIn('course', course_xml.attrib)
    self.assertNotIn('org', course_xml.attrib)
    # did we successfully strip the url_name from the definition contents?
    self.assertNotIn('url_name', course_xml.attrib)

    # Does the chapter tag now have a due attribute?
    # hardcoded path to child
    with descriptor.runtime.export_fs.open(u'chapter/ch.xml') as f:
        chapter_xml = etree.fromstring(f.read())
    self.assertEqual(chapter_xml.tag, 'chapter')
    self.assertNotIn('due', chapter_xml.attrib)
def build_staging(source, destination_temp, destination):
    set_repeat('none')
    cleanup(source, destination_temp)

    root = OSFS('./')  # 'c:\Users\<login name>' on Windows
    # root.makedir(source, allow_recreate=True)
    root.makedir(destination_temp, allow_recreate=True)

    # TODO: use this approach to include standards that are not managed on GitHub
    # standards = OSFS(source).listdir(dirs_only=True)
    with open('repos-dev.json') as f:
        standards = load(f)

    backend.fetch_repos(root, destination_temp, standards, source)
    backend.build_folders(source, destination_temp, standards, root)
    webpages.create_overview_page(standards, source, destination_temp)
    backend.create_staging(destination_temp, destination)

    print "Done!"
def scrape_indian_express(config):
    # 2006 onwards new archive page
    sy, ey, sm, em, sd, ed = config
    ofs = OSFS("archives/indian_express", create=True)
    for year in range(sy, ey + 1):
        yfs = ofs.makedir(str(year), recreate=True)
        for month in range(sm, em + 1):
            mfs = yfs.makedir(str(month), recreate=True)
            for day in range(sd, ed + 1):
                try:
                    cd = datetime(year, month, day)
                except ValueError:
                    continue
                dfs = mfs.makedir(str(day), recreate=True)
                _file = cd.strftime("%Y-%m-%d_manifest.csv")
                _path = dfs.getsyspath(_file)
                if dfs.exists(_file):
                    continue
                url = "http://archive.indianexpress.com/archive/news/%d/%d/%d/" % (day, month, year)
                print(url)
                resp = requests.get(url)
                doc = lxml.html.fromstring(resp.content)
                rows = []
                section = None
                for el in doc.xpath("//div[@id='box_330'] | //div[@id='box_330_rt']"):
                    try:
                        section = el.xpath("./h4")[0].text
                    except:
                        pass
                    for art in el.xpath(".//a"):
                        text = art.tail
                        link = art.attrib.get("href")
                        if link.startswith("/"):
                            link = urljoin(url, link)
                        rows.append({
                            "date": cd.strftime("%Y-%m-%d"),
                            "text": text,
                            "link": link,
                            "section": section,
                            "source": "The Indian Express"
                        })
                pandas.DataFrame(rows).to_csv(_path, index=False, encoding="utf8")
def dl_pages():
    base = 'band_pages'
    bandpages = OSFS(base)
    c = 0
    with requests.session() as session:
        with open('bandpages_original.csv', 'r') as bndin:
            for row in DictReader(bndin):
                band = row['band']
                wlink = row['wlink']
                malink = row['malink']
                if not bandpages.isdir(band):
                    bandpages.makedir(band)
                session.headers.update({'User-Agent': useragents[c]})
                request = session.get(wlink)
                write_page(band, 'wiki', request)
                if malink != 'none':
                    request = session.get(malink)
                    write_page(band, 'ma', request)
                c += 1
                if c == 3:
                    c = 0
    bandpages.close()
def scrape_toi(config):
    sy, ey, sm, em, sd, ed = config
    ofs = OSFS("archives/toi", create=True)
    # Found in the JS on the page with the calendar on it.
    base_date = datetime(1899, 12, 30)
    for year in range(sy, ey + 1):
        yfs = ofs.makedir(str(year), recreate=True)
        for month in range(sm, em + 1):
            mfs = yfs.makedir(str(month), recreate=True)
            for day in range(sd, ed + 1):
                try:
                    cd = datetime(year, month, day)
                except ValueError:
                    continue
                start = math.floor((cd - base_date).total_seconds() / 86400)
                if start > 43300:
                    break
                dfs = mfs.makedir(str(day), recreate=True)
                _file = cd.strftime("%Y-%m-%d_manifest.csv")
                _path = dfs.getsyspath(_file)
                if dfs.exists(_file):
                    continue
                url = "https://timesofindia.indiatimes.com/%d/%d/%d/archivelist/year-%d,month-%d,starttime-%d.cms" % (
                    year, month, day, year, month, start)
                print(url)
                resp = requests.get(url)
                doc = lxml.html.fromstring(resp.content)
                rows = []
                for el in doc.xpath('/html/body/div[1]/table[2]//tr[2]//a'):
                    link = el.attrib.get('href')
                    if 'timesofindia.indiatimes.com' not in link:
                        continue
                    text = el.text
                    rows.append({
                        "date": cd.strftime("%Y-%m-%d"),
                        "text": text,
                        "link": link,
                        "source": "Times of India"
                    })
                pandas.DataFrame(rows).to_csv(_path, index=False, encoding="utf8")
def cmd_get(self, *params, **options):
    try:
        url = params[0]
    except IndexError:
        print "get <url>"
        return

    media_fs = OSFS(settings.MEDIA_ROOT)
    media_fs.makedir('favicons', allow_recreate=True)

    favicon_path = url_to_path(url)
    favicon_fs = media_fs.makeopendir(favicon_path, recursive=True)
    orig_favicon_fs = favicon_fs.makeopendir('originals')

    output_path = orig_favicon_fs.getsyspath('orig')
    convert_path = orig_favicon_fs.getsyspath('icon.png')

    try:
        icon_info = download_favicon(url, output_path, convert_path)
    except URLError, e:
        print "ERROR: %s (%s)" % (e, url)
        return
def scrape_hindu_new(config):
    # 2006 onwards new archive page
    sy, ey, sm, em, sd, ed = config
    ofs = OSFS("archives/hindu", create=True)
    for year in range(sy, ey + 1):
        yfs = ofs.makedir(str(year), recreate=True)
        for month in range(sm, em + 1):
            mfs = yfs.makedir(str(month), recreate=True)
            for day in range(sd, ed + 1):
                try:
                    cd = datetime(year, month, day)
                except ValueError:
                    continue
                dfs = mfs.makedir(str(day), recreate=True)
                _file = cd.strftime("%Y-%m-%d_manifest.csv")
                _path = dfs.getsyspath(_file)
                if dfs.exists(_file):
                    continue
                url = "https://www.thehindu.com/archive/print/%d/%02d/%02d/" % (year, month, day)
                print(url)
                resp = requests.get(url)
                doc = lxml.html.fromstring(resp.content)
                rows = []
                for el in doc.xpath("//ul[@class='archive-list']//li//a"):
                    link = el.attrib.get('href')
                    text = el.text
                    rows.append({
                        "date": cd.strftime("%Y-%m-%d"),
                        "text": text,
                        "link": link,
                        "section": extract_section_hindu(link),
                        "source": "The Hindu"
                    })
                pandas.DataFrame(rows).to_csv(_path, index=False, encoding="utf8")
def cmd_render(self, *params, **options):
    icon_sizes = ','.join(str(s) for s in sorted(settings.DESKTOP_FORCE_ICON_SIZES))

    num_rendered = 0

    from linkstop.threadpool import ThreadPool
    thread_pool = ThreadPool(3, 6)

    try:
        max_renders = int(params[0])
    except IndexError:
        max_renders = None

    qs = FavIcon.objects.filter(rendered=False).order_by('pk')

    media_fs = OSFS(settings.MEDIA_ROOT)
    media_fs.makedir('favicons', allow_recreate=True)

    try:
        for favicon in qs:
            original_sizes = favicon.get_original_sizes()
            if not original_sizes:
                continue

            remaining_sizes = sorted(set(settings.DESKTOP_FORCE_ICON_SIZES).difference(favicon.get_sizes()))

            for size in remaining_sizes:
                print "Rendering %ix%i icon" % (size, size)
                image_path = os.path.join(
                    settings.MEDIA_ROOT,
                    url_to_path(favicon.url),
                    'icon%i.png' % original_sizes[-1]
                )
                output_path = get_size_path(favicon.url, size)
                thread_pool.job(
                    render,
                    (size, size),
                    image_path,
                    output_path,
                    settings.FAVICON_POV_SCENE
                )

            favicon.sizes = icon_sizes
            favicon.rendered = True
            favicon.save()

            #favicon_path = url_to_path(favicon.url)
            #favicon_fs = media_fs.makeopendir(favicon_path, recursive=True)
            favicon_fs = OSFS(get_icon_directory(favicon.url), create=True)
            favicon.export(favicon_fs.open('scan.pik', 'w'))
            #pickle_path = favicon_fs.getsyspath('scan.pik')

            num_rendered += 1
            if max_renders is not None and num_rendered >= max_renders:
                break
    finally:
        thread_pool.flush_quit()

    print "%i icon sets rendered" % num_rendered
def scrape_hindu_old(config):
    # till 2005 -- different view for archives
    sy, ey, sm, em, sd, ed = config
    ofs = OSFS("archives/hindu", create=True)
    for year in range(sy, ey + 1):
        yfs = ofs.makedir(str(year), recreate=True)
        for month in range(sm, em + 1):
            mfs = yfs.makedir(str(month), recreate=True)
            for day in range(sd, ed + 1):
                try:
                    cd = datetime(year, month, day)
                except ValueError:
                    continue
                dfs = mfs.makedir(str(day), recreate=True)
                _file = cd.strftime("%Y-%m-%d_manifest.csv")
                _path = dfs.getsyspath(_file)
                if dfs.exists(_file):
                    continue
                url = "https://www.thehindu.com/%d/%02d/%02d/99hdline.htm" % (year, month, day)
                resp = requests.get(url)
                doc = lxml.html.fromstring(resp.content)
                print(url)
                if not resp.status_code == requests.codes.ok:
                    continue
                sectionhead = None
                rows = []
                sel = doc.xpath('//td[@width=380]')[0]
                for child in sel.getchildren():
                    try:
                        content = child.text_content()
                    except ValueError:
                        continue
                    if child.tag in ["table", "p"]:
                        sectionhead = ''.join(child.itertext()).strip()
                    if child.tag in ['div', 'li']:
                        frag = child.xpath(".//a")[0]
                        rows.append({
                            "date": cd.strftime("%Y-%m-%d"),
                            "text": frag.text_content(),
                            "link": urljoin(url, frag.attrib['href']),
                            "section": sectionhead,
                            "source": "The Hindu"
                        })
                        sh = child.xpath(".//table")
                        if sh:
                            sectionhead = ''.join(sh[0].itertext()).strip()
                print(url, len(rows))
                pandas.DataFrame(rows).to_csv(_path, index=False, encoding="utf8")
def create_production(destination, backups, script_entry_path, production_path):
    """Put the staging version to production hosted at register.geostandaarden.nl"""
    ## TODO: feed this function absolute paths
    print "Building production..."
    logging.info("Building production...")

    production = OSFS(production_path)

    # if production.exists(backups) == False:
    #     production.makedir(backups)

    # copy newly baked register/staging to production directory
    # NOTE: only build paths within script_dir are currently supported
    call('cp -r %s %s' % (ospath.join(build_path, destination), ospath.join(production_path, destination + '-new')), shell=True)
    # production.copydir('%s/%s/%s' % (script_dir, build_path, destination), destination + '-new', overwrite=True)

    if production.exists(destination) == True:
        # server refuses to recursively remove register/staging
        # hence we explicitly remove symbolic link to staging
        try:
            production.remove('%s/staging/staging' % destination)
        except ResourceNotFoundError:
            print "Warning, %s/staging/staging not found..." % destination

        try:
            production.removedir('%s/staging' % destination)
        except ResourceNotFoundError:
            print "Warning, %s/staging not found..." % destination

        backup_dir = time.strftime('%Y-%m-%d-%H-%M-%S')
        # if production.exists('backups/%s' % backup_dir):
        #     production.removedir('backups/%s' % backup_dir, force=True)
        production.copydir(destination, '%s/%s' % (backups, backup_dir), overwrite=True)

    try:
        production.movedir(destination, destination + '-old', overwrite=True)
    except ResourceNotFoundError:
        pass

    production.movedir(destination + '-new', destination, overwrite=True)

    # create symbolic link to standalone staging directory
    # fails if production is built first...
    production.makedir('%s/staging' % destination)
    call('cd %s; ln -s %s' % (ospath.join(production_path, destination, 'staging'), ospath.join(production_path, 'staging')), shell=True)
    call('cd %s; ln -s %s' % (ospath.join(production_path, destination), ospath.join(script_entry_path, 'log.txt')), shell=True)

    try:
        production.removedir(destination + '-old', force=True)
    except ResourceNotFoundError:
        pass

    call('chmod -R a+rx %s/%s' % (production_path, destination), shell=True)

    print "Done building production..."
    logging.info("Production built successfully!")
mail.settings.server = settings.email_server
mail.settings.sender = settings.email_sender
mail.settings.login = settings.email_login

## configure auth policy
auth.settings.controller = 'default'
auth.settings.mailer = mail
auth.settings.registration_requires_verification = True
auth.settings.registration_requires_approval = True
auth.settings.reset_password_requires_verification = True
auth.settings.create_user_groups = False
auth.settings.actions_disabled.append('register')

osFileServer = OSFS(settings.home_dir)
if not osFileServer.exists(settings.creator_dir):
    osFileServer.makedir(settings.creator_dir)
if not osFileServer.exists(settings.product_image_dir):
    osFileServer.makedir(settings.product_image_dir)

## Google Api Key
GOOGLE_API_KEY = "AIzaSyBFA3zO-fDW6iVg11fMqf6MANE4AwB1xRU"
GCM_SEND_HOST = "android.googleapis.com"
GCM_SEND_URL = "/gcm/send"

db.define_table('clsb_config',
                Field('config_key', type='string', unique=True, notnull=True),
                Field('config_value', type='text'))

db.define_table(
    'clsb20_encrypt_product',
    Field('product_code', type='string', unique=True, notnull=True),
    Field('product_path', type='string', notnull=True),
def test_export_roundtrip(self, course_dir, mock_get):
    # Patch network calls to retrieve the textbook TOC
    mock_get.return_value.text = dedent("""
        <?xml version="1.0"?><table_of_contents>
        <entry page="5" page_label="ii" name="Table of Contents"/>
        </table_of_contents>
    """).strip()

    root_dir = path(self.temp_dir)
    print "Copying test course to temp dir {0}".format(root_dir)

    data_dir = path(DATA_DIR)
    shutil.copytree(data_dir / course_dir, root_dir / course_dir)

    print "Starting import"
    initial_import = XMLModuleStore(root_dir, source_dirs=[course_dir], xblock_mixins=(XModuleMixin,))

    courses = initial_import.get_courses()
    self.assertEquals(len(courses), 1)
    initial_course = courses[0]

    # export to the same directory--that way things like the custom_tags/ folder
    # will still be there.
    print "Starting export"
    file_system = OSFS(root_dir)
    initial_course.runtime.export_fs = file_system.makedir(course_dir, recreate=True)
    root = lxml.etree.Element('root')
    initial_course.add_xml_to_node(root)
    with initial_course.runtime.export_fs.open('course.xml', 'wb') as course_xml:
        lxml.etree.ElementTree(root).write(course_xml, encoding='utf-8')

    print "Starting second import"
    second_import = XMLModuleStore(root_dir, source_dirs=[course_dir], xblock_mixins=(XModuleMixin,))

    courses2 = second_import.get_courses()
    self.assertEquals(len(courses2), 1)
    exported_course = courses2[0]

    print "Checking course equality"

    # HACK: filenames change when changing file formats
    # during imports from old-style courses. Ignore them.
    strip_filenames(initial_course)
    strip_filenames(exported_course)

    self.assertTrue(blocks_are_equivalent(initial_course, exported_course))
    self.assertEquals(initial_course.id, exported_course.id)

    course_id = initial_course.id

    print "Checking key equality"
    self.assertItemsEqual(
        initial_import.modules[course_id].keys(),
        second_import.modules[course_id].keys()
    )

    print "Checking module equality"
    for location in initial_import.modules[course_id].keys():
        print("Checking", location)
        self.assertTrue(blocks_are_equivalent(
            initial_import.modules[course_id][location],
            second_import.modules[course_id][location]
        ))
from fs.osfs import OSFS
from fs.errors import ResourceNotFoundError

import settings as s
from backend import fetch_repo, deploy_register, build_register
from utils import load_repos

root_fs = OSFS(s.root_path)
root_fs.makedir(s.build_path, recursive=True, allow_recreate=True)

build_fs = OSFS(s.build_path)
build_fs.makedir(s.sources_path, allow_recreate=True)
build_fs.makedir(s.register_path, allow_recreate=True)

# create production directory if needed
try:
    production_fs = OSFS(s.production_path)
except ResourceNotFoundError:
    # grab production dir's parent dir
    path = s.production_path.split('/')[-2]
    print path
    production_fs = OSFS(s.production_path[:len(s.production_path) - (len(path) + 1)]).makeopendir(path)
    print production_fs

if not production_fs.exists(s.backups_path):
    production_fs.makedir(s.backups_path)

# fetch repos from GitHub
for repo in load_repos(s.repos_path)[0].values():
    print 'Fetching %s for the first time' % repo['id']
    fetch_repo(root_fs, repo['id'], repo['url'])
    build_register(repo['id'])
from fs.memoryfs import MemoryFS
from fs.expose import fuse

fs = MemoryFS()  # create an in-memory file system
fs.createfile('filename.txt')  # create an empty file
fs.setcontents('filename.txt', 'contents of file')  # put content into the file

from fs.osfs import OSFS
home_fs = OSFS('/')
# home_fs.makedir('/home/dave/scratch/ramdrive', allow_recreate=True)  # have to make a directory for us to mount our memory file system on

mp = fuse.mount(fs, '/home/dave/scratch/ramdrive')  # exposes fs to everything else on the machine (i.e. other system calls can see these files)
mp.path  # in case you need the path to the files created
mp.unmount()  # files are no longer being exposed via fuse

home_fs.removedir('/home/dave/scratch/ramdrive/')  # remove the real file system directory when done
fs.remove('filename.txt')

home_fs.close()
fs.close()

# Creating a ramdrive like this won't work for my desired task, as other external applications cannot write to the directory. They only have read access.
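# A minimal alternative sketch (not part of the original snippet): if other
# applications must also be able to write to the shared location, one option is
# to skip the FUSE-mounted MemoryFS and work against a real directory through
# OSFS instead. The path below is a hypothetical example; adjust as needed.
from fs.osfs import OSFS

scratch_fs = OSFS('/home/dave/scratch/shared', create=True)  # create the directory if it does not exist
scratch_fs.setcontents('filename.txt', 'contents of file')   # written straight to disk, so any process can see it
print scratch_fs.getsyspath('filename.txt')                  # real on-disk path that external programs can write to
scratch_fs.close()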
from fs.osfs import OSFS
from fs.errors import ResourceNotFoundError

import settings as s
from backend import fetch_repo, create_production, build
from utils import load_repos

root_fs = OSFS(s.root_path)

build_fs = root_fs.makeopendir(s.build_path)
build_fs.makedir(s.sources_path)
build_fs.makedir(s.staging_path)
build_fs.makedir(s.register_path)

# create production directory if needed
try:
    production_fs = OSFS(s.production_path)
except ResourceNotFoundError:
    # grab production dir's parent dir
    path = s.production_path.split('/')[-2]
    print path
    production_fs = OSFS(s.production_path[:len(s.production_path) - (len(path) + 1)]).makeopendir(path)
    print production_fs

if production_fs.exists(s.backups_path) == False:
    production_fs.makedir(s.backups_path)

# fetch repos from GitHub
for repo in load_repos(s.repos_path)[0].values():
    print 'Fetching %s for the first time' % repo['id']
    fetch_repo(root_fs, s.sources_path, repo['id'], repo['url'], s.build_path)
    build(s.sources_path, s.register_path, root_fs, repo['id'])

create_production(s.register_path, s.backups_path, s.script_entry_path, s.production_path)