def export(self):
        """
        Perform the export given the parameters handed to this class at init.
        """
        with self.modulestore.bulk_operations(self.courselike_key):

            fsm = OSFS(self.root_dir)
            root = lxml.etree.Element("unknown")

            # export only the published content
            with self.modulestore.branch_setting(ModuleStoreEnum.Branch.published_only, self.courselike_key):
                courselike = self.get_courselike()
                export_fs = courselike.runtime.export_fs = fsm.makeopendir(self.target_dir)

                # change all of the references inside the course to use the xml expected key type w/o version & branch
                xml_centric_courselike_key = self.get_key()
                adapt_references(courselike, xml_centric_courselike_key, export_fs)
                courselike.add_xml_to_node(root)

            # Make any needed adjustments to the root node.
            self.process_root(root, export_fs)

            # Process extra items-- drafts, assets, etc
            root_courselike_dir = self.root_dir + "/" + self.target_dir
            self.process_extra(root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs)

            # Any last pass adjustments
            self.post_process(root, export_fs)
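
A hedged sketch of how an exporter like this might be driven. The CourseExportManager name, constructor signature, and paths below are assumptions inferred from the attributes the method reads (self.modulestore, self.courselike_key, self.root_dir, self.target_dir), not a confirmed API:

# Hypothetical driver for the export() method above -- names are assumptions:
exporter = CourseExportManager(
    modulestore,           # source modulestore holding the course
    contentstore,          # asset store, consumed later by process_extra()
    courselike_key,        # key of the courselike object to export
    '/tmp/exports',        # root_dir: where the export tree is written
    'my_course',           # target_dir: subdirectory for this course
)
exporter.export()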
Example #2
def detect_format(source):
    fs = OSFS(source)
    # walkfiles() returns a generator, which is always truthy;
    # use any() to check whether at least one file actually matches.
    if any(fs.walkfiles('/', '*.morph')):
        return 'baserock-morphologies'
    if any(fs.walkfiles('/', '*.cida')):
        return 'cida-definitions'
    return None
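
For reference, a minimal usage sketch of detect_format (the directory path is invented):

fmt = detect_format('/src/definitions')
if fmt == 'baserock-morphologies':
    print('found Baserock .morph files')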
def build_folders(source, destination_temp, standard, root):
    """Transform the repos' folder structure to that of the register
    and build HTML pages for each standard.
    """

    source_fs = OSFS(source)

    print "Processing %s ... " % standard['id']
    standard_fs = source_fs.opendir(standard['id'])

    # list all artifacts of a standard
    artifacts = standard_fs.listdir(dirs_only=True)
    if '.git' in artifacts: artifacts.remove(".git")

    for artifact in artifacts:
        # check whether the artifact folder exists in destination_temp
        if not root.exists('%s/%s' % (destination_temp, artifact)):
            root.makedir('%s/%s' % (destination_temp, artifact))

        # copy standard folders from source to destination_temp in desired structure
        root.copydir('%s/%s/%s' % (source, standard['id'], artifact),  '%s/%s/%s' % (destination_temp, artifact, standard['id']))

    html = create_standard_webpage(standard, artifacts)

    # check whether register/standard exists
    if not root.exists('%s/%s' % (destination_temp, standard['id'])):
        root.makedir('%s/%s' % (destination_temp, standard['id']))
    
    # write standard HTML page to register/standard/index.html
    with codecs.open('%s/%s/index.html' % (destination_temp, standard['id']), 'w', encoding='utf8') as f:
        f.write(html)

    # copy web assets
    root.copydir('web/assets', '%s/r' % destination_temp, overwrite=True)
Example #4
    def cmd_scan(self, *params, **options):

        favicons_fs = OSFS(settings.MEDIA_ROOT).makeopendir('favicons')

        for path in favicons_fs.walkdirs(wildcard="???"):
            icon_fs = favicons_fs.opendir(path)

            if icon_fs.isfile('scan.pik'):

                icon_sizes = ','.join(str(s) for s in sorted(int(p.split('.')[0][4:]) for p in icon_fs.listdir(wildcard='icon*.png')))
                if not icon_sizes:
                    continue

                favicon, created = FavIcon.import_(icon_fs.open('scan.pik'))
                if favicon is None:
                    continue

                old_sizes = favicon.sizes
                favicon.sizes = icon_sizes
                favicon.update()
                favicon.save()

                if created:
                    print "New object:\t", path
                else:
                    print path
                if old_sizes != favicon.sizes:
                    print "Icon sizes changed!\t", path
                    favicon.export(icon_fs.open('scan.pik', 'w'))
Example #5
class Recent(Files):
    def __init__(self):
        super(Recent, self).__init__()
        self._paths = []
        # http://python.6.n6.nabble.com/Access-Most-Recently-Used-MRU-entries-td1953541.html
        self.mru_path = shell.SHGetSpecialFolderPath(0, shellcon.CSIDL_RECENT, 0)
        self.mrufs = OSFS(self.mru_path)
        self.watcher = None

    def setup(self):
        self._update_path()
        self.watcher = self.mrufs.add_watcher(lambda e: self._update_path())

    def _update_path(self):
        self._paths = sorted(
            [os.path.join(self.mru_path, f) for f in self.mrufs.listdir()], key=os.path.getmtime, reverse=True
        )
        self.path_list_changed()

    def teardown(self):
        if self.watcher:
            self.mrufs.del_watcher(self.watcher)

    @property
    def paths(self):
        return self._paths

    @property
    def name(self):
        return "re"

    def lit(self, *args, **kargs):
        return super(Recent, self).lit(*args, **kargs)
Example #6
def deploy_register():
    """Put the staging version to production hosted at 
    register.geostandaarden.nl
    """

    ## TODO: feed this function absolute paths

    print "Deploying production..."
    logging.info("Deploying production...")

    production = OSFS(production_path)

    # NOTE: only build paths within script_dir are currently supported
    call('cp -r %s %s' % (ospath.join(build_path, register_path), ospath.join(production_path, register_path + '-new')), shell=True)

    if production.exists(register_path):
        backup_dir = time.strftime('%Y-%m-%d-%H-%M-%S')

        production.copydir(register_path, '%s/%s' % (backups_path, backup_dir), overwrite=True)
        
        try:
            production.movedir(register_path, register_path + '-old', overwrite=True)
        except ResourceNotFoundError:
            pass

    production.movedir(register_path + '-new', register_path, overwrite=True)

    try:
        production.removedir(register_path + '-old', force=True)
    except ResourceNotFoundError:
        pass

    call('chmod -R a+rx %s/%s' % (production_path, register_path), shell=True)

    logging.info("Production built successfully!")
Example #7
    def export(self):
        """
        Perform the export given the parameters handed to this class at init.
        """
        with self.modulestore.bulk_operations(self.courselike_key):
            # depth = None: Traverses down the entire course structure.
            # lazy = False: Loads and caches all block definitions during traversal for fast access later
            #               -and- to eliminate many round-trips to read individual definitions.
            # Why these parameters? Because a course export needs to access all the course block information
            # eventually. Accessing it all now at the beginning increases performance of the export.
            fsm = OSFS(self.root_dir)
            courselike = self.get_courselike()
            export_fs = courselike.runtime.export_fs = fsm.makeopendir(self.target_dir)
            root_courselike_dir = self.root_dir + '/' + self.target_dir

            root = lxml.etree.Element('unknown')  # pylint: disable=no-member

            # export only the published content
            with self.modulestore.branch_setting(ModuleStoreEnum.Branch.published_only, self.courselike_key):
                # change all of the references inside the course to use the xml expected key type w/o version & branch
                xml_centric_courselike_key = self.get_key()
                adapt_references(courselike, xml_centric_courselike_key, export_fs)
                courselike.add_xml_to_node(root)

            # Make any needed adjustments to the root node.
            self.process_root(root, export_fs)

            # Process extra items-- drafts, assets, etc
            self.process_extra(root, courselike, root_courselike_dir, xml_centric_courselike_key, export_fs)

            # Any last pass adjustments
            self.post_process(root, export_fs)
Example #8
def write_chunk_metafile(defs, chunk):
    '''Writes a chunk .meta file to the baserock dir of the chunk

    The split rules are used to divide up the installed files for the chunk
    into artifacts in the 'products' list

    '''
    app.log(chunk['name'], 'splitting chunk')
    rules, splits = compile_rules(defs, chunk)

    install_dir = chunk['install']
    fs = OSFS(install_dir)
    files = fs.walkfiles('.', search='depth')
    dirs = fs.walkdirs('.', search='depth')

    for path in files:
        for artifact, rule in rules:
            if rule.match(path):
                splits[artifact].append(path)
                break

    all_files = [a for x in splits.values() for a in x]
    for path in dirs:
        if not any(map(lambda y: y.startswith(path),
                   all_files)) and path != '':
            for artifact, rule in rules:
                if rule.match(path) or rule.match(path + '/'):
                    splits[artifact].append(path)
                    break

    write_metafile(rules, splits, chunk)
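
To make the first-match-wins split above concrete, here is a self-contained sketch in which plain regexes stand in for whatever compile_rules returns; the rule patterns and file paths are invented for illustration:

import re
from collections import defaultdict

rules = [
    ('chunk-bins', re.compile(r'^/(bin|sbin)/')),
    ('chunk-libs', re.compile(r'^/lib/')),
    ('chunk-misc', re.compile(r'')),  # catch-all: matches any path
]

splits = defaultdict(list)
for path in ['/bin/sh', '/lib/libc.so.6', '/etc/motd']:
    for artifact, rule in rules:
        if rule.match(path):
            splits[artifact].append(path)
            break  # first matching rule wins, as in the loop above

# splits -> {'chunk-bins': ['/bin/sh'],
#            'chunk-libs': ['/lib/libc.so.6'],
#            'chunk-misc': ['/etc/motd']}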
def build_folders(source, destination_temp, standards, root):
    print "Building register..."

    source_fs = OSFS(source)

    # iterate over all standards in source directory
    for standard in standards:
        print "Processing %s ... " % standard['id']
        standard_fs = source_fs.opendir(standard['id'])

        # list all sub standards of a standard
        artifacts = standard_fs.listdir(dirs_only=True)
        if '.git' in artifacts: artifacts.remove(".git")

        for artifact in artifacts:
            # check whether the artifact folder exists in destination_temp
            if not root.exists('%s/%s' % (destination_temp, artifact)):
                root.makedir('%s/%s' % (destination_temp, artifact))
                
            # copy standard folders from source to destination_temp in desired structure
            root.copydir('%s/%s/%s' % (source, standard['id'], artifact),  '%s/%s/%s' % (destination_temp, artifact, standard['id']))

        # create standard HTML page
        html = create_standard_webpage(standard, artifacts)

        # check whether the standard folder exists in the register root
        if not root.exists('%s/%s' % (destination_temp, standard['id'])):
            root.makedir('%s/%s' % (destination_temp, standard['id']))
        
        # write standard HTML page to register/standard/index.html
        with codecs.open('%s/%s/index.html' % (destination_temp, standard['id']), 'w', encoding='utf8') as f:
            f.write(html)
def main():
    parser = argparse.ArgumentParser(description='Create free editor.slf')
    parser.add_argument('original', help="Original editor.slf")
    parser.add_argument(
        '-o',
        '--output',
        default='build/editor.slf',
        help="Where to store the created slf file"
    )
    args = parser.parse_args()

    if not os.path.exists(os.path.dirname(args.output)):
        os.makedirs(os.path.dirname(args.output))

    target_fs = BufferedSlfFS()
    replacement_fs = OSFS('editor')
    with open(args.original, 'rb') as source_file:
        source_fs = SlfFS(source_file)

        target_fs.library_name = source_fs.library_name
        target_fs.library_path = source_fs.library_path
        target_fs.version = source_fs.version
        target_fs.sort = source_fs.sort

        for directory in source_fs.walkdirs():
            if directory == '/':
                continue
            target_fs.makedir(directory)
        for file in source_fs.walkfiles():
            base_name, _ = os.path.splitext(file)
            with source_fs.open(file, 'rb') as source, target_fs.open(file, 'wb') as target:
                ja2_images = load_8bit_sti(source)
                replacement_path = base_name + '.gif'
                replacement_file_exists = replacement_fs.isfile(replacement_path)
                replacement_dir = file
                replacement_dir_exists = replacement_fs.isdir(replacement_dir)
                if len(ja2_images) == 1 and replacement_file_exists:
                    print("Replacing {0} with {1}".format(file, replacement_path))
                    replacement_img = Image.open(replacement_fs.open(replacement_path, 'rb'))
                    ja2_images._palette = replacement_img.palette
                    ja2_images.images[0]._image = replacement_img
                elif len(ja2_images) > 1 and replacement_dir_exists:
                    for i in range(len(ja2_images)):
                        replacement_path = replacement_dir + '/{}.gif'.format(i)

                        print("Replacing {0} with {1}".format(file, replacement_path))
                        replacement_img = Image.open(replacement_fs.open(replacement_path, 'rb'))
                        ja2_images._palette = replacement_img.palette
                        ja2_images.images[i]._image = replacement_img
                else:
                    print("Replacing {0} with nothingness".format(file))
                    for sub_image in ja2_images.images:
                        width, height = sub_image.image.size
                        sub_image._image = Image.new('P', (width, height), color=54)

                save_8bit_sti(ja2_images, target)

    with open(args.output, 'wb') as target_file:
        target_fs.save(target_file)
    def check_export_roundtrip(self, data_dir, course_dir, mock_get):

        # Patch network calls to retrieve the textbook TOC
        mock_get.return_value.text = dedent("""
            <?xml version="1.0"?><table_of_contents>
            <entry page="5" page_label="ii" name="Table of Contents"/>
            </table_of_contents>
        """).strip()

        root_dir = path(self.temp_dir)
        print("Copying test course to temp dir {0}".format(root_dir))

        data_dir = path(data_dir)
        shutil.copytree(data_dir / course_dir, root_dir / course_dir)

        print("Starting import")
        initial_import = XMLModuleStore(root_dir, course_dirs=[course_dir])

        courses = initial_import.get_courses()
        self.assertEquals(len(courses), 1)
        initial_course = courses[0]

        # export to the same directory--that way things like the custom_tags/ folder
        # will still be there.
        print("Starting export")
        fs = OSFS(root_dir)
        export_fs = fs.makeopendir(course_dir)

        xml = initial_course.export_to_xml(export_fs)
        with export_fs.open('course.xml', 'w') as course_xml:
            course_xml.write(xml)

        print("Starting second import")
        second_import = XMLModuleStore(root_dir, course_dirs=[course_dir])

        courses2 = second_import.get_courses()
        self.assertEquals(len(courses2), 1)
        exported_course = courses2[0]

        print("Checking course equality")

        # HACK: filenames change when changing file formats
        # during imports from old-style courses.  Ignore them.
        strip_filenames(initial_course)
        strip_filenames(exported_course)

        self.assertEquals(initial_course, exported_course)
        self.assertEquals(initial_course.id, exported_course.id)
        course_id = initial_course.id

        print("Checking key equality")
        self.assertEquals(sorted(initial_import.modules[course_id].keys()),
                          sorted(second_import.modules[course_id].keys()))

        print("Checking module equality")
        for location in initial_import.modules[course_id].keys():
            print("Checking", location)
            self.assertEquals(initial_import.modules[course_id][location],
                              second_import.modules[course_id][location])
Example #12
    def run(self):
        args = self.args
        device_class = args.device_class
        conf_path = constants.CONF_PATH

        if not os.path.exists(conf_path):
            sys.stderr.write('{} does not exist.\n'.format(conf_path))
            sys.stderr.write("please run 'dataplicity init' first\n")
            return -1

        print("reading conf from {}".format(conf_path))
        cfg = settings.read(conf_path)
        serial = cfg.get('device', 'serial')
        auth_token = cfg.get('device', 'auth')
        server_url = cfg.get('server', 'url', constants.SERVER_URL)

        remote = jsonrpc.JSONRPC(server_url)

        print("downloading firmware...")
        with remote.batch() as batch:
            batch.call_with_id('register_result',
                               'device.register',
                               auth_token=auth_token,
                               name=args.name or serial,
                               serial=serial,
                               device_class_name=device_class)
            batch.call_with_id('auth_result',
                               'device.check_auth',
                               device_class=device_class,
                               serial=serial,
                               auth_token=auth_token)
            batch.call_with_id('firmware_result',
                               'device.get_firmware')
        batch.get_result('register_result')
        batch.get_result('auth_result')
        fw = batch.get_result('firmware_result')

        if not fw['firmware']:
            sys.stderr.write('no firmware available!\n')
            return -1
        version = fw['version']

        firmware_bin = b64decode(fw['firmware'])
        firmware_file = BytesIO(firmware_bin)
        firmware_fs = ZipFS(firmware_file)

        dst_fs = OSFS(constants.FIRMWARE_PATH, create=True)

        firmware.install(device_class,
                         version,
                         firmware_fs,
                         dst_fs)

        fw_path = dst_fs.getsyspath('/')
        print("installed firmware {} to {}".format(version, fw_path))

        firmware.activate(device_class, version, dst_fs)
        print("activated {}".format(version))
def get_artifacts(root, build_path, sources_path, standard):
    source_fs = OSFS(ospath.join(root.getsyspath('.'),  build_path, sources_path))

    # print "Processing %s ... " % standard['id']
    standard_fs = source_fs.opendir(standard['id'])
    artifacts = standard_fs.listdir(dirs_only=True)
    if '.git' in artifacts: artifacts.remove(".git")

    return artifacts
Example #14
    def check_export_roundtrip(self, data_dir, course_dir):
        root_dir = path(self.temp_dir)
        print("Copying test course to temp dir {0}".format(root_dir))

        data_dir = path(data_dir)
        shutil.copytree(data_dir / course_dir, root_dir / course_dir)

        print("Starting import")
        initial_import = XMLModuleStore(root_dir, course_dirs=[course_dir])

        courses = initial_import.get_courses()
        self.assertEquals(len(courses), 1)
        initial_course = courses[0]

        # export to the same directory--that way things like the custom_tags/ folder
        # will still be there.
        print("Starting export")
        fs = OSFS(root_dir)
        export_fs = fs.makeopendir(course_dir)

        xml = initial_course.export_to_xml(export_fs)
        with export_fs.open('course.xml', 'w') as course_xml:
            course_xml.write(xml)

        print("Starting second import")
        second_import = XMLModuleStore(root_dir, course_dirs=[course_dir])

        courses2 = second_import.get_courses()
        self.assertEquals(len(courses2), 1)
        exported_course = courses2[0]

        print("Checking course equality")

        # HACK: filenames change when changing file formats
        # during imports from old-style courses.  Ignore them.
        strip_filenames(initial_course)
        strip_filenames(exported_course)

        self.assertEquals(initial_course, exported_course)
        self.assertEquals(initial_course.id, exported_course.id)
        course_id = initial_course.id

        print("Checking key equality")
        self.assertEquals(sorted(initial_import.modules[course_id].keys()),
                          sorted(second_import.modules[course_id].keys()))

        print("Checking module equality")
        for location in initial_import.modules[course_id].keys():
            print("Checking", location)
            if location.category == 'html':
                print(
                    "Skipping html modules--they can't import in"
                    " final form without writing files..."
                )
                continue
            self.assertEquals(initial_import.modules[course_id][location],
                              second_import.modules[course_id][location])
def export_to_xml(modulestore, contentstore, course_location, root_dir, course_dir, draft_modulestore=None):

    course = modulestore.get_item(course_location)

    fs = OSFS(root_dir)
    export_fs = fs.makeopendir(course_dir)

    xml = course.export_to_xml(export_fs)
    with export_fs.open("course.xml", "w") as course_xml:
        course_xml.write(xml)

    # export the static assets
    contentstore.export_all_for_course(course_location, root_dir + "/" + course_dir + "/static/")

    # export the static tabs
    export_extra_content(export_fs, modulestore, course_location, "static_tab", "tabs", ".html")

    # export the custom tags
    export_extra_content(export_fs, modulestore, course_location, "custom_tag_template", "custom_tags")

    # export the course updates
    export_extra_content(export_fs, modulestore, course_location, "course_info", "info", ".html")

    # export the 'about' data (e.g. overview, etc.)
    export_extra_content(export_fs, modulestore, course_location, "about", "about", ".html")

    # export the grading policy
    policies_dir = export_fs.makeopendir("policies")
    course_run_policy_dir = policies_dir.makeopendir(course.location.name)
    with course_run_policy_dir.open("grading_policy.json", "w") as grading_policy:
        grading_policy.write(dumps(course.grading_policy))

    # export all of the course metadata in policy.json
    with course_run_policy_dir.open("policy.json", "w") as course_policy:
        policy = {"course/" + course.location.name: own_metadata(course)}
        course_policy.write(dumps(policy))

    # export draft content
    # NOTE: this code assumes that verticals are the top most draftable container
    # should we change the application, then this assumption will no longer
    # be valid
    if draft_modulestore is not None:
        draft_verticals = draft_modulestore.get_items(
            [None, course_location.org, course_location.course, "vertical", None, "draft"]
        )
        if len(draft_verticals) > 0:
            draft_course_dir = export_fs.makeopendir("drafts")
            for draft_vertical in draft_verticals:
                parent_locs = draft_modulestore.get_parent_locations(draft_vertical.location, course.location.course_id)
                # Don't try to export orphaned items.
                if len(parent_locs) > 0:
                    logging.debug("parent_locs = {0}".format(parent_locs))
                    draft_vertical.xml_attributes["parent_sequential_url"] = Location(parent_locs[0]).url()
                    sequential = modulestore.get_item(Location(parent_locs[0]))
                    index = sequential.children.index(draft_vertical.location.url())
                    draft_vertical.xml_attributes["index_in_children_list"] = str(index)
                    draft_vertical.export_to_xml(draft_course_dir)
def export_to_xml(modulestore, contentstore, course_location, root_dir, course_dir, draft_modulestore=None):

    course = modulestore.get_item(course_location)

    fs = OSFS(root_dir)
    export_fs = fs.makeopendir(course_dir)

    xml = course.export_to_xml(export_fs)
    with export_fs.open('course.xml', 'w') as course_xml:
        course_xml.write(xml)

    # export the static assets
    contentstore.export_all_for_course(course_location, root_dir + '/' + course_dir + '/static/')

    # export the static tabs
    export_extra_content(export_fs, modulestore, course_location, 'static_tab', 'tabs', '.html')

    # export the custom tags
    export_extra_content(export_fs, modulestore, course_location, 'custom_tag_template', 'custom_tags')

    # export the course updates
    export_extra_content(export_fs, modulestore, course_location, 'course_info', 'info', '.html')

    # export the 'about' data (e.g. overview, etc.)
    export_extra_content(export_fs, modulestore, course_location, 'about', 'about', '.html')

    # export the grading policy
    policies_dir = export_fs.makeopendir('policies')
    course_run_policy_dir = policies_dir.makeopendir(course.location.name)
    with course_run_policy_dir.open('grading_policy.json', 'w') as grading_policy:
        grading_policy.write(dumps(course.grading_policy, cls=EdxJSONEncoder))

    # export all of the course metadata in policy.json
    with course_run_policy_dir.open('policy.json', 'w') as course_policy:
        policy = {'course/' + course.location.name: own_metadata(course)}
        course_policy.write(dumps(policy, cls=EdxJSONEncoder))

    # export draft content
    # NOTE: this code assumes that verticals are the top most draftable container
    # should we change the application, then this assumption will no longer
    # be valid
    if draft_modulestore is not None:
        draft_verticals = draft_modulestore.get_items([None, course_location.org, course_location.course,
                                                       'vertical', None, 'draft'])
        if len(draft_verticals) > 0:
            draft_course_dir = export_fs.makeopendir('drafts')
            for draft_vertical in draft_verticals:
                parent_locs = draft_modulestore.get_parent_locations(draft_vertical.location, course.location.course_id)
                # Don't try to export orphaned items.
                if len(parent_locs) > 0:
                    logging.debug('parent_locs = {0}'.format(parent_locs))
                    draft_vertical.xml_attributes['parent_sequential_url'] = Location(parent_locs[0]).url()
                    sequential = modulestore.get_item(Location(parent_locs[0]))
                    index = sequential.children.index(draft_vertical.location.url())
                    draft_vertical.xml_attributes['index_in_children_list'] = str(index)
                    draft_vertical.export_to_xml(draft_course_dir)
Example #17
    def remove_dir(self, path):
        """Remove a folder at a given path."""
        try:
            file_to_delete = os.path.basename(path)
            to_delete_from = OSFS(os.path.dirname(path))
            to_delete_from.removedir(file_to_delete,
                                     recursive=True,
                                     force=True)
        except ResourceNotFoundError:
            raise ArchiverError("Folder %s not found" % path)
    def verify_content_existence(self, modulestore, root_dir, location, dirname, category_name, filename_suffix=''):
        filesystem = OSFS(root_dir / 'test_export')
        self.assertTrue(filesystem.exists(dirname))

        query_loc = Location('i4x', location.org, location.course, category_name, None)
        items = modulestore.get_items(query_loc)

        for item in items:
            filesystem = OSFS(root_dir / ('test_export/' + dirname))
            self.assertTrue(filesystem.exists(item.location.name + filename_suffix))
Example #19
def list_photos(folder):
	try:
		photo_fs = OSFS('~/Pictures/Playground/' + folder)
		photos = []

		for photo in photo_fs.walkfiles():
			if is_photo(photo):
				photos.append(Photo(photo, photo_fs))

		return jsonify(photos)
	except ResourceNotFoundError as err:
		return jsonify({'error': unicode(err)})
Example #20
    def export(self, location, output_directory):
        content = self.find(location)

        if content.import_path is not None:
            output_directory = output_directory + '/' + os.path.dirname(content.import_path)

        if not os.path.exists(output_directory):
            os.makedirs(output_directory)

        disk_fs = OSFS(output_directory)

        with disk_fs.open(content.name, 'wb') as asset_file:
            asset_file.write(content.data)
Example #21
    def cmd_import(self, *params, **options):

        """ Imports an icon set """

        media_fs = OSFS(settings.MEDIA_ROOT)
        iconsets_fs = media_fs.opendir('iconsets')

        try:
            iconset_name = params[0]
            iconsets = iconsets_fs.listdir(wildcard = iconset_name, dirs_only=True)

        except IndexError, e:
            print "<catalog name, or wildcard>"
            return
Example #22
def build(fs, settings_path="settings.ini", rebuild=False, archive=None, master_settings=None):
    """Build a project"""
    if isinstance(fs, string_types):
        if '://' in fs:
            fs = fsopendir(fs)
        else:
            fs = OSFS(fs)

    if isinstance(settings_path, string_types):
        settings_path = [settings_path]
    if archive is None:
        archive = Archive(fs)
    context = Context()

    syspath = fs.getsyspath('/', allow_none=True)

    cwd = os.getcwd()

    if syspath is not None:
        os.chdir(syspath)

    try:
        root = context.root
        root['libs'] = archive.libs
        root['apps'] = archive.apps
        root['fs'] = FSWrapper(fs)

        log.debug("reading settings from {}".format(textual_list(settings_path)))
        archive.cfg = SettingsContainer.read(fs, settings_path, master=master_settings)
        root['settings'] = SettingsContainer.from_dict(archive.cfg['settings'])
        startup_path = archive.cfg.get('project', 'startup')
        docs_location = archive.cfg.get('project', 'location')

        archive.init_settings()
        root['console'] = archive.console
        root['debug'] = archive.debug
        root['_rebuild'] = rebuild

        parser = Parser(archive, fs.opendir(docs_location), startup_path)
        doc = parser.parse()

        if doc is None:
            raise errors.StartupFailedError('unable to parse "{}"'.format(startup_path))

        archive.build(doc, fs=fs)

        return archive, context, doc

    finally:
        os.chdir(cwd)
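
A minimal hedged usage sketch of build(); the project path and settings file name are invented:

archive, context, doc = build('projects/mysite', settings_path='settings.ini')
print(archive.cfg.get('project', 'startup'))  # same cfg lookup build() performs internally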
    def course_descriptor_inheritance_check(self, descriptor, from_date_string, unicorn_color, course_run=RUN):
        """
        Checks to make sure that metadata inheritance on a course descriptor is respected.
        """
        # pylint: disable=protected-access
        print((descriptor, descriptor._field_data))
        self.assertEqual(descriptor.due, ImportTestCase.date.from_json(from_date_string))

        # Check that the child inherits due correctly
        child = descriptor.get_children()[0]
        self.assertEqual(child.due, ImportTestCase.date.from_json(from_date_string))
        # need to convert v to canonical json b4 comparing
        self.assertEqual(
            ImportTestCase.date.to_json(ImportTestCase.date.from_json(from_date_string)),
            child.xblock_kvs.inherited_settings['due']
        )

        # Now export and check things
        file_system = OSFS(mkdtemp())
        descriptor.runtime.export_fs = file_system.makedir(u'course', recreate=True)
        node = etree.Element('unknown')
        descriptor.add_xml_to_node(node)

        # Check that the exported xml is just a pointer
        print(("Exported xml:", etree.tostring(node)))
        self.assertTrue(is_pointer_tag(node))
        # but it's a special case course pointer
        self.assertEqual(node.attrib['course'], COURSE)
        self.assertEqual(node.attrib['org'], ORG)

        # Does the course still have unicorns?
        with descriptor.runtime.export_fs.open(u'course/{course_run}.xml'.format(course_run=course_run)) as f:
            course_xml = etree.fromstring(f.read())

        self.assertEqual(course_xml.attrib['unicorn'], unicorn_color)

        # the course and org tags should be _only_ in the pointer
        self.assertNotIn('course', course_xml.attrib)
        self.assertNotIn('org', course_xml.attrib)

        # did we successfully strip the url_name from the definition contents?
        self.assertNotIn('url_name', course_xml.attrib)

        # Does the chapter tag now have a due attribute?
        # hardcoded path to child
        with descriptor.runtime.export_fs.open(u'chapter/ch.xml') as f:
            chapter_xml = etree.fromstring(f.read())
        self.assertEqual(chapter_xml.tag, 'chapter')
        self.assertNotIn('due', chapter_xml.attrib)
Example #24
    def cmd_makepreviews(self, *params, **options):

        PREVIEW_ICON_SIZE = 32
        WIDTH_COUNT = 11
        BORDER = 5

        ICON_DIMENSIONS = (BORDER*2 + PREVIEW_ICON_SIZE)

        preview_width = ICON_DIMENSIONS * WIDTH_COUNT

        media_fs = OSFS(settings.MEDIA_ROOT)
        media_fs.makedir('iconsetpreviews', allow_recreate=True)
        previews_fs = media_fs.opendir('iconsetpreviews')

        for catalog in IconCatalog.objects.all():

            for category in catalog.get_categories():

                filename =  "%s.%s.jpg" % (catalog.name, category)

                icons = catalog.icon_set.filter(category=category).order_by('name')
                num_icons = icons.count()

                icons_height_count = (num_icons + WIDTH_COUNT-1) // WIDTH_COUNT
                preview_height = icons_height_count * ICON_DIMENSIONS

                preview_img = Image.new('RGB', (preview_width, preview_height), (255, 255, 255))
                print preview_width, preview_height

                for i, icon in enumerate(icons):

                    y, x = divmod(i, WIDTH_COUNT)

                    pth = icon.path.replace('[SIZE]', str(PREVIEW_ICON_SIZE))

                    icon_pth = media_fs.getsyspath(pth)

                    img = Image.open(icon_pth)
                    if img.size[0] != img.size[1]:
                        img = img.crop((0, 0, PREVIEW_ICON_SIZE, PREVIEW_ICON_SIZE))
                    try:
                        preview_img.paste(img, (x*ICON_DIMENSIONS+BORDER, y*ICON_DIMENSIONS+BORDER), img)
                    except ValueError:
                        preview_img.paste(img, (x*ICON_DIMENSIONS+BORDER, y*ICON_DIMENSIONS+BORDER))


                sys_filename = previews_fs.getsyspath(filename)
                print sys_filename
                preview_img.save(previews_fs.getsyspath(filename), quality=75)
def application(environ, start_response):
    fs = OSFS(join(dirname(__file__), "static"))
    path = environ["PATH_INFO"]      
    if path in ("", "/"):        
        path = "index.html"
    if path == "/getbbcode":
        bbcode = unicode(environ["wsgi.input"].read(), 'utf-8')
        html = render_bbcode(bbcode, clean=True, paragraphs=True, render_unknown_tags=True)
        start_response("200 OK", [("Content-type", "text/html; charset=utf-8")])
        return [html.encode("utf-8")]
    mime_type, _encoding = mimetypes.guess_type(basename(path))
    if not fs.isfile(path):
        start_response("404 NOT FOUND", [])
        return ["Nobody here but us chickens: %s" % path]
    start_response("200 OK", [("Content-type", mime_type)])    
    return [fs.getcontents(path)]
Example #26
    def __init__(self):
        super(Recent, self).__init__()
        self._paths = []
        # http://python.6.n6.nabble.com/Access-Most-Recently-Used-MRU-entries-td1953541.html
        self.mru_path = shell.SHGetSpecialFolderPath(0, shellcon.CSIDL_RECENT, 0)
        self.mrufs = OSFS(self.mru_path)
        self.watcher = None
Example #27
    def export(self, location, output_directory):
        content = self.find(location)

        filename = content.name
        if content.import_path is not None:
            output_directory = output_directory + '/' + os.path.dirname(content.import_path)

        if not os.path.exists(output_directory):
            os.makedirs(output_directory)

        # Escape invalid char from filename.
        export_name = escape_invalid_characters(name=filename, invalid_char_list=['/', '\\'])

        disk_fs = OSFS(output_directory)

        with disk_fs.open(export_name, 'wb') as asset_file:
            asset_file.write(content.data)
Example #28
    def _zip(self, destination=None):
        """Compresse a bagit file."""
        # Removes the final forwardslash if there is one
        destination = destination or cfg["ARCHIVER_TMPDIR"]
        if destination.endswith(os.path.sep):
            destination = destination[:-len(os.path.sep)]
        filename = os.path.join(destination, "{0}.zip".format(self.name))

        # Create an FS object
        with OSFS(self.folder) as to_zip_fs:
            with ZipFS(filename, mode='w') as zip_fs:
                copydir(to_zip_fs, zip_fs, overwrite=True)
            file_to_delete = os.path.basename(self.folder)
            to_delete_from = OSFS(os.path.dirname(self.folder))
            to_delete_from.removedir(file_to_delete, recursive=True,
                                     force=True)
        return filename
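
The OSFS-to-ZipFS copy is the core of the method. A stripped-down sketch of just that pattern, assuming pyfilesystem 0.x, where fs.utils.copydir accepts two FS objects exactly as the call above does:

from fs.osfs import OSFS
from fs.zipfs import ZipFS
from fs.utils import copydir

def zip_folder(folder, zip_path):
    """Copy every file under `folder` into a fresh zip at `zip_path`."""
    with OSFS(folder) as src_fs:
        with ZipFS(zip_path, mode='w') as zip_fs:
            copydir(src_fs, zip_fs, overwrite=True)
    return zip_path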
Example #29
    def deploy(self):
        """Deploy latest firmware"""
        self.log.info("requesting firmware...")
        with self.remote.batch() as batch:
            batch.call_with_id('register_result',
                               'device.register',
                               auth_token=self.auth_token,
                               name=self.name or self.serial,
                               serial=self.serial,
                               device_class_name=self.device_class)
            batch.call_with_id('auth_result',
                               'device.check_auth',
                               device_class=self.device_class,
                               serial=self.serial,
                               auth_token=self.auth_token)
            batch.call_with_id('firmware_result',
                               'device.get_firmware')
        try:
            batch.get_result('register_result')
        except Exception as e:
            self.log.warning(e)
        batch.get_result('auth_result')

        fw = batch.get_result('firmware_result')
        if not fw['firmware']:
            self.log.warning('no firmware available!')
            return False
        version = fw['version']

        firmware_bin = b64decode(fw['firmware'])
        firmware_file = StringIO(firmware_bin)
        firmware_fs = ZipFS(firmware_file)

        dst_fs = OSFS(constants.FIRMWARE_PATH, create=True)

        firmware.install(self.device_class,
                         version,
                         firmware_fs,
                         dst_fs)

        fw_path = dst_fs.getsyspath('/')
        self.log.info("installed firmware {:010} to {}".format(version, fw_path))

        firmware.activate(self.device_class, version, dst_fs)
        self.log.info("activated firmware {:010}".format(version))
Example #30
    def view(self, archive, source_fs):
        args = self.args
        import tempfile
        docs_output_path = os.path.join(tempfile.tempdir, '__moyadocs__')
        output_fs = OSFS(docs_output_path, create=True)
        out_path = output_fs.desc('/')

        if args.theme is None:
            from ... import docgen
            theme_path = os.path.join(os.path.dirname(docgen.__file__), 'themes/default')
        else:
            theme_path = args.theme
        theme_fs = self.get_fs(theme_path)

        from ...docgen.builder import Builder
        builder = Builder(source_fs, output_fs, theme_fs)
        index = builder.build()
        import webbrowser
        webbrowser.open(index)
Example #31
import logging, os, sys, time
import threading
from Config import config

if config.debug: # Only load pyfilesystem if using debug mode
	try:
		from fs.osfs import OSFS 
		pyfilesystem = OSFS("src")
		pyfilesystem_plugins = OSFS("plugins")
	except Exception, err:
		logging.debug("%s: For autoreload please download pyfilesystem (https://code.google.com/p/pyfilesystem/)" % err)
		pyfilesystem = False
else:
	pyfilesystem = False

class DebugReloader:
	def __init__ (self, callback, directory = "/"):
		self.last_changed = 0
		if pyfilesystem:
			self.directory = directory
			self.callback = callback
			logging.debug("Adding autoreload: %s, cb: %s" % (directory, callback))
			thread = threading.Thread(target=self.addWatcher)
			thread.daemon = True
			thread.start()


	def addWatcher(self, recursive=True):
		try:
			time.sleep(1) # Wait for .pyc compiles
			pyfilesystem.add_watcher(self.changed, path=self.directory, events=None, recursive=recursive)
Example #32
    def process_extra(self, root, courselike, root_courselike_dir,
                      xml_centric_courselike_key, export_fs):
        # Export the modulestore's asset metadata.
        asset_dir = root_courselike_dir + '/' + AssetMetadata.EXPORTED_ASSET_DIR + '/'
        if not os.path.isdir(asset_dir):
            os.makedirs(asset_dir)
        asset_root = lxml.etree.Element(AssetMetadata.ALL_ASSETS_XML_TAG)
        course_assets = self.modulestore.get_all_asset_metadata(
            self.courselike_key, None)
        for asset_md in course_assets:
            # All asset types are exported using the "asset" tag - but their asset type is specified in each asset key.
            asset = lxml.etree.SubElement(asset_root,
                                          AssetMetadata.ASSET_XML_TAG)
            asset_md.to_xml(asset)
        with OSFS(asset_dir).open(AssetMetadata.EXPORTED_ASSET_FILENAME,
                                  'w') as asset_xml_file:
            lxml.etree.ElementTree(asset_root).write(asset_xml_file)

        # export the static assets
        policies_dir = export_fs.makeopendir('policies')
        if self.contentstore:
            self.contentstore.export_all_for_course(
                self.courselike_key,
                root_courselike_dir + '/static/',
                root_courselike_dir + '/policies/assets.json',
            )

            # If we are using the default course image, export it to the
            # legacy location to support backwards compatibility.
            if courselike.course_image == courselike.fields[
                    'course_image'].default:
                try:
                    course_image = self.contentstore.find(
                        StaticContent.compute_location(
                            courselike.id, courselike.course_image), )
                except NotFoundError:
                    pass
                else:
                    output_dir = root_courselike_dir + '/static/images/'
                    if not os.path.isdir(output_dir):
                        os.makedirs(output_dir)
                    with OSFS(output_dir).open('course_image.jpg',
                                               'wb') as course_image_file:
                        course_image_file.write(course_image.data)

        # export the static tabs
        export_extra_content(export_fs, self.modulestore, self.courselike_key,
                             xml_centric_courselike_key, 'static_tab', 'tabs',
                             '.html')

        # export the custom tags
        export_extra_content(export_fs, self.modulestore, self.courselike_key,
                             xml_centric_courselike_key, 'custom_tag_template',
                             'custom_tags')

        # export the course updates
        export_extra_content(export_fs, self.modulestore, self.courselike_key,
                             xml_centric_courselike_key, 'course_info', 'info',
                             '.html')

        # export the 'about' data (e.g. overview, etc.)
        export_extra_content(export_fs, self.modulestore, self.courselike_key,
                             xml_centric_courselike_key, 'about', 'about',
                             '.html')

        course_policy_dir_name = courselike.location.run
        if courselike.url_name != courselike.location.run and courselike.url_name == 'course':
            # Use url_name for split mongo because course_run is not used when loading policies.
            course_policy_dir_name = courselike.url_name

        course_run_policy_dir = policies_dir.makeopendir(
            course_policy_dir_name)

        # export the grading policy
        with course_run_policy_dir.open('grading_policy.json',
                                        'w') as grading_policy:
            grading_policy.write(
                dumps(courselike.grading_policy,
                      cls=EdxJSONEncoder,
                      sort_keys=True,
                      indent=4))

        # export all of the course metadata in policy.json
        with course_run_policy_dir.open('policy.json', 'w') as course_policy:
            policy = {
                'course/' + courselike.location.name: own_metadata(courselike)
            }
            course_policy.write(
                dumps(policy, cls=EdxJSONEncoder, sort_keys=True, indent=4))

        # xml backed courses don't support drafts!
        if courselike.runtime.modulestore.get_modulestore_type(
        ) != ModuleStoreEnum.Type.xml:
            _export_drafts(self.modulestore, self.courselike_key, export_fs,
                           xml_centric_courselike_key)
Example #33
    def __init__(self, source, options):
        from fs.osfs import OSFS
        super().__init__(source, lambda: OSFS(options["path"]))
Example #34
def create_virtual_filesystem(local, mount):
    repo_fs = OSFS(local)
    mount_point = dokan.mount(repo_fs, mount)
    return mount_point
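
A hedged usage sketch, Windows-only: it assumes the Dokan driver and fs.expose.dokan are installed, and the paths are invented:

mount_point = create_virtual_filesystem('C:/repos/myrepo', 'M')  # expose the repo as drive M: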
Example #35
def make_osfs(url):
    """Construct OSFS from url."""
    if url.scheme != "file":
        raise ValueError("Scheme must be == 'file'")
    return OSFS("/")
Example #36
def mv(f, path_new, overwrite=True):
    with OSFS('/') as fs:
        fs.move(f.path.rel, path_new, overwrite)
Example #37
class XmlImportFactory(Factory):
    """
    Factory for generating XmlImportData's, which can hold all the data needed
    to run an XModule XML import
    """
    class Meta:
        model = XmlImportData

    filesystem = OSFS(mkdtemp())
    xblock_mixins = (InheritanceMixin, XModuleMixin, HierarchyMixin)
    xblock_select = only_xmodules
    url_name = Sequence(str)
    attribs = {}
    policy = {}
    inline_xml = True
    tag = 'unknown'
    course_id = 'edX/xml_test_course/101'

    @classmethod
    def _adjust_kwargs(cls, **kwargs):
        """
        Adjust the kwargs to be passed to the generated class.

        Any kwargs that match :func:`XmlImportData.__init__` will be passed
        through. Any other unknown `kwargs` will be treated as XML attributes

        :param tag: xml tag for the generated :class:`Element` node
        :param text: (Optional) specifies the text of the generated :class:`Element`.
        :param policy: (Optional) specifies data for the policy json file for this node
        :type policy: dict
        :param attribs: (Optional) specify attributes for the XML node
        :type attribs: dict
        """
        tag = kwargs.pop('tag', 'unknown')
        kwargs['policy'] = {
            '{tag}/{url_name}'.format(tag=tag, url_name=kwargs['url_name']):
            kwargs['policy']
        }

        kwargs['xml_node'].text = kwargs.pop('text', None)

        kwargs['xml_node'].attrib.update(kwargs.pop('attribs', {}))

        # Make sure that the xml_module doesn't try and open a file to find the contents
        # of this node.
        inline_xml = kwargs.pop('inline_xml')

        if inline_xml:
            kwargs['xml_node'].set('not_a_pointer', 'true')

        for key in list(kwargs.keys()):
            if key not in XML_IMPORT_ARGS:
                kwargs['xml_node'].set(key, kwargs.pop(key))

        if not inline_xml:
            kwargs['xml_node'].write(kwargs['filesystem'].open(
                '{}/{}.xml'.format(kwargs['tag'], kwargs['url_name'])),
                                     encoding='utf-8')

        return kwargs

    @lazy_attribute
    def xml_node(self):
        """An :class:`xml.etree.Element`"""
        return etree.Element(self.tag)

    @post_generation
    def parent(self, _create, extracted, **_):
        """Hook to merge this xml into a parent xml node"""
        if extracted is None:
            return

        extracted._xml_node.append(self._xml_node)  # pylint: disable=no-member, protected-access
        extracted.policy.update(self.policy)
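
A hedged usage sketch of the factory above; the field values are invented, and with the default inline_xml=True the generated node stays inline rather than being written to the temp filesystem:

import_data = XmlImportFactory(
    tag='video',
    url_name='intro_video',
    attribs={'display_name': 'Introduction'},
    policy={'graded': 'false'},
)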
    def test_fs(self):
        """test reading logging from fs"""
        fs = OSFS(self.path)
        loggingconf.init_logging_fs(fs, 'logging.ini')
        loggingconf.init_logging_fs(fs, 'extend.ini')
Example #39
This
is a test file
{{%- if readme %}}
@readme.txt
Readme file
-----------
${{message}}
{{%- endif %}}
@templates/base.html
<h1>${title}</h1>
<ul>
    {% for fruit in fruits %}
    <li>${fruit}</li>
    {% endfor %}
</ul>
@settings/production.ini
@foo/bar/baz/
@author
Bob
    """

    from fs.osfs import OSFS
    from fs.memoryfs import MemoryFS

    fs = OSFS("__test__", create=True)
    fs = MemoryFS()
    td = dict(message="Hello, World!", readme=True)
    compile_fs_template(fs, template, td)

    fs.tree()
Example #40
from fs.osfs import OSFS
from json import load, dumps, loads
from utils import run, cleanup, load_repos
from sys import stdin, exit
from os import path as ospath
import codecs
import time
import webpages
import backend
import logging
import settings
from queue import FifoSQLiteQueue

root_path = settings.root_path
root = OSFS(root_path)
repos_path = settings.repos_path
source = settings.sources_path
staging_build = settings.staging_path
backups = settings.backups_path
build_path = settings.build_path
register_path = settings.register_path
script_entry_path = settings.script_entry_path
production_path = settings.production_path
assets_path = settings.assets_path

logging.basicConfig(filename='log.txt',
                    level=logging.DEBUG,
                    format='%(asctime)s %(levelname)s: %(message)s',
                    datefmt='%m/%d/%Y %I:%M:%S %p')
Example #41
# Imports for names used below (Queue is assumed to be the stdlib queue.Queue)
from queue import Queue

import requests
from requests.auth import HTTPBasicAuth

from fs.osfs import OSFS
import fs.path
import logging

from dataflow.utils import input_protection
import backend
import secret
import config

from .common import File

logger = logging.getLogger(__name__)

madokami_auth = HTTPBasicAuth(secret.madokami_uname, secret.madokami_pass)
# noinspection PyUnresolvedReferences
pyfs = OSFS(config.storage_dir)


@input_protection()
def download_file(input: File, output: Queue):
    try:
        logger.debug('Entering download_file')
        subdir = fs.path.join(*fs.path.split(input.location)[:-1])
        if not pyfs.isdir(subdir):
            pyfs.makedirs(subdir)

        logger.info('Starting download for {manga_id}: {name}'.format(
            manga_id=input.manga_id,
            name=input.location,
        ))
        data = requests.get(url=input.url, auth=madokami_auth, stream=True)
Example #42
This
is a test file
{{%- if readme %}}
@readme.txt
Readme file
-----------
${{message}}
{{%- endif %}}
@templates/base.html
<h1>${title}</h1>
<ul>
    {% for fruit in fruits %}
    <li>${fruit}</li>
    {% endfor %}
</ul>
@settings/production.ini
@foo/bar/baz/
@author
Bob
    """

    from fs.osfs import OSFS
    from fs.memoryfs import MemoryFS

    fs = OSFS('__test__', create=True)
    fs = MemoryFS()
    td = dict(message="Hello, World!", readme=True)
    compile_fs_template(fs, template, td)

    fs.tree()
Example #43
    def run(self):
        parser = self.get_argparse()
        args = parser.parse_args(sys.argv[1:])

        if args.version is None:
            major, minor = __version__.split('.')[:2]
            version = "{}.{}".format(major, minor)
        else:
            version = args.version

        try:
            with open(expanduser(args.settings), 'rt') as f_ini:
                cfg = SettingsContainer.read_from_file(f_ini)
                print("Read settings from {}".format(args.settings))
        except IOError:
            cfg = SettingsContainer()

        from ..docgen.extracter import Extracter
        from ..docgen.builder import Builder
        from ..command import doc_project
        location = dirname(doc_project.__file__)

        try:
            base_docs_fs = OSFS('text')
        except FSError:
            sys.stderr.write('run me from moya/docs directory\n')
            return -1
        extract_fs = OSFS(join('doccode', version), create=True)
        languages = [
            d for d in base_docs_fs.listdir(dirs_only=True) if len(d) == 2
        ]

        def do_extract():
            print("Extracting docs v{}".format(version))
            utils.remove_all(extract_fs, '/')
            try:
                archive, context, doc = moya_build.build_server(
                    location, 'settings.ini')
            except Exception:
                raise

            extract_fs.makedir("site/docs", recursive=True)
            extract_fs.makedir("site/tags", recursive=True)
            #extract_fs.makedir("libs")

            with extract_fs.opendir('site/tags') as tags_fs:
                extracter = Extracter(archive, tags_fs)
                const_data = {}
                builtin_tags = []
                for namespace in self.builtin_namespaces:
                    xmlns = getattr(namespaces, namespace, None)
                    if xmlns is None:
                        raise ValueError(
                            "XML namespace '{}' is not in namespaces.py".
                            format(namespace))
                    namespace_tags = archive.registry.get_elements_in_xmlns(
                        xmlns).values()
                    builtin_tags.extend(namespace_tags)

                extracter.extract_tags(builtin_tags, const_data=const_data)

            for language in languages:
                with extract_fs.makedirs("site/docs",
                                         recreate=True) as language_fs:
                    doc_extracter = Extracter(None, language_fs)
                    docs_fs = base_docs_fs.opendir(language)
                    doc_extracter.extract_site_docs(docs_fs, dirname=language)

        if args.extract:
            do_extract()

        if args.build:
            theme_path = cfg.get('paths', 'theme', None)
            dst_path = join('html', version)
            if theme_path is None:
                theme_fs = OSFS('theme')
            else:
                theme_fs = open_fs(theme_path)

            output_path = cfg.get('paths', 'output', None)

            if output_path is None:
                output_base_fs = OSFS(dst_path, create=True)
            else:
                output_root_base_fs = open_fs(output_path)
                output_base_fs = output_root_base_fs.makedirs(dst_path,
                                                              recreate=True)

            #output_base_fs = OSFS(join('html', version), create=True)
            utils.remove_all(output_base_fs, '/')

            def do_build():
                print("Building docs v{}".format(version))
                lib_info = {}
                lib_paths = {}
                for long_name, lib in self.document_libs:
                    lib_info[long_name] = moya_build.get_lib_info(lib)
                    lib_paths[long_name] = output_base_fs.getsyspath(
                        join('libs', long_name, 'index.html'))
                for language in languages:
                    docs_fs = base_docs_fs.makedirs(language)
                    output_fs = output_base_fs.makedirs(language)
                    utils.remove_all(output_fs, '/')

                    with extract_fs.opendir("site") as extract_site_fs:
                        builder = Builder(extract_site_fs, output_fs, theme_fs)
                        from ..tools import timer
                        with timer('render time'):
                            builder.build({
                                "libs": lib_info,
                                "lib_paths": lib_paths
                            })

                    # output_base_fs.makedir("libs", allow_recreate=True)
                    # for long_name, lib in self.document_libs:
                    #     source_path = extract_fs.getsyspath(join("libs", long_name))
                    #     output_path = output_base_fs.getsyspath('libs')
                    #     cmd_template = 'moya --debug doc build {} --theme libtheme --source "{}" --output "{}"'
                    #     cmd = cmd_template.format(lib, source_path, output_path)
                    #     os.system(cmd)

            def extract_build():
                do_extract()
                do_build()

            do_build()

            if not args.nobrowser:
                import webbrowser
                index_url = "file://" + output_base_fs.getsyspath(
                    'en/index.html')
                print(index_url)
                webbrowser.open(index_url)

            if args.watch:
                print("Watching for changes...")
                observer = Observer()
                path = base_docs_fs.getsyspath('/')

                reload_watcher = ReloadChangeWatcher(base_docs_fs,
                                                     extract_build)
                observer.schedule(reload_watcher, path, recursive=True)
                observer.start()

                while True:
                    try:
                        time.sleep(0.1)
                    except KeyboardInterrupt:
                        observer.stop()
                        break
                observer.join()

        return 0
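The watch loop above follows the standard watchdog pattern: schedule a handler on a directory, start the observer, and idle until interrupted. A minimal standalone version, using plain watchdog classes in place of Moya's ReloadChangeWatcher (the rebuild callback is illustrative):

import time

from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer


class RebuildHandler(FileSystemEventHandler):
    def __init__(self, rebuild):
        self._rebuild = rebuild

    def on_any_event(self, event):
        # Re-extract and rebuild the docs whenever anything changes.
        self._rebuild()


observer = Observer()
observer.schedule(RebuildHandler(lambda: print("rebuilding...")), ".", recursive=True)
observer.start()
try:
    while True:
        time.sleep(0.1)
except KeyboardInterrupt:
    observer.stop()
observer.join()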
Example #44
import glob
import hashlib
import io
import os
import random
import struct
from math import ceil
from struct import pack, unpack
from time import time

from fs.copy import copy_fs
from fs.memoryfs import MemoryFS
from fs.osfs import OSFS

# PRNG, add_art_image_files_and_metadata_to_zstd_compressed_tar_file_func and
# get_sha256_hash_of_input_data_func are helpers defined elsewhere in the
# original script, as are the two module-level configuration globals below.


def encode_file_into_luby_blocks_func(
        folder_containing_art_image_and_metadata_files):
    global block_redundancy_factor
    global desired_block_size_in_bytes
    file_paths_in_folder = glob.glob(
        os.path.join(folder_containing_art_image_and_metadata_files, '*'))
    for current_file_path in file_paths_in_folder:
        if current_file_path.split('.')[-1] in ['zst', 'tar']:
            try:
                os.remove(current_file_path)
            except Exception as e:
                print('Error: ' + str(e))
    c_constant = 0.1  # LT-code degree distribution parameter; don't touch
    delta_constant = 0.5  # LT-code failure probability parameter; don't touch
    start_time = time()
    ramdisk_object = MemoryFS()
    seed = random.randint(0, (1 << 31) - 1)
    compressed_output_file_path, compressed_file_hash = add_art_image_files_and_metadata_to_zstd_compressed_tar_file_func(
        folder_containing_art_image_and_metadata_files)
    final_art_file__original_size_in_bytes = os.path.getsize(
        compressed_output_file_path)
    # Process the compressed file into a stream of encoded blocks, and save
    # those blocks as separate files in the output folder:
    print('Now encoding file ' + compressed_output_file_path + ' (' +
          str(round(final_art_file__original_size_in_bytes / 1000000)) +
          'MB)\n\n')
    total_number_of_blocks_to_generate = ceil(
        (1.00 * block_redundancy_factor *
         final_art_file__original_size_in_bytes) / desired_block_size_in_bytes)
    print(
        'Total number of blocks to generate for target level of redundancy: ' +
        str(total_number_of_blocks_to_generate))
    with open(compressed_output_file_path, 'rb') as f:
        compressed_data = f.read()
    compressed_data_size_in_bytes = len(compressed_data)
    # Split the compressed data into fixed-size chunks (the final chunk padded
    # with ASCII '0' bytes) and interpret each chunk as a little-endian integer.
    blocks = [
        int.from_bytes(
            compressed_data[ii:ii + desired_block_size_in_bytes].ljust(
                desired_block_size_in_bytes, b'0'), 'little') for ii in
        range(0, compressed_data_size_in_bytes, desired_block_size_in_bytes)
    ]
    prng = PRNG(params=(len(blocks), delta_constant, c_constant))
    prng.set_seed(seed)
    output_blocks_list = list()
    number_of_blocks_generated = 0
    while number_of_blocks_generated < total_number_of_blocks_to_generate:
        random_seed, d, ix_samples = prng.get_src_blocks()
        # XOR the sampled source blocks together to form one LT-coded block.
        block_data = 0
        for ix in ix_samples:
            block_data ^= blocks[ix]
        block_data_bytes = int.to_bytes(block_data,
                                        desired_block_size_in_bytes, 'little')
        block_data_hash = hashlib.sha3_256(block_data_bytes).digest()
        block = (compressed_data_size_in_bytes, desired_block_size_in_bytes,
                 random_seed, block_data_hash, block_data_bytes)
        header_bit_packing_pattern_string = '<3I32s'
        bit_packing_pattern_string = header_bit_packing_pattern_string + str(
            desired_block_size_in_bytes) + 's'
        length_of_header_in_bytes = struct.calcsize(
            header_bit_packing_pattern_string)
        packed_block_data = pack(bit_packing_pattern_string, *block)
        if number_of_blocks_generated == 0:  #Test that the bit-packing is working correctly:
            with io.BufferedReader(io.BytesIO(packed_block_data)) as f:
                header_data = f.read(length_of_header_in_bytes)
                #first_generated_block_raw_data = f.read(desired_block_size_in_bytes)
            compressed_input_data_size_in_bytes_test, desired_block_size_in_bytes_test, random_seed_test, block_data_hash_test = unpack(
                header_bit_packing_pattern_string, header_data)
            if block_data_hash_test != block_data_hash:
                print(
                    'Error! Block data hash does not match the hash reported in the block header!'
                )
        output_blocks_list.append(packed_block_data)
        number_of_blocks_generated = number_of_blocks_generated + 1
        hash_of_block = get_sha256_hash_of_input_data_func(packed_block_data)
        output_block_file_path = 'FileHash__' + compressed_file_hash + '__Block__' + '{0:09}'.format(
            number_of_blocks_generated
        ) + '__BlockHash_' + hash_of_block + '.block'
        try:
            with ramdisk_object.open(output_block_file_path, 'wb') as f:
                f.write(packed_block_data)
        except Exception as e:
            print('Error: ' + str(e))
    duration_in_seconds = round(time() - start_time, 1)
    print('\n\nFinished processing in ' + str(duration_in_seconds) +
          ' seconds! \nOriginal zip file was encoded into ' +
          str(number_of_blocks_generated) + ' blocks of ' +
          str(ceil(desired_block_size_in_bytes / 1000)) +
          ' kilobytes each. Total size of all blocks is ~' + str(
              ceil((number_of_blocks_generated * desired_block_size_in_bytes) /
                   1000000)) + ' megabytes\n')
    print('Now copying encoded files from ram disk to local storage...')
    block_storage_folder_path = folder_containing_art_image_and_metadata_files + os.sep + 'block_files'
    if not os.path.isdir(block_storage_folder_path):
        os.makedirs(block_storage_folder_path)
    filesystem_object = OSFS(block_storage_folder_path)
    copy_fs(ramdisk_object, filesystem_object)
    print('Done!\n')
    ramdisk_object.close()
    return duration_in_seconds
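As a companion sketch (not part of the original project), a generated .block file can be unpacked back into its header fields and payload using the same '<3I32s' header layout the encoder packs above:

import struct


def read_block_file(path):
    # Header: three little-endian uint32s (original file size, block size,
    # seed) plus a 32-byte SHA3-256 digest, followed by the block payload.
    header_fmt = '<3I32s'
    header_len = struct.calcsize(header_fmt)
    with open(path, 'rb') as f:
        raw = f.read()
    file_size, block_size, seed, digest = struct.unpack(header_fmt, raw[:header_len])
    payload = raw[header_len:header_len + block_size]
    return file_size, block_size, seed, digest, payload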
Example #45
    def _encode(self, path):
        path = path.replace(":", "__colon__")
        if not self.allow_autorun:
            if path.lower().startswith("_autorun."):
                path = path[1:]
        return path

    def _decode(self, path):
        path = relpath(normpath(path))
        path = path.replace("__colon__", ":")
        if not self.allow_autorun:
            if path.lower().startswith("autorun."):
                path = "_" + path
        return path


if __name__ == "__main__":
    import os.path
    import tempfile
    from fs.osfs import OSFS
    from fs.memoryfs import MemoryFS
    from shutil import rmtree
    from six import b
    path = tempfile.mkdtemp()
    try:
        fs = OSFS(path)
        #fs = MemoryFS()
        fs.setcontents("test1.txt", b("test one"))
        flags = DOKAN_OPTION_DEBUG | DOKAN_OPTION_STDERR | DOKAN_OPTION_REMOVABLE
        mount(fs, "Q:\\", foreground=True, numthreads=1, flags=flags)
        fs.close()
    finally:
        rmtree(path)
Example #46
def touch(f):
    with OSFS('/') as fs:
        fs.create(f.path.rel)
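The touch helper here, like rm and cp in the later examples, expects an f argument exposing a path.rel attribute with a path relative to the filesystem root. A hypothetical stand-in for trying it out:

from types import SimpleNamespace

# Stand-in for the `f` objects these one-liners expect (the real ones come
# from the surrounding project).
f = SimpleNamespace(path=SimpleNamespace(rel='tmp/example.txt'))
touch(f)  # creates /tmp/example.txt via OSFS('/')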
Example #47
    def test_export_roundtrip(self, course_dir, mock_get):

        # Patch network calls to retrieve the textbook TOC
        mock_get.return_value.text = dedent("""
            <?xml version="1.0"?><table_of_contents>
            <entry page="5" page_label="ii" name="Table of Contents"/>
            </table_of_contents>
        """).strip()

        root_dir = path(self.temp_dir)
        print("Copying test course to temp dir {0}".format(root_dir))

        data_dir = path(DATA_DIR)
        shutil.copytree(data_dir / course_dir, root_dir / course_dir)

        print("Starting import")
        initial_import = XMLModuleStore(root_dir,
                                        course_dirs=[course_dir],
                                        xblock_mixins=(XModuleMixin, ))

        courses = initial_import.get_courses()
        self.assertEquals(len(courses), 1)
        initial_course = courses[0]

        # export to the same directory--that way things like the custom_tags/ folder
        # will still be there.
        print("Starting export")
        file_system = OSFS(root_dir)
        initial_course.runtime.export_fs = file_system.makeopendir(course_dir)
        root = lxml.etree.Element('root')

        initial_course.add_xml_to_node(root)
        with initial_course.runtime.export_fs.open('course.xml',
                                                   'w') as course_xml:
            lxml.etree.ElementTree(root).write(course_xml)

        print("Starting second import")
        second_import = XMLModuleStore(root_dir,
                                       course_dirs=[course_dir],
                                       xblock_mixins=(XModuleMixin, ))

        courses2 = second_import.get_courses()
        self.assertEquals(len(courses2), 1)
        exported_course = courses2[0]

        print("Checking course equality")

        # HACK: filenames change when changing file formats
        # during imports from old-style courses.  Ignore them.
        strip_filenames(initial_course)
        strip_filenames(exported_course)

        self.assertTrue(blocks_are_equivalent(initial_course, exported_course))
        self.assertEquals(initial_course.id, exported_course.id)
        course_id = initial_course.id

        print("Checking key equality")
        self.assertItemsEqual(initial_import.modules[course_id].keys(),
                              second_import.modules[course_id].keys())

        print("Checking module equality")
        for location in initial_import.modules[course_id].keys():
            print("Checking", location)
            self.assertTrue(
                blocks_are_equivalent(
                    initial_import.modules[course_id][location],
                    second_import.modules[course_id][location]))
Example #48
def rm(f):
    with OSFS('/') as fs:
        fs.remove(f.path.rel)
Example #49
    def __init__(self,
                 xmlstore,
                 course_id,
                 course_dir,
                 error_tracker,
                 load_error_modules=True,
                 target_course_id=None,
                 **kwargs):
        """
        A class that handles loading from xml.  Does some munging to ensure that
        all elements have unique slugs.

        xmlstore: the XMLModuleStore to store the loaded modules in
        """
        self.unnamed = defaultdict(
            int)  # category -> num of new url_names for that category
        self.used_names = defaultdict(set)  # category -> set of used url_names

        # Adding the course_id as passed in for later reference rather than
        # having to recombine the org/course/url_name
        self.course_id = course_id
        self.load_error_modules = load_error_modules
        self.modulestore = xmlstore

        def process_xml(xml):
            """Takes an xml string, and returns a XBlock created from
            that xml.
            """
            def make_name_unique(xml_data):
                """
                Make sure that the url_name of xml_data is unique.  If a previously loaded
                unnamed descriptor stole this element's url_name, create a new one.

                Removes 'slug' attribute if present, and adds or overwrites the 'url_name' attribute.
                """
                # VS[compat]. Take this out once course conversion is done (perhaps leave the uniqueness check)

                # tags that really need unique names--they store (or should store) state.
                need_uniq_names = ('problem', 'sequential', 'video', 'course',
                                   'chapter', 'videosequence', 'poll_question',
                                   'vertical')

                attr = xml_data.attrib
                tag = xml_data.tag
                identity = lambda x: x  # use the attribute value as-is
                # Things to try to get a name, in order (key, cleaning function, remove key after reading?)
                lookups = [('url_name', identity, False),
                           ('slug', identity, True),
                           ('name', Location.clean, False),
                           ('display_name', Location.clean, False)]

                url_name = None
                for key, clean, remove in lookups:
                    if key in attr:
                        url_name = clean(attr[key])
                        if remove:
                            del attr[key]
                        break

                def looks_like_fallback(url_name):
                    """Does this look like something that came from fallback_name()?"""
                    return (url_name is not None and url_name.startswith(tag)
                            and re.search('[0-9a-fA-F]{12}$', url_name))

                def fallback_name(orig_name=None):
                    """Return the fallback name for this module.  This is a function instead of a variable
                    because we want it to be lazy."""
                    dog_stats_api.increment(
                        DEPRECATION_VSCOMPAT_EVENT,
                        tags=(
                            "location:import_system_fallback_name",
                            u"name:{}".format(orig_name),
                        ))

                    if looks_like_fallback(orig_name):
                        # We're about to re-hash, in case something changed, so get rid of the tag_ and hash
                        orig_name = orig_name[len(tag) + 1:-12]
                    # append the hash of the content--the first 12 bytes should be plenty.
                    orig_name = "_" + orig_name if orig_name not in (
                        None, "") else ""
                    xml_bytes = xml.encode('utf8')
                    return tag + orig_name + "_" + hashlib.sha1(
                        xml_bytes).hexdigest()[:12]

                # Fallback if there was nothing we could use:
                if url_name is None or url_name == "":
                    url_name = fallback_name()
                    # Don't log a warning--we don't need this in the log.  Do
                    # put it in the error tracker--content folks need to see it.

                    if tag in need_uniq_names:
                        error_tracker(
                            u"PROBLEM: no name of any kind specified for {tag}.  Student "
                            u"state will not be properly tracked for this module.  Problem xml:"
                            u" '{xml}...'".format(tag=tag, xml=xml[:100]))
                    else:
                        # TODO (vshnayder): We may want to enable this once course repos are cleaned up.
                        # (or we may want to give up on the requirement for non-state-relevant issues...)
                        # error_tracker("WARNING: no name specified for module. xml='{0}...'".format(xml[:100]))
                        pass

                # Make sure everything is unique
                if url_name in self.used_names[tag]:
                    # Always complain about modules that store state.  If it
                    # doesn't store state, don't complain about things that are
                    # hashed.
                    if tag in need_uniq_names:
                        msg = (
                            u"Non-unique url_name in xml.  This may break state tracking for content."
                            u"  url_name={0}.  Content={1}".format(
                                url_name, xml[:100]))
                        error_tracker("PROBLEM: " + msg)
                        log.warning(msg)
                        # Just set name to fallback_name--if there are multiple things with the same fallback name,
                        # they are actually identical, so it's fragile, but not immediately broken.

                        # TODO (vshnayder): if the tag is a pointer tag, this will
                        # break the content because we won't have the right link.
                        # That's also a legitimate attempt to reuse the same content
                        # from multiple places.  Once we actually allow that, we'll
                        # need to update this to complain about non-unique names for
                        # definitions, but allow multiple uses.
                        url_name = fallback_name(url_name)

                self.used_names[tag].add(url_name)
                xml_data.set('url_name', url_name)

            try:
                # VS[compat]
                # TODO (cpennington): Remove this once all fall 2012 courses
                # have been imported into the cms from xml
                xml = clean_out_mako_templating(xml)
                xml_data = etree.fromstring(xml)

                make_name_unique(xml_data)

                descriptor = self.xblock_from_node(
                    xml_data,
                    None,  # parent_id
                    id_manager,
                )
            except Exception as err:  # pylint: disable=broad-except
                if not self.load_error_modules:
                    raise

                # Didn't load properly.  Fall back on loading as an error
                # descriptor.  This should never error due to formatting.

                msg = "Error loading from xml. %s"
                log.warning(
                    msg,
                    unicode(err)[:200],
                    # Normally, we don't want lots of exception traces in our logs from common
                    # content problems.  But if you're debugging the xml loading code itself,
                    # uncomment the next line.
                    # exc_info=True
                )

                msg = msg % (unicode(err)[:200])

                self.error_tracker(msg)
                err_msg = msg + "\n" + exc_info_to_str(sys.exc_info())
                descriptor = ErrorDescriptor.from_xml(xml, self, id_manager,
                                                      err_msg)

            descriptor.data_dir = course_dir

            if descriptor.scope_ids.usage_id in xmlstore.modules[course_id]:
                # keep the parent pointer if any but allow everything else to overwrite
                other_copy = xmlstore.modules[course_id][
                    descriptor.scope_ids.usage_id]
                descriptor.parent = other_copy.parent
                if descriptor != other_copy:
                    log.warning("%s has more than one definition",
                                descriptor.scope_ids.usage_id)
            xmlstore.modules[course_id][
                descriptor.scope_ids.usage_id] = descriptor

            if descriptor.has_children:
                for child in descriptor.get_children():
                    # parent is alphabetically least
                    if child.parent is None or child.parent > descriptor.scope_ids.usage_id:
                        child.parent = descriptor.location
                        child.save()

            # After setting up the descriptor, save any changes that we have
            # made to attributes on the descriptor to the underlying KeyValueStore.
            descriptor.save()
            return descriptor

        render_template = lambda template, context: u''

        # TODO (vshnayder): we are somewhat architecturally confused in the loading code:
        # load_item should actually be get_instance, because it expects the course-specific
        # policy to be loaded.  For now, just add the course_id here...
        def load_item(usage_key, for_parent=None):
            """Return the XBlock for the specified location"""
            return xmlstore.get_item(usage_key, for_parent=for_parent)

        resources_fs = OSFS(xmlstore.data_dir / course_dir)

        id_manager = CourseImportLocationManager(course_id, target_course_id)

        super(ImportSystem, self).__init__(load_item=load_item,
                                           resources_fs=resources_fs,
                                           render_template=render_template,
                                           error_tracker=error_tracker,
                                           process_xml=process_xml,
                                           id_generator=id_manager,
                                           id_reader=id_manager,
                                           **kwargs)
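The fallback naming scheme in fallback_name above reduces to the tag, an optional cleaned original name, and the first 12 hex digits of the content hash. A standalone re-implementation for illustration (not the edx-platform API):

import hashlib


def fallback_name_demo(tag, xml, orig_name=None):
    # Mirrors the scheme above: "<tag>[_<orig_name>]_<sha1(xml)[:12]>"
    suffix = "_" + orig_name if orig_name else ""
    return tag + suffix + "_" + hashlib.sha1(xml.encode("utf8")).hexdigest()[:12]


print(fallback_name_demo("problem", "<problem>2+2</problem>"))
# -> something like "problem_a1b2c3d4e5f6"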
Example #50
import argparse
import os

from PIL import Image
from fs.osfs import OSFS

# BufferedSlfFS, SlfFS, load_8bit_sti, save_8bit_sti, create_free_editorslf
# and generate_md5_file are provided by the surrounding project.


def main():
    parser = argparse.ArgumentParser(description='Create free editor.slf')
    parser.add_argument('--original', help="Original editor.slf")
    parser.add_argument('-o',
                        '--output',
                        default='build/editor.slf',
                        help="Where to store the created slf file")
    parser.add_argument('--name', help="Library name")
    args = parser.parse_args()

    if not os.path.exists(os.path.dirname(args.output)):
        os.makedirs(os.path.dirname(args.output))

    if args.original is None:
        target_fs = create_free_editorslf(args.name)
        with open(args.output, 'wb') as target_file:
            target_fs.save(target_file)
        generate_md5_file(args.output)
        return

    # create editor.slf by replacing images in the original editor.slf
    target_fs = BufferedSlfFS()
    replacement_fs = OSFS('editor')
    with open(args.original, 'rb') as source_file:
        source_fs = SlfFS(source_file)

        target_fs.library_name = args.name or source_fs.library_name
        target_fs.library_path = source_fs.library_path
        target_fs.version = source_fs.version
        target_fs.sort = source_fs.sort

        for directory in source_fs.walkdirs():
            if directory == '/':
                continue
            target_fs.makedir(directory)
        for file in source_fs.walkfiles():
            base_name, _ = os.path.splitext(file)
            with source_fs.open(file, 'rb') as source, target_fs.open(
                    file, 'wb') as target:
                ja2_images = load_8bit_sti(source)
                replacement_path = base_name + '.gif'
                replacement_file_exists = replacement_fs.isfile(
                    replacement_path)
                replacement_dir = file
                replacement_dir_exists = replacement_fs.isdir(replacement_dir)
                if len(ja2_images) == 1 and replacement_file_exists:
                    print("Replacing {0} with {1}".format(
                        file, replacement_path))
                    replacement_img = Image.open(
                        replacement_fs.open(replacement_path, 'rb'))
                    ja2_images._palette = replacement_img.palette
                    ja2_images.images[0]._image = replacement_img
                elif len(ja2_images) > 1 and replacement_dir_exists:
                    for i in range(len(ja2_images)):
                        replacement_path = replacement_dir + '/{}.gif'.format(
                            i)

                        print("Replacing {0} with {1}".format(
                            file, replacement_path))
                        replacement_img = Image.open(
                            replacement_fs.open(replacement_path, 'rb'))
                        ja2_images._palette = replacement_img.palette
                        ja2_images.images[i]._image = replacement_img
                else:
                    print("Replacing {0} with nothingness".format(file))
                    for sub_image in ja2_images.images:
                        width, height = sub_image.image.size
                        sub_image._image = Image.new('P', (width, height),
                                                     color=54)

                save_8bit_sti(ja2_images, target)

    with open(args.output, 'wb') as target_file:
        target_fs.save(target_file)
    generate_md5_file(args.output)
Example #51

def export_to_xml(modulestore,
                  contentstore,
                  course_location,
                  root_dir,
                  course_dir,
                  draft_modulestore=None):

    course = modulestore.get_item(course_location)

    fs = OSFS(root_dir)
    export_fs = fs.makeopendir(course_dir)

    xml = course.export_to_xml(export_fs)
    with export_fs.open('course.xml', 'w') as course_xml:
        course_xml.write(xml)

    # export the static assets
    contentstore.export_all_for_course(
        course_location, root_dir + '/' + course_dir + '/static/')

    # export the static tabs
    export_extra_content(export_fs, modulestore, course_location, 'static_tab',
                         'tabs', '.html')

    # export the custom tags
    export_extra_content(export_fs, modulestore, course_location,
                         'custom_tag_template', 'custom_tags')

    # export the course updates
    export_extra_content(export_fs, modulestore, course_location,
                         'course_info', 'info', '.html')

    # export the 'about' data (e.g. overview, etc.)
    export_extra_content(export_fs, modulestore, course_location, 'about',
                         'about', '.html')

    # export the grading policy
    policies_dir = export_fs.makeopendir('policies')
    course_run_policy_dir = policies_dir.makeopendir(course.location.name)
    with course_run_policy_dir.open('grading_policy.json',
                                    'w') as grading_policy:
        grading_policy.write(dumps(course.grading_policy, cls=EdxJSONEncoder))

    # export all of the course metadata in policy.json
    with course_run_policy_dir.open('policy.json', 'w') as course_policy:
        policy = {'course/' + course.location.name: own_metadata(course)}
        course_policy.write(dumps(policy, cls=EdxJSONEncoder))

    # export draft content
    # NOTE: this code assumes that verticals are the top most draftable container
    # should we change the application, then this assumption will no longer
    # be valid
    if draft_modulestore is not None:
        draft_verticals = draft_modulestore.get_items([
            None, course_location.org, course_location.course, 'vertical',
            None, 'draft'
        ])
        if len(draft_verticals) > 0:
            draft_course_dir = export_fs.makeopendir('drafts')
            for draft_vertical in draft_verticals:
                parent_locs = draft_modulestore.get_parent_locations(
                    draft_vertical.location, course.location.course_id)
                # Don't try to export orphaned items.
                if len(parent_locs) > 0:
                    logging.debug('parent_locs = {0}'.format(parent_locs))
                    draft_vertical.xml_attributes[
                        'parent_sequential_url'] = Location(
                            parent_locs[0]).url()
                    sequential = modulestore.get_item(Location(parent_locs[0]))
                    index = sequential.children.index(
                        draft_vertical.location.url())
                    draft_vertical.xml_attributes[
                        'index_in_children_list'] = str(index)
                    draft_vertical.export_to_xml(draft_course_dir)
Example #52
from fs.osfs import OSFS
from wsgi import serve_fs
osfs = OSFS('~/')
application = serve_fs(osfs)
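Any WSGI server can host the resulting application object; a quick local test with the stdlib reference server might look like this (the module name fsapp is hypothetical):

from wsgiref.simple_server import make_server

from fsapp import application  # hypothetical module name for the snippet above

httpd = make_server('', 8000, application)
httpd.serve_forever()  # then browse http://localhost:8000/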
Example #53
def cp(f, path_new, overwrite=True):
    with OSFS('/') as fs:
        fs.copy(f.path.rel, path_new, overwrite)
Example #54

def log_folder():
    log_dir = log_folder_directories['0']
    folder = OSFS(log_dir)
    test_n = len(list(n for n in folder.listdir() if n.startswith('test')))
    return log_dir + "/test" + str(test_n + 1)
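Here log_folder_directories is a dict defined elsewhere in the original module; the helper simply returns the next unused testN path under that directory. A sketch of the assumed setup:

log_folder_directories = {'0': 'logs'}  # assumed shape; the original defines this elsewhere

# With logs/test1 and logs/test2 already present (the directory must exist),
# log_folder() returns "logs/test3".
print(log_folder())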
Example #55
from pathlib import Path
from typing import Union

import yaml
from asdf.versioning import AsdfVersion

# split_tag_version, types_path_like and ResourceMappingProxy are provided by
# the surrounding package and its asdf dependency.


class QualityStandard:
    """Stores information about a quality standard."""
    def __init__(self, resource_root_dir: types_path_like):
        """Create a ``QualityStandard`` instance.

        Parameters
        ----------
        resource_root_dir :
            The path to the resource root directory of the standard

        """
        from fs.osfs import OSFS

        self._name = None
        self._max_version = None
        self._versions = {}

        if isinstance(resource_root_dir, Path):
            resource_root_dir = resource_root_dir.as_posix()

        if isinstance(resource_root_dir, str):
            self._filesystem = OSFS(resource_root_dir)
        else:
            self._filesystem = resource_root_dir

        manifest_dir = self._filesystem.opendir("manifests")
        manifest_files = [
            file.name for file in self._filesystem.filterdir(
                "manifests", ["*.yml", "*.yaml"])
        ]

        for filename in manifest_files:
            # pyfilesystem's notion of a stem cuts at the first '.', so strip
            # the extension manually instead
            qs_name, version = split_tag_version(
                filename[:filename.rindex(".")])

            if self._name is None:
                self._name = qs_name
                self._max_version = version
            else:
                if qs_name != self._name:
                    raise ValueError("Inconsistent naming of manifest files")
                if self._max_version < version:
                    self._max_version = version

            with manifest_dir.open(filename, "r") as stream:
                content = yaml.load(stream, Loader=yaml.SafeLoader)
                self._versions[version] = {
                    "manifest_file_mapping": {
                        content["id"]: filename
                    },
                    "schema_file_mapping": {
                        mapping["uri"]: (f"{mapping['file']}.yaml")
                        for mapping in content["tags"]
                    },
                }

    def _map_file_content(self, file_mapping: dict, directory: str,
                          version: AsdfVersion) -> ResourceMappingProxy:
        """Get a mapping between an URI and a file content.

        Parameters
        ----------
        file_mapping : Dict
            A dictionary containing the mapping between URI and the file path
        directory:
            Directory that contains the files. This is either 'schemas' or 'mappings'
        version : AsdfVersion
            The version of the standard.

        Returns
        -------
        ResourceMappingProxy :
            Mapping between a URI and the corresponding file content

        """
        content_mapping = {
            uri: self._filesystem.open(f"{directory}/{filename}").read()
            for uri, filename in file_mapping.items()
        }

        return ResourceMappingProxy(content_mapping,
                                    package_name=self._name,
                                    package_version=version)

    @property
    def name(self) -> str:
        """Get the quality standards name."""
        return self._name

    def get_mappings(self, version: Union[AsdfVersion, str] = None):
        """Get the manifest and schema mapping for the specified version.

        Parameters
        ----------
        version : Union[AsdfVersion, str]
            Requested standard version. If `None` is provided, the latest will be used.

        Returns
        -------
        ResourceMappingProxy :
            Manifest mapping
        ResourceMappingProxy :
            Schema mapping

        """
        if version is None:
            version = self._max_version
        elif not isinstance(version, AsdfVersion):
            version = AsdfVersion(version)

        file_mappings = self._versions[version]
        manifest_mapping = self._map_file_content(
            file_mappings["manifest_file_mapping"], "manifests", version)
        schema_mapping = self._map_file_content(
            file_mappings["schema_file_mapping"], "schemas", version)

        return manifest_mapping, schema_mapping
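A hypothetical usage sketch, assuming a resource root directory containing the manifests/ and schemas/ subfolders the class expects:

qs = QualityStandard("/path/to/quality_standard_resources")  # path is illustrative
print(qs.name)

# Latest version by default; pass e.g. "1.0.0" to pin a specific one.
manifest_mapping, schema_mapping = qs.get_mappings()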
Example #56
    def setUp(self):
        path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'data')
        self.data = OSFS(path)
Example #57

def export_to_xml(modulestore, contentstore, course_key, root_dir, course_dir, draft_modulestore=None):
    """
    Export all modules from `modulestore` and content from `contentstore` as xml to `root_dir`.

    `modulestore`: A `ModuleStore` object that is the source of the modules to export
    `contentstore`: A `ContentStore` object that is the source of the content to export, can be None
    `course_key`: The `CourseKey` of the `CourseModuleDescriptor` to export
    `root_dir`: The directory to write the exported xml to
    `course_dir`: The name of the directory inside `root_dir` to write the course content to
    `draft_modulestore`: An optional `DraftModuleStore` that contains draft content, which will be exported
        alongside the public content in the course.
    """

    course = modulestore.get_course(course_key)

    fsm = OSFS(root_dir)
    export_fs = course.runtime.export_fs = fsm.makeopendir(course_dir)

    root = lxml.etree.Element('unknown')
    course.add_xml_to_node(root)

    with export_fs.open('course.xml', 'w') as course_xml:
        lxml.etree.ElementTree(root).write(course_xml)

    # export the static assets
    policies_dir = export_fs.makeopendir('policies')
    if contentstore:
        contentstore.export_all_for_course(
            course_key,
            root_dir + '/' + course_dir + '/static/',
            root_dir + '/' + course_dir + '/policies/assets.json',
        )

        # If we are using the default course image, export it to the
        # legacy location to support backwards compatibility.
        if course.course_image == course.fields['course_image'].default:
            try:
                course_image = contentstore.find(
                    StaticContent.compute_location(
                        course.id,
                        course.course_image
                    ),
                )
            except NotFoundError:
                pass
            else:
                output_dir = root_dir + '/' + course_dir + '/static/images/'
                if not os.path.isdir(output_dir):
                    os.makedirs(output_dir)
                with OSFS(output_dir).open('course_image.jpg', 'wb') as course_image_file:
                    course_image_file.write(course_image.data)

    # export the static tabs
    export_extra_content(export_fs, modulestore, course_key, 'static_tab', 'tabs', '.html')

    # export the custom tags
    export_extra_content(export_fs, modulestore, course_key, 'custom_tag_template', 'custom_tags')

    # export the course updates
    export_extra_content(export_fs, modulestore, course_key, 'course_info', 'info', '.html')

    # export the 'about' data (e.g. overview, etc.)
    export_extra_content(export_fs, modulestore, course_key, 'about', 'about', '.html')

    # export the grading policy
    course_run_policy_dir = policies_dir.makeopendir(course.location.name)
    with course_run_policy_dir.open('grading_policy.json', 'w') as grading_policy:
        grading_policy.write(dumps(course.grading_policy, cls=EdxJSONEncoder))

    # export all of the course metadata in policy.json
    with course_run_policy_dir.open('policy.json', 'w') as course_policy:
        policy = {'course/' + course.location.name: own_metadata(course)}
        course_policy.write(dumps(policy, cls=EdxJSONEncoder))

    # export draft content
    # NOTE: this code assumes that verticals are the top most draftable container
    # should we change the application, then this assumption will no longer
    # be valid
    if draft_modulestore is not None:
        draft_verticals = draft_modulestore.get_items(course_key, category='vertical', revision='draft')
        if len(draft_verticals) > 0:
            draft_course_dir = export_fs.makeopendir(DRAFT_DIR)
            for draft_vertical in draft_verticals:
                parent_locs = draft_modulestore.get_parent_locations(draft_vertical.location)
                # Don't try to export orphaned items.
                if len(parent_locs) > 0:
                    logging.debug('parent_locs = {0}'.format(parent_locs))
                    draft_vertical.xml_attributes['parent_sequential_url'] = parent_locs[0].to_deprecated_string()
                    sequential = modulestore.get_item(parent_locs[0])
                    index = sequential.children.index(draft_vertical.location)
                    draft_vertical.xml_attributes['index_in_children_list'] = str(index)
                    draft_vertical.runtime.export_fs = draft_course_dir
                    node = lxml.etree.Element('unknown')
                    draft_vertical.add_xml_to_node(node)
Example #58

def summary_folder(name):
    logdir = summary_folder_directories['0'] + name
    folder = OSFS(logdir)
    test_n = len(list(n for n in folder.listdir() if n.startswith('test')))
    return logdir + "/test" + str(test_n + 1)
Example #59
import threading
import time
import SocketServer  # Python 2 stdlib; `socketserver` on Python 3

# FSHTTPRequestHandler is defined earlier in the same module.


def serve_fs(fs, address='', port=8000):  # defaults assumed
    """Serve the contents of an FS object over HTTP.

    :param address: IP address to serve on
    :param port: port number

    """
    def Handler(request, client_address, server):
        return FSHTTPRequestHandler(fs, request, client_address, server)

    #class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    #    pass
    httpd = SocketServer.TCPServer((address, port),
                                   Handler,
                                   bind_and_activate=False)
    #httpd = ThreadedTCPServer((address, port), Handler, bind_and_activate=False)
    httpd.allow_reuse_address = True
    httpd.server_bind()
    httpd.server_activate()

    server_thread = threading.Thread(target=httpd.serve_forever)
    server_thread.start()
    try:
        while True:
            time.sleep(0.1)
    except (KeyboardInterrupt, SystemExit):
        httpd.shutdown()


if __name__ == "__main__":

    from fs.osfs import OSFS
    serve_fs(OSFS('~/'))
Example #60
    loaded = load_datapackage(  # assumed counterpart to create_datapackage below
        ZipFS(
            str(
                Path(__file__).parent.resolve() / "fixtures" /
                "test-fixture.zip"),
            write=False,
        ))

    check_metadata(loaded, False)
    check_data(loaded)


if __name__ == "__main__":
    # Create the test fixtures

    dirpath = Path(__file__).parent.resolve() / "fixtures"
    dirpath.mkdir(exist_ok=True)
    (dirpath / "tfd").mkdir(exist_ok=True)

    dp = create_datapackage(fs=OSFS(str(dirpath / "tfd")),
                            name="test-fixture",
                            id_="fixture-42")
    add_data(dp)
    dp.finalize_serialization()

    dp = create_datapackage(
        fs=ZipFS(str(dirpath / "test-fixture.zip"), write=True),
        name="test-fixture",
        id_="fixture-42",
    )
    add_data(dp)
    dp.finalize_serialization()