Example #1
    def test_metadata_import_export(self):
        """Two checks:
            - unknown metadata is preserved across import-export
            - inherited metadata doesn't leak to children.
        """
        system = self.get_system()
        v = 'March 20 17:00'
        url_name = 'test1'
        start_xml = '''
        <course org="{org}" course="{course}"
                due="{due}" url_name="{url_name}" unicorn="purple">
            <chapter url="hi" url_name="ch" display_name="CH">
                <html url_name="h" display_name="H">Two houses, ...</html>
            </chapter>
        </course>'''.format(due=v, org=ORG, course=COURSE, url_name=url_name)
        descriptor = system.process_xml(start_xml)
        compute_inherited_metadata(descriptor)

        print(descriptor, descriptor._model_data)
        self.assertEqual(descriptor.lms.due, Date().from_json(v))

        # Check that the child inherits due correctly
        child = descriptor.get_children()[0]
        self.assertEqual(child.lms.due, Date().from_json(v))
        self.assertEqual(child._inheritable_metadata, child._inherited_metadata)
        self.assertEqual(2, len(child._inherited_metadata))
        self.assertEqual('1970-01-01T00:00:00Z', child._inherited_metadata['start'])
        self.assertEqual(v, child._inherited_metadata['due'])

        # Now export and check things
        resource_fs = MemoryFS()
        exported_xml = descriptor.export_to_xml(resource_fs)

        # Check that the exported xml is just a pointer
        print("Exported xml:", exported_xml)
        pointer = etree.fromstring(exported_xml)
        self.assertTrue(is_pointer_tag(pointer))
        # but it's a special case course pointer
        self.assertEqual(pointer.attrib['course'], COURSE)
        self.assertEqual(pointer.attrib['org'], ORG)

        # Does the course still have unicorns?
        with resource_fs.open('course/{url_name}.xml'.format(url_name=url_name)) as f:
            course_xml = etree.fromstring(f.read())

        self.assertEqual(course_xml.attrib['unicorn'], 'purple')

        # the course and org tags should be _only_ in the pointer
        self.assertTrue('course' not in course_xml.attrib)
        self.assertTrue('org' not in course_xml.attrib)

        # did we successfully strip the url_name from the definition contents?
        self.assertTrue('url_name' not in course_xml.attrib)

        # Does the chapter tag now have a due attribute?
        # hardcoded path to child
        with resource_fs.open('chapter/ch.xml') as f:
            chapter_xml = etree.fromstring(f.read())
        self.assertEqual(chapter_xml.tag, 'chapter')
        self.assertFalse('due' in chapter_xml.attrib)
Example #2
 def test_apply(self, m):
     mfs = MemoryFS()
     d = Directory('.', mfs)
     o = ini.OpGenerateFile('test.txt')
     r = RoutineOnDirectory(d, [o])
     o.apply(r)
     with mfs.open('test.txt', 'r') as fin:
         result = fin.readlines()
     self.assertEqual(result, ['test text'])
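
A minimal, self-contained sketch of the in-memory round trip this test depends on: a single MemoryFS instance holds the file written by the operation, and the same instance is used to read it back. This assumes PyFilesystem's MemoryFS; OpGenerateFile and RoutineOnDirectory are project-specific and not shown here.

from fs.memoryfs import MemoryFS

mfs = MemoryFS()
with mfs.open('test.txt', 'w') as fout:   # write the file into memory
    fout.write('test text')
with mfs.open('test.txt', 'r') as fin:    # read it back from the same FS object
    assert fin.readlines() == ['test text']
mfs.close()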
Example #3
    def test_xml_export_import_cycle(self):
        """
        Test the export-import cycle.
        """
        # Children will only be set after calling this.
        self.lc_block.refresh_children()
        lc_block = self.store.get_item(self.lc_block.location)

        expected_olx = (
            '<library_content display_name="{block.display_name}" max_count="{block.max_count}"'
            ' source_library_id="{block.source_library_id}" source_library_version="{block.source_library_version}">\n'
            '  <html url_name="{block.children[0].block_id}"/>\n'
            '  <html url_name="{block.children[1].block_id}"/>\n'
            '  <html url_name="{block.children[2].block_id}"/>\n'
            '  <html url_name="{block.children[3].block_id}"/>\n'
            '</library_content>\n'
        ).format(
            block=lc_block,
        )

        export_fs = MemoryFS()
        # Set the virtual FS to export the olx to.
        lc_block.runtime._descriptor_system.export_fs = export_fs  # pylint: disable=protected-access

        # Export the olx.
        node = etree.Element("unknown_root")
        lc_block.add_xml_to_node(node)

        # Read it back
        with export_fs.open('{dir}/{file_name}.xml'.format(
            dir=lc_block.scope_ids.usage_id.block_type,
            file_name=lc_block.scope_ids.usage_id.block_id
        )) as f:
            exported_olx = f.read()

        # And compare.
        self.assertEqual(exported_olx, expected_olx)

        runtime = TestImportSystem(load_error_modules=True, course_id=lc_block.location.course_key)
        runtime.resources_fs = export_fs

        # Now import it.
        olx_element = etree.fromstring(exported_olx)
        id_generator = Mock()
        imported_lc_block = LibraryContentBlock.parse_xml(olx_element, runtime, None, id_generator)

        # Check the new XBlock has the same properties as the old one.
        self.assertEqual(imported_lc_block.display_name, lc_block.display_name)
        self.assertEqual(imported_lc_block.source_library_id, lc_block.source_library_id)
        self.assertEqual(imported_lc_block.source_library_version, lc_block.source_library_version)
        self.assertEqual(imported_lc_block.mode, lc_block.mode)
        self.assertEqual(imported_lc_block.max_count, lc_block.max_count)
        self.assertEqual(imported_lc_block.capa_type, lc_block.capa_type)
        self.assertEqual(len(imported_lc_block.children), 4)
        self.assertEqual(imported_lc_block.children, lc_block.children)
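
The read-back step above relies only on the fact that add_xml_to_node() writes each exported block into the MemoryFS at '<block_type>/<block_id>.xml'. A hedged, self-contained illustration of that layout using made-up ids (the real paths come from lc_block.scope_ids):

from fs.memoryfs import MemoryFS

export_fs = MemoryFS()
export_fs.makedir('library_content')                 # block_type directory
export_fs.writetext('library_content/deadbeef.xml',  # hypothetical block_id
                    '<library_content display_name="Demo"/>\n')

with export_fs.open('library_content/deadbeef.xml') as f:
    print(f.read())   # the OLX that the assertions above compare against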
Example #4
    def test_xml_export_import_cycle(self):
        """
        Test the export-import cycle.
        """
        split_test_block = self.store.get_item(self.split_test_block.location)

        expected_olx = (
            '<split_test group_id_to_child="{group_id_to_child}" user_partition_id="2" display_name="A Split Test">\n'
            '  <html url_name="{child_blocks[0].location.block_id}"/>\n'
            '  <html url_name="{child_blocks[1].location.block_id}"/>\n'
            '</split_test>\n').format(child_blocks=self.child_blocks,
                                      group_id_to_child=json.dumps({
                                          "0":
                                          str(self.child_blocks[0].location),
                                          "1":
                                          str(self.child_blocks[1].location),
                                      }).replace('"', '&quot;'))
        export_fs = MemoryFS()
        # Set the virtual FS to export the olx to.
        split_test_block.runtime._descriptor_system.export_fs = export_fs  # pylint: disable=protected-access

        # Export the olx.
        node = lxml.etree.Element("unknown_root")
        split_test_block.add_xml_to_node(node)

        # Read it back
        with export_fs.open('{dir}/{file_name}.xml'.format(
                dir=split_test_block.scope_ids.usage_id.block_type,
                file_name=split_test_block.scope_ids.usage_id.block_id)) as f:
            exported_olx = f.read()

        # And compare.
        assert exported_olx == expected_olx

        runtime = TestImportSystem(
            load_error_modules=True,
            course_id=split_test_block.location.course_key)
        runtime.resources_fs = export_fs

        # Now import it.
        olx_element = lxml.etree.fromstring(exported_olx)
        id_generator = Mock()
        imported_split_test_block = SplitTestBlock.parse_xml(
            olx_element, runtime, None, id_generator)

        # Check the new XBlock has the same properties as the old one.
        assert imported_split_test_block.display_name == split_test_block.display_name
        assert len(imported_split_test_block.children) == len(
            split_test_block.children)
        assert imported_split_test_block.children == split_test_block.children
        assert imported_split_test_block.user_partition_id == split_test_block.user_partition_id
        assert imported_split_test_block.group_id_to_child['0'] == str(
            split_test_block.group_id_to_child['0'])
        assert imported_split_test_block.group_id_to_child['1'] == str(
            split_test_block.group_id_to_child['1'])
Example #5
class LocalBoxMemoryFS():
    '''
    Class that handles an in-memory file system for localbox.
    '''

    def __init__(self):
        self.memory = MemoryFS()

        ## WINDOWS
        if sys.platform == 'win32':
            from fs.expose import dokan
            # Pick an unused drive letter to mount the in-memory FS on.
            letter = random.choice(string.ascii_uppercase) + ":\\"

            while os.path.exists(letter):
                letter = random.choice(string.ascii_uppercase) + ":\\"

            self.mount_directory = letter
            if not os.path.exists(letter):
                dokan.mount(self.memory, letter)

        ## LINUX
        else:
            self.mount_directory = os.path.join(os.path.expanduser('~'), 'mtdecoded') + '/'


    def createfile(self, path, content, wipe=True):
        self.memory.createfile(path, wipe=wipe)
        with self.memory.open(path, "wb") as f:
            f.write(content)

        if not os.path.exists(self.mount_directory):
            os.makedirs(self.mount_directory)
            # `fuse` is expected to come from `fs.expose` (module-level import
            # not shown in this snippet), matching destroy() below.
            fuse.mount(self.memory, self.mount_directory)
        # else:
        #     fuse.unmount(self.mount_directory)
        #     fuse.mount(self.memory, self.mount_directory)
        # If the filesystem is mounted while a video is open it can't be
        # unmounted; find a way to update the mounted resources. TODO

        return self.mount_directory + path

    def destroy(self):
        time.sleep(3)
        try:
            fuse.unmount(self.mount_directory)
            os.removedirs(self.mount_directory)
        except OSError: #Mounted in use, try again
            self.destroy()
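
In isolation, the expose step this class is built around looks roughly like the following (pyfilesystem 1 style, Linux only; the mount point is illustrative, and the fs.expose.fuse calls mirror the ones used in createfile() and destroy() above):

from fs.memoryfs import MemoryFS
from fs.expose import fuse

mem = MemoryFS()
mem.createfile('hello.txt')
with mem.open('hello.txt', 'wb') as f:
    f.write(b'hello from memory')

mount_point = '/tmp/mtdecoded'       # hypothetical path, must exist and be empty
mp = fuse.mount(mem, mount_point)    # files in mem now appear under mount_point
# ... use the mounted files ...
fuse.unmount(mount_point)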
Example #6
    def test_xml_export_import_cycle(self):
        """
        Test the export-import cycle.
        """
        randomize_block = self.store.get_item(self.randomize_block.location)

        expected_olx = ('<randomize display_name="{block.display_name}">\n'
                        '  <html url_name="{block.children[0].block_id}"/>\n'
                        '  <html url_name="{block.children[1].block_id}"/>\n'
                        '  <html url_name="{block.children[2].block_id}"/>\n'
                        '</randomize>\n').format(block=randomize_block, )

        export_fs = MemoryFS()
        # Set the virtual FS to export the olx to.
        randomize_block.runtime._descriptor_system.export_fs = export_fs  # pylint: disable=protected-access

        # Export the olx.
        node = etree.Element("unknown_root")
        randomize_block.add_xml_to_node(node)

        # Read it back
        with export_fs.open('{dir}/{file_name}.xml'.format(
                dir=randomize_block.scope_ids.usage_id.block_type,
                file_name=randomize_block.scope_ids.usage_id.block_id)) as f:
            exported_olx = f.read()

        # And compare.
        self.assertEqual(exported_olx, expected_olx)

        runtime = TestImportSystem(
            load_error_modules=True,
            course_id=randomize_block.location.course_key)
        runtime.resources_fs = export_fs

        # Now import it.
        olx_element = etree.fromstring(exported_olx)
        id_generator = Mock()
        imported_randomize_block = RandomizeBlock.parse_xml(
            olx_element, runtime, None, id_generator)

        # Check the new XBlock has the same properties as the old one.
        self.assertEqual(imported_randomize_block.display_name,
                         randomize_block.display_name)
        self.assertEqual(len(imported_randomize_block.children), 3)
        self.assertEqual(imported_randomize_block.children,
                         randomize_block.children)
Example #7
def do_cairo():
    WIDTH, HEIGHT = 32, 32

    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, WIDTH, HEIGHT)
    ctx = cairo.Context(surface)

    ctx.scale(WIDTH, HEIGHT)  # Normalizing the canvas

    pat = cairo.LinearGradient(0.0, 0.0, 0.0, 1.0)
    pat.add_color_stop_rgba(1, 0.7, 0, 0, 0.5)  # Stop at offset 1, 50% opacity
    pat.add_color_stop_rgba(0, 0.9, 0.7, 0.2, 1)  # Stop at offset 0, 100% opacity

    ctx.rectangle(0, 0, 1, 1)  # Rectangle(x, y, width, height)
    ctx.set_source(pat)
    ctx.fill()

    ctx.translate(0.1, 0.1)  # Changing the current transformation matrix

    ctx.move_to(0, 0)
    # Arc(cx, cy, radius, start_angle, stop_angle)
    ctx.arc(0.2, 0.1, 0.1, -math.pi / 2, 0)
    ctx.line_to(0.5, 0.1)  # Line to (x,y)
    # Curve(x1, y1, x2, y2, x3, y3)
    ctx.curve_to(0.5, 0.2, 0.5, 0.4, 0.2, 0.8)
    ctx.close_path()

    ctx.set_source_rgb(0.3, 0.2, 0.5)  # Solid color
    ctx.set_line_width(0.02)
    ctx.stroke()

    # Prepare an _in memory_ file system and write the image to a file.
    memfs = MemoryFS()
    with memfs.open("translation.png", "wb") as fout:
        surface.write_to_png(fout)

    del ctx
    surface.finish()
    del surface

    return memfs
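
A hypothetical caller for do_cairo(), showing how the rendered PNG is pulled back out of the returned in-memory filesystem (the file name matches the one written above):

memfs = do_cairo()
with memfs.open("translation.png", "rb") as fin:
    png_bytes = fin.read()           # raw PNG data, never touched the disk
memfs.close()
print(len(png_bytes), "bytes of PNG data")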
Example #8
class VirtualFilesystem(AbstractedFS):
    """Represents a virtual filesystem (currently only memory and s3 are supported)
    """
    
    def __init__(self, root, cmd_channel):
        AbstractedFS.__init__(self, root, cmd_channel)
        self.cwd = root
        self.type = cmd_channel.type
        self.s3_bucket = cmd_channel.s3_bucket
        self.aws_access_key = cmd_channel.aws_access_key
        self.aws_secret_key = cmd_channel.aws_secret_key
        self.seperator = cmd_channel.seperator
        self.thread_synchronize = cmd_channel.thread_synchronize
        self.key_sync_timeout = cmd_channel.key_sync_timeout
        if not self.cmd_channel.fs_obj:
            if self.type == "memory":
                self.fs_obj = MemoryFS()
            elif self.type == "s3":
                self.fs_obj = S3FS(bucket=self.s3_bucket, prefix=self.prefix,
                                   aws_access_key=self.aws_access_key,
                                   aws_secret_key=self.aws_secret_key,
                                   separator=self.seperator,
                                   thread_synchronize=self.thread_synchronize,
                                   key_sync_timeout=self.key_sync_timeout)
            self.cmd_channel.fs_obj = self.fs_obj
        else:
            self.fs_obj = self.cmd_channel.fs_obj

    def ftp2fs(self, ftppath):
        return self.ftpnorm(ftppath)

    def fs2ftp(self, fspath):
        return fspath

    def validpath(self, path):
        # validpath was used to check symlinks escaping user home
        # directory; this is no longer necessary.
        return True
    
    def open(self, filename, mode):
        f = self.fs_obj.open(filename, mode)
        f.name = filename
        return f
    
    def mkdir(self, path):
        return self.fs_obj.makedir(path)
        
    def chdir(self, path):
        return self.fs_obj.opendir(path)
    
    def listdir(self,path):
        return self.fs_obj.listdir(path)
    
    def rmdir(self, path):
        return self.fs_obj.removedir(path)
    
    def remove(self, path):
        return self.fs_obj.remove(path)
    
    def rename(self, src, dst):
        return self.fs_obj.rename(src, dst)
    
    def chmod(self, path, mode):
        return True
    
    def readlink(self, path):
        return self.ftp2fs(path)
    
    def isfile(self, path):
        return self.fs_obj.isfile(path)
    
    def islink(self, path):
        return False
    
    def getsize(self, path):
        return self.fs_obj.getsize(path)
    
    def getmtime(self, path):
        return self.fs_obj.getinfo(path)['modified_time']
    
    def realpath(self, path):
        return path
    
    def lexists(self, path):
        return self.fs_obj.exists(path)
    
    def mkstemp(self, suffix='', prefix='', mode='wb'):
        from tempfile import _RandomNameSequence as RandomName
        name = RandomName()
        if suffix != '':
            suffix = 'tmp'
        fname = suffix + next(name)
        return self.fs_obj.open(fname, mode)
Example #9
    def test_metadata_import_export(self):
        """Two checks:
            - unknown metadata is preserved across import-export
            - inherited metadata doesn't leak to children.
        """
        system = self.get_system()
        v = 'March 20 17:00'
        url_name = 'test1'
        start_xml = '''
        <course org="{org}" course="{course}"
                due="{due}" url_name="{url_name}" unicorn="purple">
            <chapter url="hi" url_name="ch" display_name="CH">
                <html url_name="h" display_name="H">Two houses, ...</html>
            </chapter>
        </course>'''.format(due=v, org=ORG, course=COURSE, url_name=url_name)
        descriptor = system.process_xml(start_xml)
        compute_inherited_metadata(descriptor)

        print(descriptor, descriptor._model_data)
        self.assertEqual(descriptor.lms.due, Date().from_json(v))

        # Check that the child inherits due correctly
        child = descriptor.get_children()[0]
        self.assertEqual(child.lms.due, Date().from_json(v))
        self.assertEqual(child._inheritable_metadata,
                         child._inherited_metadata)
        self.assertEqual(2, len(child._inherited_metadata))
        self.assertEqual('1970-01-01T00:00:00Z',
                         child._inherited_metadata['start'])
        self.assertEqual(v, child._inherited_metadata['due'])

        # Now export and check things
        resource_fs = MemoryFS()
        exported_xml = descriptor.export_to_xml(resource_fs)

        # Check that the exported xml is just a pointer
        print("Exported xml:", exported_xml)
        pointer = etree.fromstring(exported_xml)
        self.assertTrue(is_pointer_tag(pointer))
        # but it's a special case course pointer
        self.assertEqual(pointer.attrib['course'], COURSE)
        self.assertEqual(pointer.attrib['org'], ORG)

        # Does the course still have unicorns?
        with resource_fs.open(
                'course/{url_name}.xml'.format(url_name=url_name)) as f:
            course_xml = etree.fromstring(f.read())

        self.assertEqual(course_xml.attrib['unicorn'], 'purple')

        # the course and org tags should be _only_ in the pointer
        self.assertTrue('course' not in course_xml.attrib)
        self.assertTrue('org' not in course_xml.attrib)

        # did we successfully strip the url_name from the definition contents?
        self.assertTrue('url_name' not in course_xml.attrib)

        # Does the chapter tag now have a due attribute?
        # hardcoded path to child
        with resource_fs.open('chapter/ch.xml') as f:
            chapter_xml = etree.fromstring(f.read())
        self.assertEqual(chapter_xml.tag, 'chapter')
        self.assertFalse('due' in chapter_xml.attrib)
Example #10
class BigFS(FS):

    """A FileSystem that represents a BIG file."""
    
    _meta = { 'virtual' : False,
              'read_only' : True,
              'unicode_paths' : True,
              'case_insensitive_paths' : False,
              'network' : False,                        
             }

    def __init__(self, filename, mode="r", thread_synchronize=True):
        """Create a FS that maps on to a big file.

        :param filename: A (system) path, or a file-like object
        :param mode: Mode to open file: 'r' for reading, 'w' and 'a' not supported
        :param thread_synchronize: -- Set to True (default) to enable thread-safety

        """
        super(BigFS, self).__init__(thread_synchronize=thread_synchronize)

        if len(mode) > 1 or mode not in "r":
            raise ValueError("mode must be 'r'")
        self.file_mode = mode
        self.big_path = str(filename)

        self.entries = {}
        try:
            self.bf = open(filename, "rb")
        except IOError:
            raise ResourceNotFoundError(str(filename), msg="BIG file does not exist: %(path)s")

        self._path_fs = MemoryFS()
        if mode in 'ra':
            self._parse_resource_list(self.bf)

    def __str__(self):
        return "<BigFS: %s>" % self.big_path

    def __unicode__(self):
        return unicode(self.__str__())


    def _parse_resource_list(self, g):
        magicWord = g.read(4)
        if magicWord != "BIGF" and magicWord != "BIG4":
            raise ValueError("Magic word of BIG file invalid: " + filename + " " + repr(magicWord))
        header = g.read(12)
        header = unpack(">III", header)
        BIGSize = header[0]
        fileCount = header[1]
        bodyOffset = header[2]
        for i in range(fileCount):
            fileHeader = g.read(8)
            fileHeader = unpack(">II", fileHeader)

            pos = g.tell()
            buf = g.read(4096)
            marker = buf.find("\0")
            if marker == -1:
                raise ValueError("Could not parse filename in BIG file: Too long or invalid file")
            name = buf[:marker]
            # TODO: decode the encoding of name (or normalize the path?)
            isCompressed, uncompressedSize = self.__isCompressed(g, fileHeader[0], fileHeader[1])
            be = BIGEntry(name, fileHeader[0], fileHeader[1], isCompressed, uncompressedSize)
            name = normpath(name)
            self.entries[name] = be
            self._add_resource(name)
            g.seek(pos + marker + 1)

    def __isCompressed(self, g, offset, size):
        g.seek(offset)
        buf = g.read(2)
        magic = unpack(">H", buf)[0]
        if (magic & 0x3EFF) == 0x10FB:
            # it is compressed
            if magic & 0x8000:
                # decompressed size is uint32
                return True, unpack(">I", g.read(4))[0]
            else:
                # use only 3 bytes
                return True, unpack(">I", "\0" + g.read(3))[0]
        return False, size

    def _add_resource(self, path):
        if path.endswith('/'):
            path = path[:-1]
            if path:
                self._path_fs.makedir(path, recursive=True, allow_recreate=True)
        else:
            dirpath, filename = pathsplit(path)
            if dirpath:
                self._path_fs.makedir(dirpath, recursive=True, allow_recreate=True)
            f = self._path_fs.open(path, 'w')
            f.close()


    def close(self):
        """Finalizes the zip file so that it can be read.
        No further operations will work after this method is called."""

        if hasattr(self, 'bf') and self.bf:
            self.bf.close()
            self.bf = _ExceptionProxy()

    @synchronize
    def open(self, path, mode="r", **kwargs):
        path = normpath(relpath(path))        

        if 'r' in mode:
            if self.file_mode not in 'ra':
                raise OperationFailedError("open file", path=path, msg="Big file must be opened for reading ('r') or appending ('a')")
            try:
                return self.entries[path].getfile(self.bf)
            except KeyError:
                raise ResourceNotFoundError(path)

        if 'w' in mode:
            raise OperationFailedError("open file", path=path, msg="Big file cannot be edited ATM")

        raise ValueError("Mode must contain be 'r' or 'w'")

    @synchronize
    def getcontents(self, path):
        if not self.exists(path):
            raise ResourceNotFoundError(path)
        path = normpath(path)
        try:
            contents = self.entries[path].getcontents(self.bf)
        except KeyError:
            raise ResourceNotFoundError(path)
        except RuntimeError:
            raise OperationFailedError("read file", path=path, msg="Big file must be oppened with 'r' or 'a' to read")
        return contents

    def desc(self, path):
        if self.isdir(path):
            return "Dir in big file: %s" % self.big_path
        else:
            return "File in big file: %s" % self.big_path

    def isdir(self, path):
        return self._path_fs.isdir(path)

    def isfile(self, path):
        return self._path_fs.isfile(path)

    def exists(self, path):
        return self._path_fs.exists(path)

    @synchronize
    def makedir(self, dirname, recursive=False, allow_recreate=False):
        dirname = normpath(dirname)
        if self.file_mode not in "wa":
            raise OperationFailedError("create directory", path=dirname, msg="Big file must be opened for writing ('w') or appending ('a')")
        if not dirname.endswith('/'):
            dirname += '/'
        self._add_resource(dirname)

    def listdir(self, path="/", wildcard=None, full=False, absolute=False, dirs_only=False, files_only=False):
        return self._path_fs.listdir(path, wildcard, full, absolute, dirs_only, files_only)

    @synchronize
    def getinfo(self, path):
        if not self.exists(path):
            raise ResourceNotFoundError(path)
        path = normpath(path).lstrip('/')
        info = {'size': 0}
        if path in self.entries:
            be = self.entries[path]
            info['size'] = be.realSize
            info['file_size'] = be.realSize
            info['stored_size'] = be.storedSize
            info['is_compressed'] = be.isCompressed
            info['offset'] = be.offset
            info['internal_filename'] = be.filename
            info['filename'] = path
        return info
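
The structural trick above is worth calling out: BigFS keeps a parallel MemoryFS (_path_fs) containing only empty placeholder files and directories, and delegates isdir/isfile/exists/listdir to it while the real data stays in the BIG archive. A hedged sketch of that pattern using the current PyFilesystem API (names here are illustrative, not part of the example above):

from fs.memoryfs import MemoryFS

path_index = MemoryFS()

def add_resource(path):
    # simplified mirror of BigFS._add_resource
    if path.endswith('/'):
        path_index.makedirs(path.rstrip('/'), recreate=True)
    else:
        dirpath, _, _ = path.rpartition('/')
        if dirpath:
            path_index.makedirs(dirpath, recreate=True)
        path_index.create(path)          # empty placeholder, size 0

add_resource('textures/ui/button.dds')
print(path_index.isdir('textures/ui'))    # True
print(path_index.listdir('textures/ui'))  # ['button.dds']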
Example #11
class TestLibraryContentExportImport(LibraryContentTest):
    """
    Export and import tests for LibraryContentBlock
    """
    def setUp(self):
        super().setUp()

        # Children will only be set after calling this.
        self.lc_block.refresh_children()
        self.lc_block = self.store.get_item(self.lc_block.location)

        self.expected_olx = (
            '<library_content display_name="{block.display_name}" max_count="{block.max_count}"'
            ' source_library_id="{block.source_library_id}" source_library_version="{block.source_library_version}">\n'
            '  <html url_name="{block.children[0].block_id}"/>\n'
            '  <html url_name="{block.children[1].block_id}"/>\n'
            '  <html url_name="{block.children[2].block_id}"/>\n'
            '  <html url_name="{block.children[3].block_id}"/>\n'
            '</library_content>\n'
        ).format(
            block=self.lc_block,
        )

        # Set the virtual FS to export the olx to.
        self.export_fs = MemoryFS()
        self.lc_block.runtime._descriptor_system.export_fs = self.export_fs  # pylint: disable=protected-access

        # Prepare runtime for the import.
        self.runtime = TestImportSystem(load_error_modules=True, course_id=self.lc_block.location.course_key)
        self.runtime.resources_fs = self.export_fs
        self.id_generator = Mock()

        # Export the olx.
        node = etree.Element("unknown_root")
        self.lc_block.add_xml_to_node(node)

    def _verify_xblock_properties(self, imported_lc_block):
        """
        Check the new XBlock has the same properties as the old one.
        """
        assert imported_lc_block.display_name == self.lc_block.display_name
        assert imported_lc_block.source_library_id == self.lc_block.source_library_id
        assert imported_lc_block.source_library_version == self.lc_block.source_library_version
        assert imported_lc_block.mode == self.lc_block.mode
        assert imported_lc_block.max_count == self.lc_block.max_count
        assert imported_lc_block.capa_type == self.lc_block.capa_type
        assert len(imported_lc_block.children) == len(self.lc_block.children)
        assert imported_lc_block.children == self.lc_block.children

    def test_xml_export_import_cycle(self):
        """
        Test the export-import cycle.
        """
        # Read back the olx.
        with self.export_fs.open('{dir}/{file_name}.xml'.format(
            dir=self.lc_block.scope_ids.usage_id.block_type,
            file_name=self.lc_block.scope_ids.usage_id.block_id
        )) as f:
            exported_olx = f.read()

        # And compare.
        assert exported_olx == self.expected_olx

        # Now import it.
        olx_element = etree.fromstring(exported_olx)
        imported_lc_block = LibraryContentBlock.parse_xml(olx_element, self.runtime, None, self.id_generator)

        self._verify_xblock_properties(imported_lc_block)

    def test_xml_import_with_comments(self):
        """
        Test that XML comments within LibraryContentBlock are ignored during the import.
        """
        olx_with_comments = (
            '<!-- Comment -->\n'
            '<library_content display_name="{block.display_name}" max_count="{block.max_count}"'
            ' source_library_id="{block.source_library_id}" source_library_version="{block.source_library_version}">\n'
            '<!-- Comment -->\n'
            '  <html url_name="{block.children[0].block_id}"/>\n'
            '  <html url_name="{block.children[1].block_id}"/>\n'
            '  <html url_name="{block.children[2].block_id}"/>\n'
            '  <html url_name="{block.children[3].block_id}"/>\n'
            '</library_content>\n'
        ).format(
            block=self.lc_block,
        )

        # Import the olx.
        olx_element = etree.fromstring(olx_with_comments)
        imported_lc_block = LibraryContentBlock.parse_xml(olx_element, self.runtime, None, self.id_generator)

        self._verify_xblock_properties(imported_lc_block)
Example #12
def encode_file_into_luby_blocks_func(
        folder_containing_art_image_and_metadata_files):
    global block_redundancy_factor
    global desired_block_size_in_bytes
    file_paths_in_folder = glob.glob(
        folder_containing_art_image_and_metadata_files + '*')
    for current_file_path in file_paths_in_folder:
        if current_file_path.split('.')[-1] in ['zst', 'tar']:
            try:
                os.remove(current_file_path)
            except Exception as e:
                print('Error: ' + str(e))
    c_constant = 0.1  #Don't touch
    delta_constant = 0.5  #Don't touch
    start_time = time()
    ramdisk_object = MemoryFS()
    c_constant = 0.1
    delta_constant = 0.5
    seed = random.randint(0, 1 << 31 - 1)
    compressed_output_file_path, compressed_file_hash = add_art_image_files_and_metadata_to_zstd_compressed_tar_file_func(
        folder_containing_art_image_and_metadata_files)
    final_art_file__original_size_in_bytes = os.path.getsize(
        compressed_output_file_path)
    # Process the compressed file into a stream of encoded blocks, and save those
    # blocks as separate files in the output folder:
    output_blocks_list = []
    print('Now encoding file ' + compressed_output_file_path + ' (' +
          str(round(final_art_file__original_size_in_bytes / 1000000)) +
          'mb)\n\n')
    total_number_of_blocks_to_generate = ceil(
        (1.00 * block_redundancy_factor *
         final_art_file__original_size_in_bytes) / desired_block_size_in_bytes)
    print(
        'Total number of blocks to generate for target level of redundancy: ' +
        str(total_number_of_blocks_to_generate))
    with open(compressed_output_file_path, 'rb') as f:
        compressed_data = f.read()
    compressed_data_size_in_bytes = len(compressed_data)
    blocks = [
        int.from_bytes(
            compressed_data[ii:ii + desired_block_size_in_bytes].ljust(
                desired_block_size_in_bytes, b'0'), 'little') for ii in
        range(0, compressed_data_size_in_bytes, desired_block_size_in_bytes)
    ]
    prng = PRNG(params=(len(blocks), delta_constant, c_constant))
    prng.set_seed(seed)
    output_blocks_list = list()
    number_of_blocks_generated = 0
    while number_of_blocks_generated < total_number_of_blocks_to_generate:
        random_seed, d, ix_samples = prng.get_src_blocks()
        block_data = 0
        for ix in ix_samples:
            block_data ^= blocks[ix]
        block_data_bytes = int.to_bytes(block_data,
                                        desired_block_size_in_bytes, 'little')
        block_data_hash = hashlib.sha3_256(block_data_bytes).digest()
        block = (compressed_data_size_in_bytes, desired_block_size_in_bytes,
                 random_seed, block_data_hash, block_data_bytes)
        header_bit_packing_pattern_string = '<3I32s'
        bit_packing_pattern_string = header_bit_packing_pattern_string + str(
            desired_block_size_in_bytes) + 's'
        length_of_header_in_bytes = struct.calcsize(
            header_bit_packing_pattern_string)
        packed_block_data = pack(bit_packing_pattern_string, *block)
        if number_of_blocks_generated == 0:  #Test that the bit-packing is working correctly:
            with io.BufferedReader(io.BytesIO(packed_block_data)) as f:
                header_data = f.read(length_of_header_in_bytes)
                #first_generated_block_raw_data = f.read(desired_block_size_in_bytes)
            compressed_input_data_size_in_bytes_test, desired_block_size_in_bytes_test, random_seed_test, block_data_hash_test = unpack(
                header_bit_packing_pattern_string, header_data)
            if block_data_hash_test != block_data_hash:
                print(
                    'Error! Block data hash does not match the hash reported in the block header!'
                )
        output_blocks_list.append(packed_block_data)
        number_of_blocks_generated = number_of_blocks_generated + 1
        hash_of_block = get_sha256_hash_of_input_data_func(packed_block_data)
        output_block_file_path = 'FileHash__' + compressed_file_hash + '__Block__' + '{0:09}'.format(
            number_of_blocks_generated
        ) + '__BlockHash_' + hash_of_block + '.block'
        try:
            with ramdisk_object.open(output_block_file_path, 'wb') as f:
                f.write(packed_block_data)
        except Exception as e:
            print('Error: ' + str(e))
    duration_in_seconds = round(time() - start_time, 1)
    print('\n\nFinished processing in ' + str(duration_in_seconds) +
          ' seconds! \nOriginal zip file was encoded into ' +
          str(number_of_blocks_generated) + ' blocks of ' +
          str(ceil(desired_block_size_in_bytes / 1000)) +
          ' kilobytes each. Total size of all blocks is ~' + str(
              ceil((number_of_blocks_generated * desired_block_size_in_bytes) /
                   1000000)) + ' megabytes\n')
    print('Now copying encoded files from ram disk to local storage...')
    block_storage_folder_path = folder_containing_art_image_and_metadata_files + os.sep + 'block_files'
    if not os.path.isdir(block_storage_folder_path):
        os.makedirs(block_storage_folder_path)
    filesystem_object = OSFS(block_storage_folder_path)
    copy_fs(ramdisk_object, filesystem_object)
    print('Done!\n')
    ramdisk_object.close()
    return duration_in_seconds
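
Stripped of the Luby-transform details, the tail of the function boils down to a simple pattern: build all the block files in a MemoryFS "ramdisk", then copy the whole tree to durable storage in one call. A hedged sketch (the target directory is illustrative):

from fs.memoryfs import MemoryFS
from fs.osfs import OSFS
from fs.copy import copy_fs

ramdisk = MemoryFS()
with ramdisk.open('example.block', 'wb') as f:
    f.write(b'\x00' * 1024)          # stand-in for one packed block

with OSFS('.') as local_fs:          # '.' instead of the block_files folder
    copy_fs(ramdisk, local_fs)       # copies every file from memory to disk
ramdisk.close()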
Example #13
    def parted_file(self):
        fs = MemoryFS()
        mode = "wb+"
        path = "cuckoo.tar"
        parts = [FilePart(fs.open("cuckoo.tar.part0", mode)),
                 FilePart(fs.open("cuckoo.tar.part1", mode))]
        return PartedFile(path=path, mode=mode, fs=fs, max_part_size=kb(4), parts=parts)
Example #14
class BigFS(FS):
    """A FileSystem that represents a BIG file."""

    _meta = {
        'virtual': False,
        'read_only': True,
        'unicode_paths': True,
        'case_insensitive_paths': False,
        'network': False,
    }

    def __init__(self, filename, mode="r", thread_synchronize=True):
        """Create a FS that maps on to a big file.

        :param filename: A (system) path, or a file-like object
        :param mode: Mode to open file: 'r' for reading, 'w' and 'a' not supported
        :param thread_synchronize: -- Set to True (default) to enable thread-safety

        """
        super(BigFS, self).__init__(thread_synchronize=thread_synchronize)

        if len(mode) > 1 or mode not in "r":
            raise ValueError("mode must be 'r'")
        self.file_mode = mode
        self.big_path = str(filename)

        self.entries = {}
        try:
            self.bf = open(filename, "rb")
        except IOError:
            raise ResourceNotFoundError(
                str(filename), msg="BIG file does not exist: %(path)s")

        self._path_fs = MemoryFS()
        if mode in 'ra':
            self._parse_resource_list(self.bf)

    def __str__(self):
        return "<BigFS: %s>" % self.big_path

    def __unicode__(self):
        return unicode(self.__str__())

    def _parse_resource_list(self, g):
        magicWord = g.read(4)
        if magicWord != "BIGF" and magicWord != "BIG4":
            raise ValueError("Magic word of BIG file invalid: " + filename +
                             " " + repr(magicWord))
        header = g.read(12)
        header = unpack(">III", header)
        BIGSize = header[0]
        fileCount = header[1]
        bodyOffset = header[2]
        for i in range(fileCount):
            fileHeader = g.read(8)
            fileHeader = unpack(">II", fileHeader)

            pos = g.tell()
            buf = g.read(4096)
            marker = buf.find("\0")
            if marker == -1:
                raise ValueError(
                    "Could not parse filename in BIG file: Too long or invalid file"
                )
            name = buf[:marker]
            # TODO: decode the encoding of name (or normalize the path?)
            isCompressed, uncompressedSize = self.__isCompressed(
                g, fileHeader[0], fileHeader[1])
            be = BIGEntry(name, fileHeader[0], fileHeader[1], isCompressed,
                          uncompressedSize)
            name = normpath(name)
            self.entries[name] = be
            self._add_resource(name)
            g.seek(pos + marker + 1)

    def __isCompressed(self, g, offset, size):
        g.seek(offset)
        buf = g.read(2)
        magic = unpack(">H", buf)[0]
        if (magic & 0x3EFF) == 0x10FB:
            # it is compressed
            if magic & 0x8000:
                # decompressed size is uint32
                return True, unpack(">I", g.read(4))[0]
            else:
                # use only 3 bytes
                return True, unpack(">I", "\0" + g.read(3))[0]
        return False, size

    def _add_resource(self, path):
        if path.endswith('/'):
            path = path[:-1]
            if path:
                self._path_fs.makedir(path,
                                      recursive=True,
                                      allow_recreate=True)
        else:
            dirpath, filename = pathsplit(path)
            if dirpath:
                self._path_fs.makedir(dirpath,
                                      recursive=True,
                                      allow_recreate=True)
            f = self._path_fs.open(path, 'w')
            f.close()

    def close(self):
        """Finalizes the zip file so that it can be read.
        No further operations will work after this method is called."""

        if hasattr(self, 'bf') and self.bf:
            self.bf.close()
            self.bf = _ExceptionProxy()

    @synchronize
    def open(self, path, mode="r", **kwargs):
        path = normpath(relpath(path))

        if 'r' in mode:
            if self.file_mode not in 'ra':
                raise OperationFailedError(
                    "open file",
                    path=path,
                    msg=
                    "Big file must be opened for reading ('r') or appending ('a')"
                )
            try:
                return self.entries[path].getfile(self.bf)
            except KeyError:
                raise ResourceNotFoundError(path)

        if 'w' in mode:
            raise OperationFailedError("open file",
                                       path=path,
                                       msg="Big file cannot be edited ATM")

        raise ValueError("Mode must contain be 'r' or 'w'")

    @synchronize
    def getcontents(self, path):
        if not self.exists(path):
            raise ResourceNotFoundError(path)
        path = normpath(path)
        try:
            contents = self.entries[path].getcontents(self.bf)
        except KeyError:
            raise ResourceNotFoundError(path)
        except RuntimeError:
            raise OperationFailedError(
                "read file",
                path=path,
                msg="Big file must be oppened with 'r' or 'a' to read")
        return contents

    def desc(self, path):
        if self.isdir(path):
            return "Dir in big file: %s" % self.big_path
        else:
            return "File in big file: %s" % self.big_path

    def isdir(self, path):
        return self._path_fs.isdir(path)

    def isfile(self, path):
        return self._path_fs.isfile(path)

    def exists(self, path):
        return self._path_fs.exists(path)

    @synchronize
    def makedir(self, dirname, recursive=False, allow_recreate=False):
        dirname = normpath(dirname)
        if self.file_mode not in "wa":
            raise OperationFailedError(
                "create directory",
                path=dirname,
                msg=
                "Big file must be opened for writing ('w') or appending ('a')")
        if not dirname.endswith('/'):
            dirname += '/'
        self._add_resource(dirname)

    def listdir(self,
                path="/",
                wildcard=None,
                full=False,
                absolute=False,
                dirs_only=False,
                files_only=False):
        return self._path_fs.listdir(path, wildcard, full, absolute, dirs_only,
                                     files_only)

    @synchronize
    def getinfo(self, path):
        if not self.exists(path):
            raise ResourceNotFoundError(path)
        path = normpath(path).lstrip('/')
        info = {'size': 0}
        if path in self.entries:
            be = self.entries[path]
            info['size'] = be.realSize
            info['file_size'] = be.realSize
            info['stored_size'] = be.storedSize
            info['is_compressed'] = be.isCompressed
            info['offset'] = be.offset
            info['internal_filename'] = be.filename
            info['filename'] = path
        return info
Example #15
def encode_final_art_zipfile_into_luby_transform_blocks_func(
        sha256_hash_of_art_file):
    global block_storage_folder_path
    global block_redundancy_factor
    global desired_block_size_in_bytes
    global prepared_final_art_zipfiles_folder_path
    start_time = time()
    ramdisk_object = MemoryFS()
    filesystem_object = OSFS(block_storage_folder_path)
    c_constant = 0.1
    delta_constant = 0.5
    seed = randint(0, 1 << 31 - 1)
    path_to_final_artwork_zipfile_including_metadata = glob.glob(
        prepared_final_art_zipfiles_folder_path + '*' +
        sha256_hash_of_art_file + '*')[0]
    final_art_file__original_size_in_bytes = os.path.getsize(
        path_to_final_artwork_zipfile_including_metadata)
    # Process the ZIP file into a stream of encoded blocks, and save those blocks
    # as separate files in the output folder:
    output_blocks_list = []
    print('Now encoding file ' +
          os.path.split(path_to_final_artwork_zipfile_including_metadata)[-1] +
          ' (' + str(round(final_art_file__original_size_in_bytes / 1000000)) +
          'mb)\n\n')
    total_number_of_blocks_to_generate = ceil(
        (1.00 * block_redundancy_factor *
         final_art_file__original_size_in_bytes) / desired_block_size_in_bytes)
    print(
        'Total number of blocks to generate for target level of redundancy: ' +
        str(total_number_of_blocks_to_generate))
    pbar = tqdm(total=total_number_of_blocks_to_generate)
    with open(path_to_final_artwork_zipfile_including_metadata, 'rb') as f:
        f_bytes = f.read()
    filesize = len(f_bytes)
    art_zipfile_hash = hashlib.sha256(f_bytes).hexdigest()
    if art_zipfile_hash == sha256_hash_of_art_file:  #Convert file byte contents into blocksize chunks, padding last one if necessary:
        blocks = [
            int.from_bytes(
                f_bytes[ii:ii + desired_block_size_in_bytes].ljust(
                    desired_block_size_in_bytes, b'0'), sys.byteorder)
            for ii in range(0, len(f_bytes), desired_block_size_in_bytes)
        ]
        number_of_blocks = len(blocks)
        print('The length of the blocks list: ' + str(number_of_blocks))
        prng = PRNG(params=(number_of_blocks, delta_constant, c_constant))
        prng.set_seed(seed)
        number_of_blocks_generated = 0  # block generation loop
        while number_of_blocks_generated <= total_number_of_blocks_to_generate:
            update_skip = 1
            if (number_of_blocks_generated % update_skip) == 0:
                pbar.update(update_skip)
            blockseed, d, ix_samples = prng.get_src_blocks()
            block_data = 0
            for ix in ix_samples:
                block_data ^= blocks[ix]
            block = (filesize, desired_block_size_in_bytes, blockseed,
                     int.to_bytes(block_data, desired_block_size_in_bytes,
                                  sys.byteorder)
                     )  # Generate blocks of XORed data in network byte order
            number_of_blocks_generated = number_of_blocks_generated + 1
            packed_block_data = pack('!III%ss' % desired_block_size_in_bytes,
                                     *block)
            output_blocks_list.append(packed_block_data)
            hash_of_block = hashlib.sha256(packed_block_data).hexdigest()
            output_block_file_path = 'FileHash__' + art_zipfile_hash + '__Block__' + '{0:09}'.format(
                number_of_blocks_generated
            ) + '__BlockHash_' + hash_of_block + '.block'
            try:
                with ramdisk_object.open(output_block_file_path, 'wb') as f:
                    f.write(packed_block_data)
            except Exception as e:
                print('Error: ' + str(e))
        duration_in_seconds = round(time() - start_time, 1)
        print('\n\nFinished processing in ' + str(duration_in_seconds) +
              ' seconds! \nOriginal zip file was encoded into ' +
              str(number_of_blocks_generated) + ' blocks of ' +
              str(ceil(desired_block_size_in_bytes / 1000)) +
              ' kilobytes each. Total size of all blocks is ~' + str(
                  ceil((number_of_blocks_generated *
                        desired_block_size_in_bytes) / 1000000)) +
              ' megabytes\n')
        print('Now copying encoded files from ram disk to local storage...')
        copy_fs(ramdisk_object, filesystem_object)
        print('Done!\n')
        ramdisk_object.close()
        return duration_in_seconds
Example #16
    def test_metadata_import_export(self):
        """Two checks:
            - unknown metadata is preserved across import-export
            - inherited metadata doesn't leak to children.
        """
        system = self.get_system()
        v = "March 20 17:00"
        url_name = "test1"
        start_xml = """
        <course org="{org}" course="{course}"
                due="{due}" url_name="{url_name}" unicorn="purple">
            <chapter url="hi" url_name="ch" display_name="CH">
                <html url_name="h" display_name="H">Two houses, ...</html>
            </chapter>
        </course>""".format(
            due=v, org=ORG, course=COURSE, url_name=url_name
        )
        descriptor = system.process_xml(start_xml)
        compute_inherited_metadata(descriptor)

        # pylint: disable=W0212
        print(descriptor, descriptor._field_data)
        self.assertEqual(descriptor.due, ImportTestCase.date.from_json(v))

        # Check that the child inherits due correctly
        child = descriptor.get_children()[0]
        self.assertEqual(child.due, ImportTestCase.date.from_json(v))
        # need to convert v to canonical json before comparing
        self.assertEqual(
            ImportTestCase.date.to_json(ImportTestCase.date.from_json(v)), child.xblock_kvs.inherited_settings["due"]
        )

        # Now export and check things
        resource_fs = MemoryFS()
        exported_xml = descriptor.export_to_xml(resource_fs)

        # Check that the exported xml is just a pointer
        print("Exported xml:", exported_xml)
        pointer = etree.fromstring(exported_xml)
        self.assertTrue(is_pointer_tag(pointer))
        # but it's a special case course pointer
        self.assertEqual(pointer.attrib["course"], COURSE)
        self.assertEqual(pointer.attrib["org"], ORG)

        # Does the course still have unicorns?
        with resource_fs.open("course/{url_name}.xml".format(url_name=url_name)) as f:
            course_xml = etree.fromstring(f.read())

        self.assertEqual(course_xml.attrib["unicorn"], "purple")

        # the course and org tags should be _only_ in the pointer
        self.assertTrue("course" not in course_xml.attrib)
        self.assertTrue("org" not in course_xml.attrib)

        # did we successfully strip the url_name from the definition contents?
        self.assertTrue("url_name" not in course_xml.attrib)

        # Does the chapter tag now have a due attribute?
        # hardcoded path to child
        with resource_fs.open("chapter/ch.xml") as f:
            chapter_xml = etree.fromstring(f.read())
        self.assertEqual(chapter_xml.tag, "chapter")
        self.assertFalse("due" in chapter_xml.attrib)
Example #17
def do_translate(self, msg):
    # fixed width assumption, or at least maximum constraint
    font_size = 30
    font_vertical_padding = 3
    lines = msg.splitlines()
    max_cols = len(max(lines, key=len))
    max_lines = msg.count('\n')
    line_extents = []

    # ############################
    # First, we figure out a surface we KNOW is large enough to render the
    # text. This ensures that the text doesn't fall off the edges of the
    # image and confuse our computations.
    # ############################
    WIDTH = max_cols * font_size
    HEIGHT = (max_lines + 1) * (font_size + font_vertical_padding)

    # ############################
    # Now make a surface so we can find the text extents of each line.
    # ############################
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, WIDTH, HEIGHT)
    ctx = cairo.Context(surface)

    #ctx.select_font_face("DejaVu Sans Mono", cairo.FONT_SLANT_NORMAL, \
    #	cairo.FONT_WEIGHT_NORMAL)
    ctx.set_font_face(self.kilta_font.get_cairo_font_face())
    ctx.set_font_size(font_size)
    # font_extents is (ascent, descent, height, max_x_advance, max_y_advance)
    font_extents = ctx.font_extents()

    # We act as if we draw the lines over each other cause it doesn't matter
    # for extent calculation. But we move to the middle of the surface for
    # safety.
    for line in lines:
        ctx.move_to(0, HEIGHT / 2)
        glyph_line = self.kilta_font.layout_line(ctx, line, font_size)
        # Note I still store the old 'line' here too. I'll need it later
        # when doing the pen position.
        line_extents.append([line, ctx.glyph_extents(glyph_line)])

    # Clean up cause we're dumping this surface and context now!
    del ctx
    surface.finish()
    del surface

    # ############################
    # Recompute the correct size of the surface.
    # ############################
    WIDTH = 0
    HEIGHT = 0
    for line_extent in line_extents:
        extent = line_extent[1]
        WIDTH = max(math.ceil(extent.width), WIDTH)
        # TODO: Computation of height needs a reckoning.
        HEIGHT += max(font_size, math.ceil(extent.height)) + \
           font_vertical_padding
    # TODO: in the next line, 3 is Wrong(tm). Need to figure out the x_advance
    # of the last character on the longest line above and use that here.
    WIDTH = math.ceil(WIDTH + 3)
    # ..and add font's average descent to entail the last line's descenders.
    HEIGHT = math.ceil(HEIGHT + font_extents[1])

    # ############################
    # Now finally we can reallocate a new surface that is exactly what we need
    # to draw the text.
    # ############################

    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, WIDTH, HEIGHT)
    ctx = cairo.Context(surface)

    ctx.set_source_rgba(1.0, 1.0, 1.0, 1.0)  # background: white
    ctx.rectangle(0, 0, WIDTH, HEIGHT)
    ctx.fill()

    # ############################
    # And finally render the text!
    # ############################
    #ctx.select_font_face("DejaVu Sans Mono", cairo.FONT_SLANT_NORMAL, \
    #	cairo.FONT_WEIGHT_NORMAL)
    ctx.set_font_face(self.kilta_font.get_cairo_font_face())
    ctx.set_font_size(font_size)
    ctx.set_source_rgba(0, 0, 0, 1)  # foreground font color: black

    # TODO: When it comes time to deal with the y_bearing and whatnot, there
    # will be a reckoning in this snippet of code... Kerning is probably
    # screwed as well.
    dx = 0
    dy = 0
    for line_extent in line_extents:
        line = line_extent[0]
        extent = line_extent[1]
        dy += font_size  # math.ceil(extent.height) + y_bearing... etc etc
        ctx.move_to(dx, dy)
        # NOTE: ctx knows the current pen position which is why I need to
        # re-layout the line right here again.
        glyph_line = self.kilta_font.layout_line(ctx, line, font_size)
        ctx.show_glyphs(glyph_line)  # Already glyphs
        dy += font_vertical_padding

    ctx.stroke()

    # ############################
    # Prepare an _in memory_ file system and write the image to a file.
    # ############################
    memfs = MemoryFS()
    with memfs.open("translation.png", "wb") as fout:
        surface.write_to_png(fout)

    del ctx
    surface.finish()
    del surface

    return memfs
Example #18
  try:
    auth.get_access_token(verifier)
  except tweepy.TweepError as ex:
    print("Error! Failed to get access token.")
    sys.exit(ex)

  # Save access token
  credentials = {'token': auth.access_token, 'token_secret': auth.access_token_secret}
  with open(credentials_file, "w") as file:
    yaml.dump(credentials, file, default_flow_style=False)

# Initialize camera
camera = PiCamera()
memfs = MemoryFS()

# Let's go!
twitter_api = tweepy.API(auth)

camera.resolution = (1024, 768)
camera.start_preview()
# Camera warm-up time
sleep(2)

capture_file = memfs.open('memory_capture.jpg', 'w+b')
camera.capture(capture_file)
sleep(1)
capture_file.seek(0)  # rewind so the upload reads the captured image from the start
twitter_api.update_with_media('memory_capture.jpg', file=capture_file)
capture_file.close()

memfs.close()