Example #1
class DirectoriesFilter(unittest.TestCase):
    def setUp(self):
        self.fs = MemoryFS()
        for i in range(5):
            self.fs.makedir('sub.{0}'.format(i))
            with self.fs.opendir('sub.{0}'.format(i)) as d:
                d.makedir('sub.0')
        for i in range(5):
            self.fs.makedir('child.{0}'.format(i))

    def tearDown(self):
        self.fs.close()

    def test_basic(self):
        ff = batch.DirectoriesFilter(include_filters=['sub*'])
        results = set(ff.lst(self.fs))
        expected = {'sub.{}'.format(i) for i in range(5)}
        self.assertEqual(results, expected)

    def test_depth_1(self):
        ff = batch.DirectoriesFilter(include_filters=['sub*'], depth=1)
        results = set(ff.lst(self.fs))
        expected = {'sub.{}'.format(i)
                    for i in range(5)
                    }.union({'sub.{}/sub.0'.format(i)
                             for i in range(5)})
        self.assertEqual(results, expected)
Example #2
def test_writefs_returns_none_if_all_fs_closed(self):
    # Arrange
    multifs = WritableMultiFS()
    fs1 = MemoryFS()
    multifs.addfs("fs1", fs1)
    fs1.close()
    # Act
    assert multifs.writefs is None
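A hedged companion sketch (not taken from the library's test suite), reusing only the WritableMultiFS/addfs/writefs calls shown above; the assumption that writefs falls back to a still-open filesystem is mine:

def test_writefs_prefers_remaining_open_fs(self):
    # Sketch only: assumes writefs points at an open filesystem when one exists
    multifs = WritableMultiFS()
    fs1 = MemoryFS()
    fs2 = MemoryFS()
    multifs.addfs("fs1", fs1)
    multifs.addfs("fs2", fs2)
    fs1.close()
    # fs2 is the only filesystem still open, so it should back writefs
    assert multifs.writefs is fs2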
Example #4
def test_read_subdir_versions(self) -> None:
    fs = MemoryFS()
    d = read_subdir_versions_from_directory(fs, "/")
    self.assertEqual({}, d)
    fs.writetext(SUBDIR_VERSIONS_FILENAME, self.txt)
    d = read_subdir_versions_from_directory(fs, "/")
    self.assertEqual(self.d, d)
    fs.close()
Example #6
class TestFileCache(unittest.TestCase, CacheTests, NamespacesTests):

    __test__ = True

    def setUp(self):
        self.fs = MemoryFS()
        self.cache = cache.filecache.FileCache("test", "ns1", fs=self.fs)
        self.cache2 = cache.filecache.FileCache("test", "ns2", fs=self.fs)

    def tearDown(self):
        self.fs.close()
        self.fs = None
Example #7
class TestFilesFilter(unittest.TestCase):
    def setUp(self):
        self.fs = MemoryFS()
        for i in range(5):
            self.fs.makedir('sub.{0}'.format(i))
            with self.fs.opendir('sub.{0}'.format(i)) as d:
                d.touch('result.txt')
        for i in range(5):
            self.fs.makedir('child.{0}'.format(i))
            with self.fs.opendir('child.{0}'.format(i)) as d:
                d.touch('result.txt')
        for i in range(5):
            self.fs.touch('result.{}.txt'.format(i))
            self.fs.touch('errors.{}.txt'.format(i))

    def tearDown(self):
        self.fs.close()

    def test_basic(self):
        ff = batch.FilesFilter(include_filters=['result*'])
        results = set(ff.lst(self.fs))
        expected = {'result.{}.txt'.format(i) for i in range(5)}
        self.assertEqual(results, expected)

    def test_multi_filters(self):
        ff = batch.FilesFilter(include_filters=['result*', 'errors*'])
        results = set(ff.lst(self.fs))
        expected = {'result.{}.txt'.format(i)
                    for i in range(5)
                    }.union({'errors.{}.txt'.format(i)
                             for i in range(5)})
        self.assertEqual(results, expected)

    def test_with_directory_filter(self):
        results = set()
        (batch.DirectoriesFilter(['sub*']).obv(
            self.fs).flat_map(lambda p: batch.FilesFilter(['result*']).obv(
                self.fs, p)).subscribe(results.add))
        expected = {'sub.{}/result.txt'.format(i) for i in range(5)}
        self.assertEqual(results, expected)
Example #8
        path = relpath(normpath(path))
        path = path.replace("__colon__", ":")
        if not self.allow_autorun:
            if path.lower().startswith("autorun."):
                path = "_" + path
        return path


if __name__ == "__main__":
    import os.path
    import tempfile
    from fs.osfs import OSFS
    from fs.memoryfs import MemoryFS
    from shutil import rmtree
    from six import b
    path = tempfile.mkdtemp()
    try:
        #fs = OSFS(path)
        fs = MemoryFS()
        fs.create('test.txt')
        fs.appendtext('test.txt',
                      'this is a test',
                      encoding=u'utf-8',
                      errors=None,
                      newline=u'')
        flags = DOKAN_OPTION_DEBUG | DOKAN_OPTION_STDERR | DOKAN_OPTION_REMOVABLE
        mount(fs, "Q:\\", foreground=True, numthreads=1, flags=flags)
        fs.close()
    finally:
        rmtree(path)
Example #9
class TestCaseWithMemoryFS(unittest.TestCase):
    def setUp(self):
        self.fs = MemoryFS()

    def tearDown(self):
        self.fs.close()
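A minimal sketch of reusing this base class, assuming PyFilesystem2's MemoryFS text helpers (writetext/readtext/exists); the subclass and test names are hypothetical:

class TestWithFreshMemoryFS(TestCaseWithMemoryFS):
    def test_write_and_read_back(self):
        # self.fs is the fresh MemoryFS created in setUp()
        self.fs.writetext('hello.txt', 'hello')
        self.assertTrue(self.fs.exists('hello.txt'))
        self.assertEqual(self.fs.readtext('hello.txt'), 'hello')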
Example #10
class FSExpirationsTest(TestCase):
    def setUp(self):
        self.fs = MemoryFS()
        self.test_file_path = '/foo/bar'
        self.expires = True
        self.expire_secs = 1
        self.expire_days = 0
        self.create_time = timezone.now()
        self.module = "unittest"

    def tearDown(self):
        self.fs.close()

    def test_create_expiration_exists(self):
        """
        Exercises FSExpirations.create_expiration() with an existing expiration
        """
        # In the first create_expiration, it does not exist. Second loop it is
        # updating the existing row.
        for _ in range(2):
            FSExpirations.create_expiration(
                self.module, self.test_file_path, self.expire_secs, self.expire_days, self.expires
            )

            self.assertEqual(FSExpirations.objects.all().count(), 1)

            fse = FSExpirations.objects.first()
            self.assertEqual(fse.module, self.module)
            self.assertEqual(fse.filename, self.test_file_path)
            self.assertEqual(fse.expires, self.expires)

            # Check expiration time within a couple of seconds
            self.assertGreaterEqual(fse.expiration, self.create_time)
            self.assertLessEqual(fse.expiration, self.create_time + timezone.timedelta(seconds=self.expire_secs + 3))

    def test_expired(self):
        """
        Exercises FSExpirations.expired() with an expired expiration
        """
        expire_secs = 0
        expire_days = 0

        FSExpirations.create_expiration(self.module, self.test_file_path, expire_secs, expire_days, self.expires)

        self.assertEqual(FSExpirations.objects.all().count(), 1)
        fse = FSExpirations.objects.first()

        self.assertEqual(fse.expires, True)

        expirations = fse.expired()
        self.assertEqual(len(expirations), 1)
        self.assertEqual(expirations[0], fse)

    def test_expired_is_not_expired(self):
        """
        Exercises FSExpirations.expired() with no expired expirations
        """
        # Make sure an empty result is empty, not an exception
        self.assertEqual(len(FSExpirations.expired()), 0)

        # Create a future expiration
        expire_secs = 30
        expire_days = 0
        FSExpirations.create_expiration(
            self.module, self.test_file_path, expire_secs, expire_days, self.expires
        )

        # Make sure there is 1 expiration pending, but nothing currently
        # expired
        self.assertEqual(FSExpirations.objects.all().count(), 1)
        self.assertEqual(len(FSExpirations.expired()), 0)

    def test_str(self):
        # First check the unexpired version of the string
        fse = FSExpirations(
            module=self.module, filename=self.test_file_path, expiration=self.create_time, expires=False
        )
        fse2 = FSExpirations(
            module=self.module, filename=self.test_file_path, expiration=self.create_time, expires=True
        )

        for f in (fse, fse2):
            # Don't really care what __str__ is, just that it returns a string
            # of some variety and doesn't error
            try:
                result = f.__str__()
                self.assertTrue(isinstance(result, six.string_types))
            except Exception as e:
                self.fail("__str__ raised an exception! {}".format(e))
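A hedged usage sketch built only from the calls exercised above (FSExpirations.expired() plus the module and filename fields); how expired entries are actually handled is project-specific and not shown here:

def list_expired_files():
    # Sketch: collect (module, filename) pairs for every expired entry,
    # mirroring the query asserted in test_expired() above
    return [(fse.module, fse.filename) for fse in FSExpirations.expired()]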
Example #11
def encode_file_into_luby_blocks_func(
        folder_containing_art_image_and_metadata_files):
    global block_redundancy_factor
    global desired_block_size_in_bytes
    file_paths_in_folder = glob.glob(
        folder_containing_art_image_and_metadata_files + '*')
    for current_file_path in file_paths_in_folder:
        if current_file_path.split('.')[-1] in ['zst', 'tar']:
            try:
                os.remove(current_file_path)
            except Exception as e:
                print('Error: ' + str(e))
    c_constant = 0.1  #Don't touch
    delta_constant = 0.5  #Don't touch
    start_time = time()
    ramdisk_object = MemoryFS()
    seed = random.randint(0, (1 << 31) - 1)  # maximum 31-bit seed value
    compressed_output_file_path, compressed_file_hash = add_art_image_files_and_metadata_to_zstd_compressed_tar_file_func(
        folder_containing_art_image_and_metadata_files)
    final_art_file__original_size_in_bytes = os.path.getsize(
        compressed_output_file_path)
    # Process the compressed file into a stream of encoded blocks, and save those blocks as separate files in the output folder:
    output_blocks_list = []
    print('Now encoding file ' + compressed_output_file_path + ' (' +
          str(round(final_art_file__original_size_in_bytes / 1000000)) +
          'mb)\n\n')
    total_number_of_blocks_to_generate = ceil(
        (1.00 * block_redundancy_factor *
         final_art_file__original_size_in_bytes) / desired_block_size_in_bytes)
    print(
        'Total number of blocks to generate for target level of redundancy: ' +
        str(total_number_of_blocks_to_generate))
    with open(compressed_output_file_path, 'rb') as f:
        compressed_data = f.read()
    compressed_data_size_in_bytes = len(compressed_data)
    blocks = [
        int.from_bytes(
            compressed_data[ii:ii + desired_block_size_in_bytes].ljust(
                desired_block_size_in_bytes, b'0'), 'little') for ii in
        range(0, compressed_data_size_in_bytes, desired_block_size_in_bytes)
    ]
    prng = PRNG(params=(len(blocks), delta_constant, c_constant))
    prng.set_seed(seed)
    output_blocks_list = list()
    number_of_blocks_generated = 0
    while number_of_blocks_generated < total_number_of_blocks_to_generate:
        random_seed, d, ix_samples = prng.get_src_blocks()
        block_data = 0
        for ix in ix_samples:
            block_data ^= blocks[ix]
        block_data_bytes = int.to_bytes(block_data,
                                        desired_block_size_in_bytes, 'little')
        block_data_hash = hashlib.sha3_256(block_data_bytes).digest()
        block = (compressed_data_size_in_bytes, desired_block_size_in_bytes,
                 random_seed, block_data_hash, block_data_bytes)
        header_bit_packing_pattern_string = '<3I32s'
        bit_packing_pattern_string = header_bit_packing_pattern_string + str(
            desired_block_size_in_bytes) + 's'
        length_of_header_in_bytes = struct.calcsize(
            header_bit_packing_pattern_string)
        packed_block_data = pack(bit_packing_pattern_string, *block)
        if number_of_blocks_generated == 0:  #Test that the bit-packing is working correctly:
            with io.BufferedReader(io.BytesIO(packed_block_data)) as f:
                header_data = f.read(length_of_header_in_bytes)
                #first_generated_block_raw_data = f.read(desired_block_size_in_bytes)
            compressed_input_data_size_in_bytes_test, desired_block_size_in_bytes_test, random_seed_test, block_data_hash_test = unpack(
                header_bit_packing_pattern_string, header_data)
            if block_data_hash_test != block_data_hash:
                print(
                    'Error! Block data hash does not match the hash reported in the block header!'
                )
        output_blocks_list.append(packed_block_data)
        number_of_blocks_generated = number_of_blocks_generated + 1
        hash_of_block = get_sha256_hash_of_input_data_func(packed_block_data)
        output_block_file_path = 'FileHash__' + compressed_file_hash + '__Block__' + '{0:09}'.format(
            number_of_blocks_generated
        ) + '__BlockHash_' + hash_of_block + '.block'
        try:
            with ramdisk_object.open(output_block_file_path, 'wb') as f:
                f.write(packed_block_data)
        except Exception as e:
            print('Error: ' + str(e))
    duration_in_seconds = round(time() - start_time, 1)
    print('\n\nFinished processing in ' + str(duration_in_seconds) +
          ' seconds! \nOriginal zip file was encoded into ' +
          str(number_of_blocks_generated) + ' blocks of ' +
          str(ceil(desired_block_size_in_bytes / 1000)) +
          ' kilobytes each. Total size of all blocks is ~' + str(
              ceil((number_of_blocks_generated * desired_block_size_in_bytes) /
                   1000000)) + ' megabytes\n')
    print('Now copying encoded files from ram disk to local storage...')
    block_storage_folder_path = folder_containing_art_image_and_metadata_files + os.sep + 'block_files'
    if not os.path.isdir(block_storage_folder_path):
        os.makedirs(block_storage_folder_path)
    filesystem_object = OSFS(block_storage_folder_path)
    copy_fs(ramdisk_object, filesystem_object)
    print('Done!\n')
    ramdisk_object.close()
    return duration_in_seconds
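The encoder above packs each block as a '<3I32s' header (compressed size, block size, seed, SHA3-256 digest) followed by the padded payload; a hedged sketch of reading one of the generated .block files back with the same pattern:

def inspect_block_file(block_file_path, desired_block_size_in_bytes):
    # Sketch based on the packing pattern used in the encoder above
    header_fmt = '<3I32s'
    header_size = struct.calcsize(header_fmt)
    with open(block_file_path, 'rb') as f:
        header_data = f.read(header_size)
        payload = f.read(desired_block_size_in_bytes)
    original_size, block_size, seed, payload_hash = unpack(header_fmt, header_data)
    # The stored digest should match a fresh SHA3-256 of the payload
    assert hashlib.sha3_256(payload).digest() == payload_hash
    return original_size, block_size, seed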
Example #12
def test_write_subdir_versions(self) -> None:
    fs = MemoryFS()
    write_subdir_versions_to_directory(fs, "/", self.d)
    self.assertEqual(self.txt, fs.readtext(SUBDIR_VERSIONS_FILENAME))
    fs.close()
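A hedged round-trip sketch combining this writer with the reader from Example #4; both helpers are assumed to come from the module under test:

def test_subdir_versions_round_trip(self) -> None:
    fs = MemoryFS()
    write_subdir_versions_to_directory(fs, "/", self.d)
    # Reading back what was just written should reproduce the original dict
    self.assertEqual(self.d, read_subdir_versions_from_directory(fs, "/"))
    fs.close()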
Example #13
def encode_final_art_zipfile_into_luby_transform_blocks_func(
        sha256_hash_of_art_file):
    global block_storage_folder_path
    global block_redundancy_factor
    global desired_block_size_in_bytes
    global prepared_final_art_zipfiles_folder_path
    start_time = time()
    ramdisk_object = MemoryFS()
    filesystem_object = OSFS(block_storage_folder_path)
    c_constant = 0.1
    delta_constant = 0.5
    seed = randint(0, (1 << 31) - 1)  # maximum 31-bit seed value
    path_to_final_artwork_zipfile_including_metadata = glob.glob(
        prepared_final_art_zipfiles_folder_path + '*' +
        sha256_hash_of_art_file + '*')[0]
    final_art_file__original_size_in_bytes = os.path.getsize(
        path_to_final_artwork_zipfile_including_metadata)
    # Process the ZIP file into a stream of encoded blocks, and save those blocks as separate files in the output folder:
    output_blocks_list = []
    print('Now encoding file ' +
          os.path.split(path_to_final_artwork_zipfile_including_metadata)[-1] +
          ' (' + str(round(final_art_file__original_size_in_bytes / 1000000)) +
          'mb)\n\n')
    total_number_of_blocks_to_generate = ceil(
        (1.00 * block_redundancy_factor *
         final_art_file__original_size_in_bytes) / desired_block_size_in_bytes)
    print(
        'Total number of blocks to generate for target level of redundancy: ' +
        str(total_number_of_blocks_to_generate))
    pbar = tqdm(total=total_number_of_blocks_to_generate)
    with open(path_to_final_artwork_zipfile_including_metadata, 'rb') as f:
        f_bytes = f.read()
    filesize = len(f_bytes)
    art_zipfile_hash = hashlib.sha256(f_bytes).hexdigest()
    if art_zipfile_hash == sha256_hash_of_art_file:  #Convert file byte contents into blocksize chunks, padding last one if necessary:
        blocks = [
            int.from_bytes(
                f_bytes[ii:ii + desired_block_size_in_bytes].ljust(
                    desired_block_size_in_bytes, b'0'), sys.byteorder)
            for ii in range(0, len(f_bytes), desired_block_size_in_bytes)
        ]
        number_of_blocks = len(blocks)
        print('The length of the blocks list: ' + str(number_of_blocks))
        prng = PRNG(params=(number_of_blocks, delta_constant, c_constant))
        prng.set_seed(seed)
        number_of_blocks_generated = 0  # block generation loop
        while number_of_blocks_generated < total_number_of_blocks_to_generate:
            update_skip = 1
            if (number_of_blocks_generated % update_skip) == 0:
                pbar.update(update_skip)
            blockseed, d, ix_samples = prng.get_src_blocks()
            block_data = 0
            for ix in ix_samples:
                block_data ^= blocks[ix]
            block = (filesize, desired_block_size_in_bytes, blockseed,
                     int.to_bytes(block_data, desired_block_size_in_bytes,
                                  sys.byteorder)
                     )  # Generate blocks of XORed data in network byte order
            number_of_blocks_generated = number_of_blocks_generated + 1
            packed_block_data = pack('!III%ss' % desired_block_size_in_bytes,
                                     *block)
            output_blocks_list.append(packed_block_data)
            hash_of_block = hashlib.sha256(packed_block_data).hexdigest()
            output_block_file_path = 'FileHash__' + art_zipfile_hash + '__Block__' + '{0:09}'.format(
                number_of_blocks_generated
            ) + '__BlockHash_' + hash_of_block + '.block'
            try:
                with ramdisk_object.open(output_block_file_path, 'wb') as f:
                    f.write(packed_block_data)
            except Exception as e:
                print('Error: ' + str(e))
        duration_in_seconds = round(time() - start_time, 1)
        print('\n\nFinished processing in ' + str(duration_in_seconds) +
              ' seconds! \nOriginal zip file was encoded into ' +
              str(number_of_blocks_generated) + ' blocks of ' +
              str(ceil(desired_block_size_in_bytes / 1000)) +
              ' kilobytes each. Total size of all blocks is ~' + str(
                  ceil((number_of_blocks_generated *
                        desired_block_size_in_bytes) / 1000000)) +
              ' megabytes\n')
        print('Now copying encoded files from ram disk to local storage...')
        copy_fs(ramdisk_object, filesystem_object)
        print('Done!\n')
        ramdisk_object.close()
        return duration_in_seconds
Example #14
  try:
    auth.get_access_token(verifier)
  except tweepy.TweepError as ex:
    print("Error! Failed to get access token.")
    sys.exit(ex)

  # Save access token
  credentials = {'token': auth.access_token, 'token_secret': auth.access_token_secret}
  with open(credentials_file, "w") as file:
    yaml.dump(credentials, file, default_flow_style=False)

# Initialize camera
camera = PiCamera()
memfs = MemoryFS()

# Let's go!
twitter_api = tweepy.API(auth)

camera.resolution = (1024, 768)
camera.start_preview()
# Camera warm-up time
sleep(2)

capture_file = memfs.open('memory_capture.jpg', 'w+b')
camera.capture(capture_file)
sleep(1)
# Rewind the in-memory file so the upload reads the captured image from the start
capture_file.seek(0)
twitter_api.update_with_media('memory_capture.jpg', file=capture_file)
capture_file.close()

memfs.close()
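The script never releases the camera; a small hedged cleanup sketch using standard picamera calls:

# Optional cleanup (sketch): stop the preview and free the camera device
camera.stop_preview()
camera.close()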