def handle(req: bytes):
    """Handle a request to the function.

    Computes a checksum of a file stored in Onedata and returns it as an
    extended-attribute payload.

    Args:
        req (bytes): JSON-encoded request body with keys "host",
            "accessToken", "filePath" and optional "algorithm"
            (defaults to "md5").

    Returns:
        str: JSON document of the form
            {"xattrs": {"checksum": <final checksum value>}}.
    """
    args = json.loads(req)
    odfs = OnedataFS(
        args["host"],
        args["accessToken"],
        force_direct_io=True,
        insecure=True)
    try:
        algorithm = args.get("algorithm", "md5")
        checksum = init(algorithm)
        with odfs.open(args["filePath"], 'rb') as f:
            # Stream the file in fixed-size blocks so arbitrarily large
            # files can be checksummed without loading them into memory.
            while True:
                data = f.read(BLOCK_SIZE)
                if not data:
                    break
                checksum = update(algorithm, checksum, data)
        return json.dumps(
            {"xattrs": {"checksum": finish(algorithm, checksum)}})
    finally:
        # Close the filesystem handle so the provider connection is not
        # leaked across invocations (the original never closed it).
        odfs.close()
#!/usr/bin/python3 from fs.walk import Walker from fs.onedatafs import OnedataFS from time import time import os import subprocess from vars import * odfs = OnedataFS( provider, token, force_direct_io=True, cli_args="--communicator-pool-size=20 --communicator-thread-count=8") space = odfs.opendir(space_name) st = ct = pt = time() file_count = 0 if 'block_size' not in globals(): block_size = 1048576 if 'block_count' not in globals(): block_count = 100 space.makedir('dir-flat') print('Uploading', files_limit, 'files of size', block_size * block_count / 1048576, 'MB...', flush=True) for i in range(files_limit): f = subprocess.run( ['./gbs', str(block_size),
# NOTE(review): this benchmark-script chunk is collapsed onto one line and is
# truncated mid-call — the subprocess.run(['./gbs', str(block_size), ...)
# invocation and the rest of the upload loop are outside this view, so the
# chunk cannot be safely reformatted or rewritten here.
# NOTE(review): `provider`, `token`, `space_name` and `files_limit` appear to
# come from the wildcard `from vars import *` — TODO confirm against vars.py;
# wildcard imports hide where names originate and are worth replacing with an
# explicit import list once the full file is in view.
def _open_fs(self, user_context):
    """Build and return an OnedataFS handle for the given user context.

    Connection parameters are derived from the serialization properties
    of *user_context*.
    """
    connection_props = self._serialization_props(user_context)
    return OnedataFS(**connection_props)
fileHandles[i].truncate(0) # temporary save of a sequence so the value does not change between write and when log message if printed fileHandles[i].write(str(aSequenceNumber)) fileHandles[i].flush() l.info("Saving sequence={} of worker={} to file={}, ".format( threadsSequenceNumbers[i], i, fileHandles[i])) # TODO: fix it so that a task is scheduled properly! fileHandlesTimer = threading.Timer(2.0, flushFileHandles) fileHandlesTimer.start() fileHandlesTimer = threading.Timer(2.0, flushFileHandles) fileHandlesTimer.start() # Initialize OnedataFS odfs = OnedataFS(sourceProvider, apiToken, insecure=True, force_direct_io=True) # Print list of user spaces l.debug(odfs.listdir('/')) # Open the space space = odfs.opendir('/{}'.format(sourceSpaceName)) # Start filling up the queue with files myChangesListener = ChangesListener( name='producer', startingSequenceNumber=initialStartingSequence) myChangesListener.start() # Process items in the queue traverse(odfs) # Close OnedataFS l.info("Processing ended. Closing onedatafs.")
# NOTE(review): this chunk begins inside a function whose `def` line is
# outside the visible range (the fileHandles[i].truncate/write/flush body,
# presumably flushFileHandles) and is collapsed onto one line, so it is not
# safe to reformat or restructure here.
# NOTE(review): `threading.Timer(2.0, flushFileHandles)` is created and
# started TWICE in a row — looks like an accidental duplicate that schedules
# two timer chains; the chunk's own TODO already flags the scheduling as
# broken. Verify against the full file before removing one of them.
# NOTE(review): the log message "Saving sequence=..." formats
# threadsSequenceNumbers[i] while the file write uses aSequenceNumber —
# presumably the same value per the adjacent comment, but confirm.