Example #1
import json

from fs.onedatafs import OnedataFS

# BLOCK_SIZE and the init/update/finish checksum helpers are defined
# elsewhere in the original module; a 1 MiB block size is assumed here.
BLOCK_SIZE = 1024 * 1024


def handle(req: bytes):
    """Handle a request to the function.

    Args:
        req (bytes): JSON-encoded request body
    """
    args = json.loads(req)

    odfs = OnedataFS(args["host"],
                     args["accessToken"],
                     force_direct_io=True,
                     insecure=True)

    algorithm = args.get("algorithm", "md5")
    checksum = init(algorithm)

    with odfs.open(args["filePath"], 'rb') as f:
        while True:
            data = f.read(BLOCK_SIZE)
            if not data:
                break
            checksum = update(algorithm, checksum, data)

    return json.dumps({"xattrs": {"checksum": finish(algorithm, checksum)}})
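A minimal invocation sketch for the handler above; the host, token and file
path in the payload are placeholders, not values from the original:

if __name__ == "__main__":
    request = json.dumps({
        "host": "provider.example.com",   # hypothetical Oneprovider host
        "accessToken": "<access token>",  # placeholder token
        "filePath": "/my-space/data.bin", # placeholder file path
        "algorithm": "md5",
    }).encode()
    print(handle(request))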
Example #2

#!/usr/bin/python3

from fs.walk import Walker
from fs.onedatafs import OnedataFS
from time import time
import os
import subprocess
from vars import *  # provider, token, space_name, files_limit, ...

odfs = OnedataFS(
    provider,
    token,
    force_direct_io=True,
    cli_args="--communicator-pool-size=20 --communicator-thread-count=8")
space = odfs.opendir(space_name)
st = ct = pt = time()
file_count = 0
# Defaults, in case vars.py does not define them
if 'block_size' not in globals():
    block_size = 1048576
if 'block_count' not in globals():
    block_count = 100
space.makedir('dir-flat')
print('Uploading',
      files_limit,
      'files of size',
      block_size * block_count / 1048576,
      'MB...',
      flush=True)
for i in range(files_limit):
    f = subprocess.run(
        ['./gbs', str(block_size), ...])  # truncated in the original listing
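The listing breaks off inside the upload loop. A hedged sketch of how such a
loop body might continue, assuming (hypothetically) that ./gbs generates a
test file of block_count blocks of block_size bytes and prints its local
path; space.writefile is the standard pyfilesystem write API that OnedataFS
inherits:

for i in range(files_limit):
    # Assumption: './gbs' prints the path of the file it generated.
    f = subprocess.run(['./gbs', str(block_size), str(block_count)],
                       stdout=subprocess.PIPE)
    local_path = f.stdout.decode().strip()
    with open(local_path, 'rb') as src:
        space.writefile('dir-flat/file-{}'.format(i), src)
    os.remove(local_path)  # clean up the local copy
    file_count += 1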
Example #3
def _open_fs(self, user_context):
    props = self._serialization_props(user_context)
    handle = OnedataFS(**props)
    return handle
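This method is excerpted from a class, and _serialization_props is defined
elsewhere in it. A minimal sketch of the kind of mapping it might return,
assuming the documented OnedataFS constructor parameters (host, token and
optional flags); the values are placeholders:

props = {
    "host": "provider.example.com",  # Oneprovider host (placeholder)
    "token": "<access token>",       # Onedata access token (placeholder)
    "insecure": True,                # skip certificate verification
}
odfs = OnedataFS(**props)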
Example #4
            fileHandles[i].truncate(0)
            # Temporary copy of the sequence number, so the value cannot
            # change between the write and the moment the log message is
            # printed.
            fileHandles[i].write(str(aSequenceNumber))
            fileHandles[i].flush()
            l.info("Saving sequence={} of worker={} to file={}".format(
                aSequenceNumber, i, fileHandles[i]))
    # TODO: fix it so that a task is scheduled properly!
    fileHandlesTimer = threading.Timer(2.0, flushFileHandles)
    fileHandlesTimer.start()


fileHandlesTimer = threading.Timer(2.0, flushFileHandles)
fileHandlesTimer.start()

# Initialize OnedataFS
odfs = OnedataFS(sourceProvider, apiToken, insecure=True, force_direct_io=True)
# Print list of user spaces
l.debug(odfs.listdir('/'))
# Open the space
space = odfs.opendir('/{}'.format(sourceSpaceName))

# Start filling up the queue with files
myChangesListener = ChangesListener(
    name='producer', startingSequenceNumber=initialStartingSequence)
myChangesListener.start()

# Process items in the queue
traverse(odfs)

# Close OnedataFS
l.info("Processing ended. Closing onedatafs.")
odfs.close()
Example #5
import zlib

# BLOCKSIZE is defined elsewhere in the original script; 64 KiB is assumed.
BLOCKSIZE = 65536


def adler32sum(filepath):
    asum = 1  # zlib.adler32's default starting value
    with open(filepath, 'rb') as f:
        while True:
            data = f.read(BLOCKSIZE)
            if not data:
                break
            asum = zlib.adler32(data, asum)
            if asum < 0:  # Python 2 compatibility; unsigned on Python 3
                asum += 2**32
    return format(asum, 'x')

# read_arguments(), provider and token are defined elsewhere in the
# original script (argument parsing and connection settings).
args = read_arguments()
algorithm = args.algorithm
file_to_upload = args.local_file
upload_path = args.onedata_path

odfs = OnedataFS(
    provider,
    token,
    force_proxy_io=True,
    cli_args="--communicator-pool-size=20 --communicator-thread-count=8")
if algorithm == 'md5':
    hash_value = md5(file_to_upload)
    hash_key = 'md5_orig'
elif algorithm == 'adler32':
    hash_value = adler32sum(file_to_upload)
    hash_key = 'adler32_orig'
elif algorithm == 'sha512':
    hash_value = sha512(file_to_upload)
    hash_key = 'sha512_orig'
else:
    print(algorithm + " not supported")
    quit()
print(hash_key, hash_value)
with open(file_to_upload, "rb") as read_file:
    odfs.writefile(upload_path, read_file)
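The md5 and sha512 helpers called above are not shown in the listing. A
minimal sketch of what a chunked md5 helper typically looks like (the
sha512 variant is analogous), mirroring the read loop of adler32sum:

import hashlib

def md5(filepath, blocksize=65536):
    # Hash the file in chunks so large files need not fit in memory.
    digest = hashlib.md5()
    with open(filepath, 'rb') as f:
        while True:
            data = f.read(blocksize)
            if not data:
                break
            digest.update(data)
    return digest.hexdigest()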