#!/usr/bin/python3

from fs.walk import Walker
from fs.onedatafs import OnedataFS
from time import time
import os
import subprocess
from vars import *

# Connect to the Oneprovider. force_direct_io bypasses proxying data through
# the provider, and cli_args tune the communicator layer for throughput.
odfs = OnedataFS(
    provider,
    token,
    force_direct_io=True,
    cli_args="--communicator-pool-size=20 --communicator-thread-count=8")
# Open the target space as a sub-filesystem rooted at space_name.
space = odfs.opendir(space_name)
# st/ct/pt: start, current and previous timestamps for progress reporting.
st = ct = pt = time()
file_count = 0
# Fall back to defaults when vars.py (imported with *) does not define
# the benchmark knobs.
if 'block_size' not in globals():
    block_size = 1048576  # 1 MiB per block
if 'block_count' not in globals():
    block_count = 100
# recreate=True makes re-runs idempotent: without it makedir raises
# DirectoryExists when 'dir-flat' is left over from a previous run.
space.makedir('dir-flat', recreate=True)
print('Uploading',
      files_limit,
      'files of size',
      block_size * block_count / 1048576,
      'MB...',
      flush=True)
for i in range(files_limit):
    f = subprocess.run(
        ['./gbs', str(block_size),
# Пример #2 ("Example #2"): marker left over from the page this code was
# scraped from — an unrelated second script fragment begins below; the
# stray "0" on the following line was an extraction artifact.
            l.info("Saving sequence={} of worker={} to file={}, ".format(
                threadsSequenceNumbers[i], i, fileHandles[i]))
    # TODO: fix it so that a task is scheduled properly!
    fileHandlesTimer = threading.Timer(2.0, flushFileHandles)
    fileHandlesTimer.start()


# Start the first periodic flush of the worker file handles; flushFileHandles
# re-arms this timer itself (see the fragment above), so this only kicks off
# the first 2-second cycle.
fileHandlesTimer = threading.Timer(2.0, flushFileHandles)
fileHandlesTimer.start()

# Initialize OnedataFS
# NOTE(review): insecure=True disables certificate verification — confirm
# this is intended outside of a test environment.
odfs = OnedataFS(sourceProvider, apiToken, insecure=True, force_direct_io=True)
# Print list of user spaces
l.debug(odfs.listdir('/'))
# Open the space
space = odfs.opendir('/{}'.format(sourceSpaceName))

# Start filling up the queue with files
# (producer thread; startingSequenceNumber resumes from a saved position)
myChangesListener = ChangesListener(
    name='producer', startingSequenceNumber=initialStartingSequence)
myChangesListener.start()

# Process items in the queue
# (blocks until traversal is finished; consumes what the producer enqueues)
traverse(odfs)

# Close OnedataFS
l.info("Processing ended. Closing onedatafs.")
odfs.close()

# Close workers filehandles
for i in range(0, numberOfWorkers):