def test_insuff_space_warm_up():
    """warm_up() must tolerate a storage backend with zero capacity."""
    with cloudmanager.CloudManager(TEST_DB_PATH, "storage", 31) as cm:
        file_hash = cm.upload(file1)
        assert file_hash
        # Swap in an empty, zero-byte storage so warm_up() exercises the
        # insufficient-space path for a file that is already uploaded.
        cm.storage = cloudmanager.storage.Storage('', 0)
        cm.warm_up(file_hash)
def __init__(self):
    """Wire up collaborators from the application settings.

    Builds the cloud storage manager, the Florincoin metachains client,
    and the accounts API client, each configured from `settings`.
    """
    self.cloud = cloudmanager.CloudManager(
        settings.DATABASE_PATH,
        settings.STORAGE_PATH,
        settings.STORAGE_SIZE)
    self.coin = metachains.Florincoin(
        settings.METACHAINS_URL,
        settings.METACHAINS_USERNAME,
        settings.METACHAINS_PASSWORD)
    self.accounts = accounts.create(
        settings.ACCOUNTS_API_ENABLED,
        settings.ACCOUNTS_API_BASE_URL,
        settings.ACCOUNTS_API_KEY)
def test_blockchain_queue_info():
    """blockchain_queue_info() must total size and count of pending rows."""
    with cloudmanager.CloudManager(TEST_DB_PATH, "storage", 31) as cm:
        TEST_RECORD_COUNT = 3
        TEST_RECORD_SIZE_EACH = 1024

        # Seed the files table with rows whose exported_timestamp is NULL
        # (rows still sitting in the blockchain queue).
        with cm.file_database.db.cursor() as cursor:
            for i in range(TEST_RECORD_COUNT):
                cursor.execute(
                    '''
                    INSERT INTO files (name, size, hash, payload, exported_timestamp)
                    VALUES ('name {}', {}, NULL, '{{}}', NULL);
                    '''.format(i, TEST_RECORD_SIZE_EACH))
        cm.file_database.db.commit()

        # A zero-size budget yields no export candidates.
        assert cm.export_candidates(0) == []

        expected_info = {
            "size": TEST_RECORD_COUNT * TEST_RECORD_SIZE_EACH,
            "count": TEST_RECORD_COUNT,
        }
        assert cm.blockchain_queue_info() == expected_info
def test_cloudmanager_no_disk_space():
    """upload() must return False when the disk cannot hold the file."""
    with cloudmanager.CloudManager(TEST_DB_PATH, "storage", 2200) as cm:
        needed = os.path.getsize(big_file)

        # Stub the storage accounting: report almost nothing used, and
        # claim the file fits in cache, so upload() proceeds far enough
        # to hit the disk-full path.
        old_used = cm.storage.used
        old_fits = cm.storage.fits
        cm.storage.used = lambda: 1
        cm.storage.fits = lambda file_bytes: file_bytes <= needed

        assert cm.upload(big_file) == False

        # Restore the real storage callables before closing.
        cm.storage.fits = old_fits
        cm.storage.used = old_used
        cm.close()
# NOTE(review): collapsed fragment, left byte-identical.  It contains the
# tail of get_cloudconfig() ("return userdata") followed by the start of
# the script entry point.  The `try:` block's matching `except` lies
# outside this chunk (sibling scripts pair it with
# `except Exception as exc: logging.critical(...); sys.exit(1)`), so the
# fragment cannot be safely reflowed or completed here.
# The entry point: parses CLI args, configures the "Provisioner" logger,
# builds a CloudManager from --configFile, then either deletes a previous
# snapshot (--cleanup) or aborts if one already exists.
return userdata if __name__ == "__main__": try: parser = argparse.ArgumentParser( description='Create Openstack image containing Docker.') cloudmanager.add_parser_args(parser) args = parser.parse_args() loggerName = "Provisioner" cloudmanager.config_logger(loggerName, args.verbose, args.verboseAll) cloudManager = cloudmanager.CloudManager( config_file_name=args.configFile) userdata_snapshot = get_cloudconfig() previous_snapshot = cloudManager.nova_snapshot_find() if args.cleanup: if previous_snapshot is not None: logging.debug("Removing previous snapshot: %s", cloudManager.snapshot_name) cloudManager.nova_snapshot_delete(previous_snapshot) elif previous_snapshot is not None: logging.critical("Destination snapshot: %s already exist", cloudManager.snapshot_name) sys.exit(1)
if __name__ == "__main__":
    try:
        # Command-line interface: instance count plus the shared
        # cloudmanager options (config file, verbosity flags, ...).
        parser = argparse.ArgumentParser(
            description='Boot instances from image containing Docker.')
        parser.add_argument('-n', '--nb-servers',
                            dest='nbServers',
                            required=False,
                            default=3,
                            type=int,
                            help='Choose the number of servers to boot')
        cloudmanager.add_parser_args(parser)
        args = parser.parse_args()

        loggerName = "Provisioner"
        cloudmanager.config_logger(loggerName, args.verbose, args.verboseAll)

        # Boot from the snapshot image and inject the SSH key.
        cloudManager = cloudmanager.CloudManager(
            config_file_name=args.configFile,
            used_image_key=cloudmanager.SNAPSHOT_IMAGE_KEY,
            add_ssh_key=True)

        main()
    except Exception as exc:
        # Top-level boundary: log with traceback and exit non-zero.
        logging.critical('Exception occurred: %s', exc, exc_info=True)
        sys.exit(1)
# NOTE(review): collapsed fragment, left byte-identical.  The first part is
# the tail of main() (its function-level indentation was lost in the
# collapse): it validates that the number of configured cinder volumes
# matches the number of qserv instances, attaches each volume to its
# instance, and mounts them.  The second part is the complete
# `if __name__ == "__main__":` entry point with its try/except boundary.
# Also note the embedded "# Define command-line arguments" comment: on a
# single physical line it comments out everything after it, so this line
# cannot run as-is and must be re-split before use.
if cloudManager.volume_names: if len(cloudManager.volume_names) != len(qserv_instances): logging.error("Data volumes: %s", cloudManager.volume_names) raise ValueError("Invalid number of cinder data volumes") for (instance, vol_name) in zip(qserv_instances, cloudManager.volume_names): cloudManager.nova_create_server_volume(instance.id, vol_name) cloudManager.mount_volume(qserv_instances) logging.debug("SUCCESS: Qserv Openstack cluster is up") if __name__ == "__main__": try: # Define command-line arguments parser = argparse.ArgumentParser( description='Boot instances from image containing Docker.') cloudmanager.add_parser_args(parser) args = parser.parse_args() cloudmanager.config_logger(args.verbose, args.verboseAll) cloudManager = cloudmanager.CloudManager( config_file_name=args.configFile, add_ssh_key=True) main() except Exception as exc: logging.critical('Exception occurred: %s', exc, exc_info=True) sys.exit(1)
def test_cloudmanager():
    """End-to-end exercise of the CloudManager public API."""
    remove_storage_files()
    with cloudmanager.CloudManager(TEST_DB_PATH, "storage", 31) as cm:
        # On an empty database, every lookup-style call yields None/falsy.
        assert cm.data_dump(128000) is None
        assert cm.data_load(None, '0000') is None
        assert cm.warm_up('0000') is None
        assert cm.info('0000') is None
        assert not cm.download("invalidhash")
        #assert cm.usage_ratio()

        file_hash = cm.upload(file1)
        assert file_hash
        assert cm.blockchain_queue_info()
        # Upload the same file again to cover the duplicate-upload path.
        file_hash = cm.upload(file1)
        assert file_hash
        assert cm.warm_up(file_hash)
        assert cm.exists(file_hash)
        assert cm.on_cache(file_hash)
        # big_file exceeds the 31-byte capacity.
        assert cm.upload(big_file) is False

        cm.cloud_sync()
        data = cm.data_dump(128000)
        lkb = cm.last_known_block()
        assert lkb == 0
        assert not cm.visit_block(lkb)
        uploadinfo = cm.info(file_hash)
        assert uploadinfo['filesize'] == os.stat(file1).st_size
        assert cm.data_load(data, file_hash)

        second_hash = cm.upload(file2)
        assert second_hash
        assert cm.warm_up(second_hash)
        assert cm.usage_ratio()
        # No network traffic has happened in this test run.
        assert cm.total_incoming() == 0
        assert cm.total_outgoing() == 0
        assert cm.current_incoming() == 0
        assert cm.current_outgoing() == 0
        assert cm.upload_queue_info()
        assert cm.blockchain_queue_info()
        assert cm.used_space()
        assert cm.capacity()
        assert cm.sync_status()
        record = cm.file_database.fetch(file_hash)
        assert 'uploads' in cm.dict_description(record)

        # Download path for a file that is no longer in local storage.
        remove_storage_files()
        assert cm.warm_up(file_hash)

        # Out-of-disk-space path: force make_room_for() to fail.
        real_make_room_for = cm.make_room_for
        cm.make_room_for = lambda needed: False
        assert cm.upload(file3) is False
        remove_storage_files()
        cm.make_room_for = real_make_room_for

        # warm_up must report failure when the file is missing on disk.
        third_hash = cm.upload(file3)
        remove_storage_files()
        assert cm.warm_up(third_hash) is False

        # Extra coverage for the payload deserializer.
        from cloudmanager.payload import from_dict
        payload_dict = {
            'version': '0.1',
            'filehash': 'filename 01234567890123456789',
            'filesize': '123',
            'datetime': '2000-01-01',
            'uploads': 1,
        }
        assert from_dict(payload_dict)
        cm.close()
def test_empty_records():
    """Importing an empty file list must still report success."""
    with cloudmanager.CloudManager(TEST_DB_PATH, "storage", 31) as cm:
        assert cm.file_database.import_files([], None)
import cloudmanager # This sample uploads a few sample files. # # The limit is set to 20 MiB to force some # files to be purged from cache. readme = "5ad70c0c7cc50a73600df290e545cd9d2c83a815ce63c8bce400b867f1b4f5b5" nostro = "dc28c33939823340bdb7a5826d09eca991d6274a3cd4411e280c2a65bcc684cc" with cloudmanager.CloudManager("db/files.db", "storage", 20 * (2**20)) as cm: print cm.exists(readme) print cm.on_cache(readme) print cm.usage_ratio() print "uploading", cm.upload("README.md") print "uploading", cm.upload("nostromo.mp3") print "warming", cm.warm_up(readme) print "warming", cm.warm_up(nostro) data = cm.data_dump(128000) print data print cm.data_load(data, "blockchain-hash")
# NOTE(review): collapsed fragment, left byte-identical.  The leading '''
# closes a (here-invisible) triple-quoted string opened before this chunk;
# then comes the tail of get_cloudconfig() ("return userdata") and the
# start of the script entry point.  The `try:` has no matching `except`
# within this chunk (sibling scripts pair it with
# `except Exception as exc: logging.critical(...); sys.exit(1)`), so the
# fragment cannot be safely reflowed or completed here.
# The entry point: parses CLI args, configures logging, builds a
# CloudManager with create_snapshot=True, then either deletes a previous
# snapshot (--cleanup) or aborts if one already exists.
''' return userdata if __name__ == "__main__": try: parser = argparse.ArgumentParser( description='Create Openstack image containing Docker.') cloudmanager.add_parser_args(parser) args = parser.parse_args() cloudmanager.config_logger(args.verbose, args.verboseAll) cloudManager = cloudmanager.CloudManager( config_file_name=args.configFile, create_snapshot=True) userdata_snapshot = get_cloudconfig() previous_snapshot = cloudManager.nova_snapshot_find() if args.cleanup: if previous_snapshot is not None: logging.debug("Removing previous snapshot: %s", cloudManager.snapshot_name) cloudManager.nova_snapshot_delete(previous_snapshot) elif previous_snapshot is not None: logging.critical("Destination snapshot: %s already exist", cloudManager.snapshot_name) sys.exit(1)
def make_cloudmanager():
    """Build a CloudManager from the application settings.

    Returns:
        A cloudmanager.CloudManager backed by the configured database
        path, storage path, and storage size.
    """
    return cloudmanager.CloudManager(
        settings.DATABASE_PATH,
        settings.STORAGE_PATH,
        settings.STORAGE_SIZE)