def server(request):
    """Bring up a Swift (dockswift) storage container for the test session.

    Returns a ``Server`` handle exposing the auth URL, credentials and a
    ``list`` helper that enumerates objects under a given file id.  The
    container is removed by a pytest finalizer when the fixture goes out
    of scope.
    """

    class Server(object):
        def __init__(self, auth_url, container_name, tenant_name, username,
                     password, container, ip):
            self.auth_url = auth_url
            self.container_name = container_name
            self.tenant_name = tenant_name
            self.username = username
            self.password = password
            self.container = container
            self.ip = ip

        def list(self, file_id):
            # Run `swift list <container> -p <file_id>` inside the docker
            # container and return the whitespace-split output.
            subcommand = 'list {0} -p {1}'.format(self.container_name, file_id)
            command = swift.SWIFT_COMMAND.format(self.ip, subcommand)
            output = docker.exec_(self.container, command.split(),
                                  output=True, stdout=sys.stderr)
            return output.split()

    container_name = 'onedata'
    result = swift.up('onedata/dockswift', [container_name], 'storage',
                      common.generate_uid())
    [container] = result['docker_ids']
    auth_url = 'http://{0}:{1}/v2.0/tokens'.format(result['host_name'],
                                                   result['keystone_port'])

    def fin():
        docker.remove([container], force=True, volumes=True)

    request.addfinalizer(fin)

    print("AuthUrl: {0}".format(auth_url))
    return Server(auth_url, container_name, result['tenant_name'],
                  result['user_name'], result['password'], container,
                  result['host_name'])
def server(request):
    """Start a GlusterFS storage container and return its connection details.

    The returned ``Server`` object carries everything a client needs to
    mount the volume (host, port, transport, volume name).  A pytest
    finalizer removes the container on teardown.
    """

    class Server(object):
        def __init__(self, mountpoint, uid, gid, hostname, port, volume,
                     transport, xlatorOptions):
            self.mountpoint = mountpoint
            self.uid = uid
            self.gid = gid
            self.hostname = hostname
            self.port = port
            self.volume = volume
            self.transport = transport
            self.xlatorOptions = xlatorOptions

    volume = 'data'
    # Random nested directory used as the export path inside the volume.
    mount_path = random_str() + "/" + random_str()
    result = glusterfs.up('gluster/gluster-centos', [volume], 'storage',
                          common.generate_uid(), 'tcp', mount_path)
    [container] = result['docker_ids']

    def fin():
        docker.remove([container], force=True, volumes=True)

    request.addfinalizer(fin)

    # NOTE(review): encode('ascii') yields bytes on Python 3 — this module
    # appears to target Python 2; confirm before porting.
    return Server(result['mountpoint'], 0, 0,
                  result['host_name'].encode('ascii'), result['port'],
                  volume, result['transport'].encode('ascii'), "")
def setup_class(cls):
    """Start an appmock instance for this test class.

    The environment description returned by ``appmock.up`` is stored on
    the class so tests and teardown code can reference the started
    dockers.
    """
    # os.path.join() with a single argument is a no-op, so the wrapper
    # call was dropped; test_utils.test_file() already yields the path.
    cls.result = appmock.up(image='onedata/builder',
                            bindir=appmock_dir,
                            dns_server='none',
                            uid=common.generate_uid(),
                            config_path=test_utils.test_file('env.json'))
def server(request):
    """Bring up an s3proxy container and return a client wrapper for it.

    The wrapper keeps an authenticated ``S3Connection`` and offers a
    ``list`` helper that enumerates keys directly under ``file_id/``.
    The container is removed by a pytest finalizer.
    """

    class Server(object):
        def __init__(self, scheme, hostname, bucket, access_key, secret_key):
            ip, port = hostname.split(':')
            self.scheme = scheme
            self.hostname = hostname
            self.access_key = access_key
            self.secret_key = secret_key
            self.bucket = bucket
            self.conn = S3Connection(self.access_key, self.secret_key,
                                     host=ip, port=int(port),
                                     is_secure=False,
                                     calling_format=OrdinaryCallingFormat())

        def list(self, file_id):
            # validate=False skips the extra existence round-trip.
            handle = self.conn.get_bucket(self.bucket, validate=False)
            return list(handle.list(prefix=file_id + '/', delimiter='/'))

    bucket = 'data'
    result = s3.up('onedata/s3proxy', [bucket], 'storage',
                   common.generate_uid())
    [container] = result['docker_ids']

    def fin():
        docker.remove([container], force=True, volumes=True)

    request.addfinalizer(fin)

    return Server('http', result['host_name'], bucket,
                  result['access_key'], result['secret_key'])
def server(request):
    """GlusterFS storage fixture: start a container, yield its settings.

    Identical contract to the other glusterfs fixture in this repo: the
    returned ``Server`` bundles mountpoint, ownership, host/port, volume
    and transport.  Container cleanup is registered as a finalizer.
    """

    class Server(object):
        def __init__(self, mountpoint, uid, gid, hostname, port, volume,
                     transport, xlatorOptions):
            self.mountpoint = mountpoint
            self.uid = uid
            self.gid = gid
            self.hostname = hostname
            self.port = port
            self.volume = volume
            self.transport = transport
            self.xlatorOptions = xlatorOptions

    uid = 0
    gid = 0
    volume = 'data'
    result = glusterfs.up('gluster/gluster-centos', [volume], 'storage',
                          common.generate_uid(), 'tcp',
                          random_str() + "/" + random_str())
    [container] = result['docker_ids']

    def fin():
        docker.remove([container], force=True, volumes=True)

    request.addfinalizer(fin)

    # NOTE(review): encode('ascii') returns bytes on Python 3 — presumably
    # this code targets Python 2; verify before porting.
    host = result['host_name'].encode('ascii')
    transport = result['transport'].encode('ascii')
    return Server(result['mountpoint'], uid, gid, host, result['port'],
                  volume, transport, "")
def setup_class(cls):
    """Start an appmock instance for this test class, logging to a fresh
    per-test-file logdir.

    The environment description is stored on the class for later use.
    """
    logdir = make_logdir(ENV_UP_LOGDIR, get_file_name(__file__))
    # os.path.join() with a single argument is a no-op, so the wrapper
    # call was dropped; config_file() already yields the env.json path.
    cls.result = appmock.up(image='onedata/builder',
                            bindir=APPMOCK_DIR,
                            dns_server='none',
                            uid=common.generate_uid(),
                            config_path=config_file('env.json'),
                            logdir=logdir)
def _appmock_client(request):
    """Start an appmock instance configured from the requesting test
    module's directory and return an ``AppmockClient`` bound to its IP.

    The docker container is removed by a pytest finalizer.
    """
    test_dir = os.path.dirname(os.path.realpath(request.module.__file__))
    result = appmock.up(image='onedata/builder',
                        bindir=appmock_dir,
                        dns_server='none',
                        uid=common.generate_uid(),
                        config_path=os.path.join(test_dir, 'env.json'))
    [container] = result['docker_ids']
    network = docker.inspect(container)['NetworkSettings']
    appmock_ip = network['IPAddress'].encode('ascii')

    def fin():
        docker.remove([container], force=True, volumes=True)

    request.addfinalizer(fin)
    return AppmockClient(appmock_ip)
def _appmock_client(request):
    """Spin up appmock beside the requesting test module and hand back an
    ``AppmockClient`` pointed at the container's IP address.

    Cleanup (container removal) is registered as a request finalizer.
    """
    module_path = os.path.realpath(request.module.__file__)
    test_dir = os.path.dirname(module_path)
    result = appmock.up(image='onedata/builder', bindir=appmock_dir,
                        dns_server='none', uid=common.generate_uid(),
                        config_path=os.path.join(test_dir, 'env.json'))
    [container] = result['docker_ids']
    inspected = docker.inspect(container)
    appmock_ip = inspected['NetworkSettings']['IPAddress'].encode('ascii')

    request.addfinalizer(
        lambda: docker.remove([container], force=True, volumes=True))

    return AppmockClient(appmock_ip)
def server(request):
    """Bring up a Ceph storage container and return its connection details.

    A single pool named ``data`` with 8 placement groups is created.  The
    returned ``Server`` carries the monitor host, username, key and pool
    name; the container is removed on fixture teardown.
    """

    class Server(object):
        def __init__(self, mon_host, username, key, pool_name):
            self.mon_host = mon_host
            self.username = username
            self.key = key
            self.pool_name = pool_name

    pool_name = 'data'
    result = ceph.up('onedata/ceph', [(pool_name, '8')], 'storage',
                     common.generate_uid())
    [container] = result['docker_ids']

    def fin():
        docker.remove([container], force=True, volumes=True)

    request.addfinalizer(fin)

    # NOTE(review): encode('ascii') yields bytes on Python 3 — presumably
    # Python 2 code; verify before porting.
    return Server(result['host_name'].encode('ascii'),
                  result['username'].encode('ascii'),
                  result['key'].encode('ascii'),
                  pool_name)
description='Bring up Ceph storage cluster.')

# Docker image used for the Ceph container.
parser.add_argument('-i', '--image', action='store',
                    default='docker.onedata.org/ceph-base',
                    help='docker image to use for the container',
                    dest='image')
# Pools given as "name:pg_num"; the flag may be repeated.
parser.add_argument(
    '-p', '--pool', action='append', default=[],
    help='pool name and number of placement groups in format name:pg_num',
    dest='pools')
# Suffix concatenated to docker names so parallel runs do not clash.
parser.add_argument('-u', '--uid', action='store',
                    default=common.generate_uid(),
                    help='uid that will be concatenated to docker names',
                    dest='uid')

args = parser.parse_args()

# Split each "name:pg_num" string into a (name, pg_num) tuple.
# NOTE(review): under Python 3 map() returns an iterator — fine as long as
# ceph.up only iterates it once; confirm the target Python version.
pools = map(lambda pool: tuple(pool.split(':')), args.pools)
config = ceph.up(args.image, pools, 'storage', args.uid)

# Emit the environment description as JSON for consumers of this script.
print(json.dumps(config))
default='onedata/riak', help='docker image to use for the container',
    dest='image')
# DNS mode: an explicit IP, 'none' (no DNS), or 'auto' (start one).
parser.add_argument(
    '-d', '--dns', action='store', default='auto',
    help='IP address of DNS or "none" - if no dns should be started or \
"auto" - if it should be started automatically',
    dest='dns')
# Suffix concatenated to docker names so parallel runs do not clash.
parser.add_argument(
    '-u', '--uid', action='store', default=common.generate_uid(),
    help='uid that will be concatenated to docker names', dest='uid')
# Optional custom argument forwarded to `riak-admin create maps`.
parser.add_argument(
    '--maps', action='store', default=None,
    help='custom argument for `riak-admin create maps`', dest='maps')
# Number of Riak nodes to start.
parser.add_argument(
    '-n', '--nodes', type=int, action='store', default=2,
This software is released under the MIT license cited in 'LICENSE.txt'

A script that brings up a nfs server. Run the script with -h flag to learn
about script's running options.
"""

from __future__ import print_function

import argparse
import json

from environment import nfs, common, dockers_config

# Command-line interface: only the docker image can be overridden.
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    description='Bring up nfs server.')
parser.add_argument(
    '-i', '--image', action='store', default=None,
    help='override of docker image for the container', dest='image')

args = parser.parse_args()
# Fall back to the configured default 'worker' image when -i was not given.
dockers_config.ensure_image(args, 'image', 'worker')

config = nfs.up(args.image, common.generate_uid(), 'storage')

# Emit the environment description as JSON for consumers of this script.
print(json.dumps(config))
A script that brings up a nfs server. Run the script with -h flag to learn
about script's running options.
"""

from __future__ import print_function

import argparse
import json

from environment import nfs
from environment import common

# Command-line interface: only the docker image is configurable.
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    description="Bring up nfs server.",
)
parser.add_argument(
    "-i",
    "--image",
    action="store",
    default="onedata/worker",
    help="docker image to use for the container",
    dest="image",
)

args = parser.parse_args()

config = nfs.up(args.image, common.generate_uid(), "storage")

# Emit the environment description as JSON for consumers of this script.
print(json.dumps(config))
dest='bin_op_worker')
# Path to a precompiled cluster_manager checkout.
parser.add_argument(
    '-bcm', '--bin-cm', action='store',
    default=os.getcwd() + '/cluster_manager',
    help='the path to cluster_manager repository (precompiled)',
    dest='bin_cluster_manager')

# Prepare config
args = parser.parse_args()
config = common.parse_json_file(args.config_path)
output = {
    'cluster_manager_nodes': [],
    'cluster_worker_nodes': [],
}
uid = common.generate_uid()

# Start DNS
[dns_server], dns_output = dns.maybe_start('auto', uid)
common.merge(output, dns_output)

# Start cms
cm_output = cluster_manager.up(args.image, args.bin_cluster_manager,
                               dns_server, uid, args.config_path,
                               args.logdir)
common.merge(output, cm_output)

# Start workers
worker_output = cluster_worker.up(args.image, args.bin_op_worker,
                                  dns_server, uid, args.config_path,
                                  args.logdir)
common.merge(output, worker_output)

# Make sure domains are added to the DNS server
dns.maybe_restart_with_configuration('auto', uid, output)
dest="bin_op_worker", ) parser.add_argument( "-bcm", "--bin-cm", action="store", default=os.getcwd() + "/cluster_manager", help="the path to cluster_manager repository (precompiled)", dest="bin_cluster_manager", ) # Prepare config args = parser.parse_args() config = common.parse_json_file(args.config_path) output = {"cluster_manager_nodes": [], "cluster_worker_nodes": []} uid = common.generate_uid() # Start DNS [dns_server], dns_output = dns.maybe_start("auto", uid) common.merge(output, dns_output) # Start cms cm_output = cluster_manager.up(args.image, args.bin_cluster_manager, dns_server, uid, args.config_path, args.logdir) common.merge(output, cm_output) # Start workers worker_output = cluster_worker.up(args.image, args.bin_op_worker, dns_server, uid, args.config_path, args.logdir) common.merge(output, worker_output) # Make sure domain are added to the dns server dns.maybe_restart_with_configuration("auto", uid, output)