def test_start_volumes_from():
    """start() must pass VolumesFrom plus DNS settings to start_container."""
    with injector({'dns-server': 'local.dns', 'dns-search-suffix': 'local'}):
        service = Service()
        service.name = 'my_service'
        service.volumes_from = ['foo', 'bar']
        flexmock(service)

        service.client = flexmock()
        service.client.should_receive('find_container_by_name') \
            .with_args('my_service').once() \
            .and_return(defer.succeed('123abc'))

        expected_config = {
            "VolumesFrom": ['foo', 'bar'],
            'DnsSearch': 'None.local',
            'Dns': ['local.dns'],
        }
        service.client.should_receive('start_container') \
            .with_args('123abc', ticket_id=123123, config=expected_config) \
            .once().and_return(defer.succeed('boo'))

        service.should_receive('inspect').with_args().once() \
            .and_return(defer.succeed('baz'))

        result = yield service.start(ticket_id=123123)
        assert result == 'baz'
def test_create():
    """create() must build the image, create the container, then inspect."""
    redis = flexmock()
    redis.should_receive('hgetall').and_return([])
    fake_inject({txredisapi.Connection: redis})

    service = Service()
    service.name = 'my_service'
    flexmock(service)

    # Expectations are .ordered(): build -> create -> inspect.
    service.image_builder = flexmock()
    service.image_builder.should_receive('build_image') \
        .with_args(ticket_id=123123, service=service) \
        .ordered().once().and_return(defer.succeed('boo'))

    service.client = flexmock()
    service.client.should_receive('create_container') \
        .with_args({"Hostname": 'my_service', "Image": 'boo'},
                   'my_service', ticket_id=123123) \
        .ordered().once().and_return('magic')

    service.should_receive('inspect').with_args() \
        .ordered().once().and_return('magic')

    result = yield service.create(ticket_id=123123)
    assert result == 'magic'
def test_generate_config_volumes():
    """Configured volumes must not leak into the generated container config."""
    redis = flexmock()
    redis.should_receive('hgetall').and_return([])
    fake_inject({txredisapi.Connection: redis})

    service = Service()
    service.name = 'my_service'
    service.volumes = [
        {'local': '/base/path/foo%d' % i, 'remote': '/bar%d' % i}
        for i in (1, 2, 3)
    ]

    config = yield service._generate_config('foo')

    # "Volumes" is intentionally absent: not passed as of Docker 1.6.
    assert config == {"Hostname": 'my_service', "Image": 'foo'}
def test_generate_config_volumes():
    """Configured volumes must not leak into the generated container config."""
    redis = flexmock()
    redis.should_receive('hgetall').and_return([])
    fake_inject({txredisapi.Connection: redis})

    service = Service()
    service.name = 'my_service'
    service.volumes = [
        {'local': '/base/path/foo1', 'remote': '/bar1'},
        {'local': '/base/path/foo2', 'remote': '/bar2'},
        {'local': '/base/path/foo3', 'remote': '/bar3'},
    ]

    config = yield service._generate_config('foo')

    # "Volumes" is intentionally absent: not passed as of Docker 1.6.
    assert config == {"Hostname": 'my_service', "Image": 'foo'}
def test_start_volumes():
    """start() must translate volumes into Binds entries for start_container."""
    with injector({'dns-server': 'local.dns', 'dns-search-suffix': 'local'}):
        service = Service()
        service.name = 'my_service'
        service.volumes = [
            {'local': '/base/path/foo1', 'remote': '/bar1'},
            {'local': '/base/path/foo2', 'remote': '/bar2'},
            {'local': '/base/path/foo3', 'remote': '/bar3'},
        ]
        flexmock(service)

        service.client = flexmock()
        service.client.should_receive('find_container_by_name') \
            .with_args('my_service').once() \
            .and_return(defer.succeed('123abc'))

        expected_config = {
            "Binds": ['/base/path/foo1:/bar1',
                      '/base/path/foo2:/bar2',
                      '/base/path/foo3:/bar3'],
            'DnsSearch': 'None.local',
            'Dns': ['local.dns'],
        }
        service.client.should_receive('start_container') \
            .with_args('123abc', ticket_id=123123, config=expected_config) \
            .once().and_return(defer.succeed('boo'))

        service.should_receive('inspect').with_args().once() \
            .and_return(defer.succeed('baz'))

        result = yield service.start(ticket_id=123123)
        assert result == 'baz'
def test_create():
    """create() must build the image, create the container, then inspect."""
    redis = flexmock()
    redis.should_receive('hgetall').and_return([])
    fake_inject({txredisapi.Connection: redis})

    svc = Service()
    svc.name = 'my_service'
    flexmock(svc)

    # Ordered expectations: image build happens before container creation,
    # which happens before the final inspect.
    svc.image_builder = flexmock()
    svc.image_builder.should_receive('build_image') \
        .with_args(ticket_id=123123, service=svc) \
        .ordered().once().and_return(defer.succeed('boo'))

    svc.client = flexmock()
    svc.client.should_receive('create_container') \
        .with_args({"Hostname": 'my_service', "Image": 'boo'},
                   'my_service', ticket_id=123123) \
        .ordered().once().and_return('magic')

    svc.should_receive('inspect').with_args() \
        .ordered().once().and_return('magic')

    created = yield svc.create(ticket_id=123123)
    assert created == 'magic'
def test_start_ports():
    """start() must expose configured ports via PortBindings."""
    with injector({'dns-server': 'local.dns', 'dns-search-suffix': 'local'}):
        service = Service()
        service.name = 'my_service'
        service.ports = ['22/tcp']
        flexmock(service)

        service.client = flexmock()
        service.client.should_receive('find_container_by_name') \
            .with_args('my_service').once() \
            .and_return(defer.succeed('123abc'))

        expected_config = {
            "PortBindings": {'22/tcp': [{}]},
            'DnsSearch': 'None.local',
            'Dns': ['local.dns'],
        }
        service.client.should_receive('start_container') \
            .with_args('123abc', ticket_id=123123, config=expected_config) \
            .once().and_return(defer.succeed('boo'))

        service.should_receive('inspect').with_args().once() \
            .and_return(defer.succeed('baz'))

        result = yield service.start(ticket_id=123123)
        assert result == 'baz'
def rebuild_haproxy(self, deployments=None, ticket_id=None):
    """Regenerate haproxy config files and rebuild the haproxy container.

    :param deployments: optional list of deployment names; when given, only
        those deployments are rebuilt, otherwise all of them are.
    :param ticket_id: optional task ticket used to report progress through
        the RPC server.
    """
    # Generate a fresh haproxy config for every known deployment.
    all_deployments = yield self.dump()

    for deployment_name, config in all_deployments.items():

        # Rebuild only the requested deployments.
        if deployments and deployment_name not in deployments:
            continue

        if ticket_id:
            self.rpc_server.task_progress(
                'Updating haproxy config on deployment %s' % deployment_name,
                ticket_id)

        deployment = yield self.dep_controller.get(deployment_name)

        haproxy_path = os.path.expanduser(
            '%s/haproxy/%s' % (self.settings.home_dir, deployment_name))
        if not os.path.exists(haproxy_path):
            os.makedirs(haproxy_path)

        template_path = os.path.join(haproxy_path, 'haproxy.tpl')
        haproxy_config_path = os.path.join(haproxy_path, 'haproxy.cfg')

        # Seed the template only if missing, so local edits are preserved.
        if not os.path.exists(template_path):
            with open(template_path, 'w+') as f:
                f.write(HAPROXY_TPL)

        with open(template_path) as f:
            template = Template(f.read())

        config_rendered = template.render(config)

        with open(haproxy_config_path, 'w+') as f:
            f.write(config_rendered)

        haproxy = Service(client=deployment.get_client())
        haproxy.name = 'mcloud_haproxy'
        # NOTE: the rendered config is baked into the image instead of being
        # volume-mounted (volume mounting was dropped here on purpose).
        haproxy.image_builder = VirtualFolderImageBuilder({
            'Dockerfile': """
            FROM haproxy:1.5
            ADD haproxy.cfg /usr/local/etc/haproxy/haproxy.cfg
            """,
            'haproxy.cfg': config_rendered
        })
        haproxy.ports = ['80/tcp:80', '443/tcp:443']

        logger.info('Containers updated: dumping haproxy config.')

        if ticket_id:
            self.rpc_server.task_progress(
                'updated %s - OK' % deployment_name, ticket_id)

        yield haproxy.rebuild()
def test_generate_config():
    """A bare service yields only Hostname and Image in its config."""
    redis = flexmock()
    redis.should_receive('hgetall').and_return([])
    fake_inject({txredisapi.Connection: redis})

    service = Service()
    service.name = 'my_service'

    config = yield service._generate_config('foo')
    assert config == {"Hostname": 'my_service', "Image": 'foo'}
def test_start():
    """start() must locate the container and start it with DNS settings."""
    with injector({'dns-server': 'local.dns', 'dns-search-suffix': 'local'}):
        service = Service()
        service.name = 'my_service'
        flexmock(service)

        service.client = flexmock()
        service.client.should_receive('find_container_by_name') \
            .with_args('my_service').once() \
            .and_return(defer.succeed('123abc'))

        expected_config = {'DnsSearch': 'None.local', 'Dns': ['local.dns']}
        service.client.should_receive('start_container') \
            .with_args('123abc', ticket_id=123123, config=expected_config) \
            .once().and_return(defer.succeed('boo'))

        service.should_receive('inspect').with_args().once() \
            .and_return(defer.succeed('baz'))

        result = yield service.start(ticket_id=123123)
        assert result == 'baz'
def test_generate_config_env():
    """Env vars must be rendered as KEY=value strings in the config."""
    redis = flexmock()
    redis.should_receive('hgetall').and_return({})
    fake_inject({txredisapi.Connection: redis})

    service = Service()
    service.name = 'my_service'
    service.env = {'FOO': 'bar', 'BAZ': 'foo'}

    config = yield service._generate_config('foo')
    assert config == {
        "Hostname": 'my_service',
        "Image": 'foo',
        "Env": ['FOO=bar', 'BAZ=foo'],
    }
def test_generate_config():
    """A bare service yields only Hostname and Image in its config."""
    mocked_redis = flexmock()
    mocked_redis.should_receive('hgetall').and_return([])
    fake_inject({txredisapi.Connection: mocked_redis})

    svc = Service()
    svc.name = 'my_service'

    generated = yield svc._generate_config('foo')
    assert generated == {"Hostname": 'my_service', "Image": 'foo'}
def task_sync_stop(self, ticket_id, app_name, sync_ticket_id):
    """Stop and destroy the rsync helper container of a finished sync.

    :param ticket_id: ticket used for task logging.
    :param app_name: application the sync container belongs to.
    :param sync_ticket_id: ticket that identifies the sync container name.
    """
    app = yield self.app_controller.get(app_name)
    client = yield app.get_client()

    rsync_service = Service(client=client)
    rsync_service.app_name = app_name
    rsync_service.name = '%s_%s_%s' % (app_name, '_rsync_', sync_ticket_id)

    yield rsync_service.inspect()

    if rsync_service.is_running():
        self.task_log(ticket_id, 'Stopping rsync container.')
        yield rsync_service.stop(ticket_id)

    if rsync_service.is_created():
        self.task_log(ticket_id, 'Destroying rsync container.')
        yield rsync_service.destroy(ticket_id)
def test_generate_config_env():
    """Env vars must be rendered as KEY=value strings in the config."""
    mocked_redis = flexmock()
    mocked_redis.should_receive('hgetall').and_return({})
    fake_inject({txredisapi.Connection: mocked_redis})

    svc = Service()
    svc.name = 'my_service'
    svc.env = {'FOO': 'bar', 'BAZ': 'foo'}

    generated = yield svc._generate_config('foo')
    assert generated == {
        "Hostname": 'my_service',
        "Image": 'foo',
        "Env": ['FOO=bar', 'BAZ=foo'],
    }
def test_start_volumes():
    """start() must translate volumes into Binds entries for start_container."""
    with injector({'dns-server': 'local.dns', 'dns-search-suffix': 'local'}):
        svc = Service()
        svc.name = 'my_service'
        svc.volumes = [
            {'local': '/base/path/foo%d' % i, 'remote': '/bar%d' % i}
            for i in (1, 2, 3)
        ]
        flexmock(svc)

        svc.client = flexmock()
        svc.client.should_receive('find_container_by_name') \
            .with_args('my_service').once() \
            .and_return(defer.succeed('123abc'))

        svc.client.should_receive('start_container') \
            .with_args('123abc', ticket_id=123123, config={
                "Binds": ['/base/path/foo1:/bar1',
                          '/base/path/foo2:/bar2',
                          '/base/path/foo3:/bar3'],
                'DnsSearch': 'None.local',
                'Dns': ['local.dns'],
            }).once().and_return(defer.succeed('boo'))

        svc.should_receive('inspect').with_args().once() \
            .and_return(defer.succeed('baz'))

        started = yield svc.start(ticket_id=123123)
        assert started == 'baz'
def rebuild_haproxy(self, deployments=None, ticket_id=None):
    """Regenerate haproxy config files and rebuild the haproxy container.

    :param deployments: optional list of deployment names; when given, only
        those deployments are rebuilt, otherwise all of them are.
    :param ticket_id: optional task ticket used to report progress through
        the RPC server.
    """
    # Generate a fresh haproxy config for every known deployment.
    all_deployments = yield self.dump()

    for deployment_name, config in all_deployments.items():

        # Rebuild only the requested deployments.
        if deployments and deployment_name not in deployments:
            continue

        if ticket_id:
            self.rpc_server.task_progress(
                'Updating haproxy config on deployment %s' % deployment_name,
                ticket_id)

        deployment = yield self.dep_controller.get(deployment_name)

        haproxy_path = os.path.expanduser(
            '%s/haproxy/%s' % (self.settings.home_dir, deployment_name))
        if not os.path.exists(haproxy_path):
            os.makedirs(haproxy_path)

        template_path = os.path.join(haproxy_path, 'haproxy.tpl')
        haproxy_config_path = os.path.join(haproxy_path, 'haproxy.cfg')

        # Seed the template only if missing, so local edits are preserved.
        if not os.path.exists(template_path):
            with open(template_path, 'w+') as f:
                f.write(HAPROXY_TPL)

        with open(template_path) as f:
            template = Template(f.read())

        config_rendered = template.render(config)

        with open(haproxy_config_path, 'w+') as f:
            f.write(config_rendered)

        haproxy = Service(client=deployment.get_client())
        haproxy.name = 'mcloud_haproxy'
        # NOTE: the rendered config is baked into the image instead of being
        # volume-mounted (volume mounting was dropped here on purpose).
        haproxy.image_builder = VirtualFolderImageBuilder({
            'Dockerfile': """
            FROM haproxy:1.5
            ADD haproxy.cfg /usr/local/etc/haproxy/haproxy.cfg
            """,
            'haproxy.cfg': config_rendered
        })
        haproxy.ports = ['80/tcp:80', '443/tcp:443']

        logger.info('Containers updated: dumping haproxy config.')

        if ticket_id:
            self.rpc_server.task_progress(
                'updated %s - OK' % deployment_name, ticket_id)

        yield haproxy.rebuild()
def task_sync(self, ticket_id, app_name, service_name, volume):
    """Start a one-shot rsync container exposing an app or service volume.

    :param ticket_id: ticket used for progress reporting and container naming.
    :param app_name: application whose data should be synced.
    :param service_name: optional service; when given, ``volume`` is required
        and the rsync container mounts that service's volumes.
    :param volume: volume name inside the service (mandatory with
        ``service_name``).
    :raises VolumeNotFound: when the service or volume cannot be resolved.
    :returns: dict with connection details (env credentials, host, port,
        volume, ticket_id) via ``defer.returnValue``.
    """
    app = yield self.app_controller.get(app_name)
    config = yield app.load()
    client = yield app.get_client()

    s = Service(client=client)
    s.app_name = app_name
    s.name = '%s_%s_%s' % (app_name, '_rsync_', ticket_id)
    s.image_builder = PrebuiltImageBuilder(image='modera/rsync')
    s.ports = [873]

    if service_name:
        if not volume:
            raise VolumeNotFound(
                'In case of service name is provided, '
                'volume name is mandatory!')

        services = config.get_services()
        service_full_name = '%s.%s' % (service_name, app_name)

        # Keep the try minimal: only the lookup itself should map a
        # KeyError to "service not found".
        try:
            service = services[service_full_name]
        except KeyError:
            raise VolumeNotFound(
                'Service with name %s was not found!' % service_name)

        all_volumes = service.list_volumes()
        if volume not in all_volumes:
            raise VolumeNotFound('Volume with name %s not found!' % volume)

        volume_name = volume
        s.volumes_from = service_full_name
    else:
        # No service given: rsync the whole application directory.
        s.volumes = [{'local': app.config['path'], 'remote': '/volume'}]
        volume_name = '/volume'

    # One-shot random credentials for the rsync daemon.
    s.env = {
        'USERNAME': ''.join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(32)),
        'PASSWORD': ''.join(
            random.choice(
                string.ascii_lowercase + string.punctuation + string.digits)
            for _ in range(32)),
        'ALLOW': '*'
    }

    yield s.start(ticket_id)

    deployment = yield app.get_deployment()
    sync_host = 'me' if deployment.local else deployment.host

    defer.returnValue({
        'env': s.env,
        'container': s.name,
        'host': sync_host,
        'port': s.public_ports()['873/tcp'][0]['HostPort'],
        'volume': volume_name,
        'ticket_id': ticket_id
    })
def task_sync(self, ticket_id, app_name, service_name, volume):
    """Start a one-shot rsync container exposing an app or service volume.

    :param ticket_id: ticket used for progress reporting and container naming.
    :param app_name: application whose data should be synced.
    :param service_name: optional service; when given, ``volume`` is required
        and the rsync container mounts that service's volumes.
    :param volume: volume name inside the service (mandatory with
        ``service_name``).
    :raises VolumeNotFound: when the service or volume cannot be resolved.
    :returns: dict with connection details (env credentials, host, port,
        volume, ticket_id) via ``defer.returnValue``.
    """
    app = yield self.app_controller.get(app_name)
    config = yield app.load()
    client = yield app.get_client()

    s = Service(client=client)
    s.app_name = app_name
    s.name = '%s_%s_%s' % (app_name, '_rsync_', ticket_id)
    s.image_builder = PrebuiltImageBuilder(image='modera/rsync')
    s.ports = [873]

    if service_name:
        if not volume:
            raise VolumeNotFound(
                'In case of service name is provided, '
                'volume name is mandatory!')

        services = config.get_services()
        service_full_name = '%s.%s' % (service_name, app_name)

        # Keep the try minimal: only the lookup itself should map a
        # KeyError to "service not found".
        try:
            service = services[service_full_name]
        except KeyError:
            raise VolumeNotFound(
                'Service with name %s was not found!' % service_name)

        all_volumes = service.list_volumes()
        if volume not in all_volumes:
            raise VolumeNotFound('Volume with name %s not found!' % volume)

        volume_name = volume
        s.volumes_from = service_full_name
    else:
        # No service given: rsync the whole application directory.
        s.volumes = [{'local': app.config['path'], 'remote': '/volume'}]
        volume_name = '/volume'

    # One-shot random credentials for the rsync daemon.
    s.env = {
        'USERNAME': ''.join(
            random.choice(string.ascii_lowercase + string.digits)
            for _ in range(32)),
        'PASSWORD': ''.join(
            random.choice(
                string.ascii_lowercase + string.punctuation + string.digits)
            for _ in range(32)),
        'ALLOW': '*'
    }

    yield s.start(ticket_id)

    deployment = yield app.get_deployment()
    sync_host = 'me' if deployment.local else deployment.host

    defer.returnValue({
        'env': s.env,
        'container': s.name,
        'host': sync_host,
        'port': s.public_ports()['873/tcp'][0]['HostPort'],
        'volume': volume_name,
        'ticket_id': ticket_id
    })