def main():
    """Agent entry point: configure logging, load plugins, and run the
    event client until it exits."""
    if Config.setup_logger():
        _setup_logger()
    else:
        logging.basicConfig(level=logging.INFO)

    args = _args()
    Config.set_access_key(args.access_key)
    Config.set_secret_key(args.secret_key)
    Config.set_api_url(args.url)

    process_manager.init()
    plugins.load()

    log.info('API URL %s', Config.api_url())
    client = EventClient(Config.api_url(),
                         auth=Config.api_auth(),
                         workers=args.workers,
                         agent_id=args.agent_id)

    subscriptions = _gather_events()
    log.info("Subscribing to %s", subscriptions)

    # Give every registered lifecycle plugin a chance to start its
    # background services before events begin flowing.
    for lifecycle_hook in get_type_list(LIFECYCLE):
        lifecycle_hook.on_startup()

    client.run(subscriptions)
    sys.exit(0)
def _get_volume(vol_name, vol_size, instance_name, user):
    # Return the mount directory for a managed volume, creating, tagging
    # and mounting a new volume when one does not already exist.
    #
    # vol_name/user determine the volume directory; vol_size is used only
    # when a new volume is created; instance_name must match the recorded
    # owner of an existing volume directory (asserted below).
    path = _get_volume_dir(vol_name, user)
    if os.path.exists(path):
        volume_uuid = _get_volume_uuid(path)
        create = False
        if volume_uuid == "":
            # Directory exists but no volume uuid recorded: fall through
            # to creating a fresh volume below.
            log.warning("Found volume directory but cannot find related \
volume! Create one")
            create = True
        assert _get_volume_instance_name(path) == instance_name
        if not create:
            mount_dir = os.path.join(path, volume_uuid)
            # Re-mount only if not already mounted in the volmgr namespace
            if not service.mounted(mount_dir,
                                   Config.volmgr_mount_namespace_fd()):
                v.mount_volume(volume_uuid, mount_dir, False,
                               Config.volmgr_mount_namespace_fd())
            return mount_dir
    volume_uuid = v.create_volume(vol_size)
    # NOTE(review): blockstore_uuid is not defined in this function —
    # presumably a module-level global; confirm it is initialized before
    # this code path runs.
    v.add_volume_to_blockstore(volume_uuid, blockstore_uuid)
    mount_dir = os.path.join(path, volume_uuid)
    os.makedirs(mount_dir)
    # Tag the volume directory with its owning instance name
    f = open(os.path.join(path, INSTANCE_TAG_FILE), "w")
    try:
        f.write(instance_name)
    finally:
        f.close()
    # True => format/initialize the newly created volume on mount
    v.mount_volume(volume_uuid, mount_dir, True,
                   Config.volmgr_mount_namespace_fd())
    return mount_dir
def _add_resources(self, ping, pong):
    """Attach libvirt host/compute/storage-pool resources to a pong."""
    if not utils.ping_include_resources(ping):
        return

    physical_host = Config.physical_host()

    compute = {
        'type': 'host',
        'kind': LIBVIRT_KIND,
        'name': Config.hostname() + '/libvirt',
        'uuid': LibvirtConfig.libvirt_uuid(),
        'physicalHostUuid': physical_host['uuid'],
        'data': {
            'libvirt': {
                'type': self.get_default_type()
            }
        }
    }

    resources = [physical_host, compute]
    # Each storage driver reports the pools it can see; tag every pool
    # with the driver that discovered it.
    for storage_driver in pool_drivers():
        for discovered_pool in storage_driver.discover(compute):
            pool_data = utils.get_map_value(discovered_pool, 'data',
                                            'libvirt')
            pool_data['driver'] = storage_driver.driver_name()
            resources.append(discovered_pool)

    utils.ping_add_resources(pong, *resources)
def _run(self, events):
    """Subscribe to the server event stream and dispatch each line.

    POSTs the subscription to self._url with streaming enabled and reads
    the chunked response line by line: ping lines go to the ping queue,
    all other events to the worker queue. Stops when too many lines have
    been dropped or the parent process dies; always terminates child
    workers and exits the process.
    """
    ppid = os.environ.get("AGENT_PARENT_PID")
    headers = {}
    args = {
        "data": _data(events, self._agent_id),
        "stream": True,
        "headers": headers,
        "timeout": Config.event_read_timeout()
    }
    if self._auth is not None:
        if isinstance(self._auth, basestring):
            # Bug fix: this was headers["Authorization", self._auth] — a
            # dict lookup with a tuple key (KeyError) instead of setting
            # the header.
            headers["Authorization"] = self._auth
        else:
            args["auth"] = self._auth
    try:
        drop_count = 0
        ping_drop = 0
        r = requests.post(self._url, **args)
        if r.status_code != 201:
            raise Exception(r.text)

        self._start_children()

        for line in r.iter_lines(chunk_size=1):
            try:
                ping = '"ping' in line
                if len(line) > 0:
                    # TODO Need a better approach here
                    if ping:
                        self._ping_queue.put(line, block=False)
                        ping_drop = 0
                    else:
                        self._queue.put(line, block=False)
            except Full:
                log.info("Dropping request %s" % line)
                drop_count += 1
                # Bug fix (consistent with the websocket on_message
                # handler): compare the ping counter against the ping
                # limit, not the overall counter. 'max' also renamed to
                # avoid shadowing the builtin.
                drop_max = Config.max_dropped_requests()
                drop_test = drop_count
                if ping:
                    ping_drop += 1
                    drop_test = ping_drop
                    drop_max = Config.max_dropped_ping()

                if drop_test > drop_max:
                    log.error('Max dropped requests [%s] exceeded',
                              drop_max)
                    break

            if not _should_run(ppid):
                log.info("Parent process has died or stamp changed,"
                         " exiting")
                break
    finally:
        # Best-effort cleanup of worker children; ignore failures so one
        # bad child cannot prevent the others from being terminated.
        for child in self._children:
            if hasattr(child, "terminate"):
                try:
                    child.terminate()
                except:
                    pass
    sys.exit(0)
def setup_cattle_config_url(instance, create_config):
    """Label agent containers and inject CATTLE_* config-url env vars."""
    if instance.get('agentId') is None:
        return

    if 'labels' not in create_config:
        create_config['labels'] = {}
    create_config['labels']['io.rancher.container.agent_id'] = \
        str(instance.get('agentId'))

    url = Config.config_url()
    if url is None:
        return

    parsed = urlparse(url)
    if 'localhost' == parsed.hostname:
        # A localhost config URL is meaningless inside the container, so
        # hand the instance the pieces needed to reach the api proxy.
        proxy_port = Config.api_proxy_listen_port()
        add_to_env(create_config,
                   CATTLE_AGENT_INSTANCE='true',
                   CATTLE_CONFIG_URL_SCHEME=parsed.scheme,
                   CATTLE_CONFIG_URL_PATH=parsed.path,
                   CATTLE_CONFIG_URL_PORT=proxy_port)
    else:
        add_to_env(create_config, CATTLE_CONFIG_URL=url)
        add_to_env(create_config, CATTLE_URL=url)
def test_default_value():
    """default_value() resolution: env var wins over the default, a blank
    env value counts as unset, and CONFIG_OVERRIDE beats both."""
    var_name = uuid.uuid4().hex
    env_key = 'CATTLE_{}'.format(var_name)
    fallback = 'defaulted'

    # env var unset -> default is returned
    assert default_value(var_name, fallback) == fallback

    # default explicitly blank -> blank comes back
    assert default_value(var_name, '') == ''

    # env var set to blank -> treated as unset, default returned
    os.environ[env_key] = ''
    assert default_value(var_name, fallback) == fallback

    # env var set -> its value wins
    os.environ[env_key] = 'foobar'
    assert default_value(var_name, fallback) == 'foobar'

    # for completeness, set_secret_key which hits the CONFIG_OVERRIDE
    # code path
    Config.set_secret_key('override')
    assert default_value('SECRET_KEY', fallback) == 'override'
def post(req, resp):
    # Test hook: scrub the non-deterministic parts of an instance
    # activate response, then assert the container environment was wired
    # with the cattle config URL (and NOT the localhost/proxy variables).
    del resp['data']['instance']['+data']['dockerInspect']
    docker_container = resp['data']['instance']['+data']['dockerContainer']
    fields = resp['data']['instance']['+data']['+fields']
    id = docker_container['Id']

    # These vary per run and would break response comparison
    del docker_container['Created']
    del docker_container['Id']
    del docker_container['Status']
    docker_container = _sort_ports(docker_container)
    # Host ports are dynamically assigned by docker
    del docker_container['Ports'][0]['PublicPort']
    del docker_container['Ports'][1]['PublicPort']
    del fields['dockerIp']

    assert fields['dockerPorts']['8080/tcp'] is not None
    assert fields['dockerPorts']['12201/udp'] is not None
    # Pin the port values so the response matches the stored expectation
    fields['dockerPorts']['8080/tcp'] = '1234'
    fields['dockerPorts']['12201/udp'] = '5678'

    inspect = Client().inspect_container(id)
    port = Config.api_proxy_listen_port()

    assert 'CATTLE_CONFIG_URL={0}'.format(Config.config_url()) in \
        inspect['Config']['Env']
    # The proxy-style variables must only appear for localhost URLs
    assert 'CATTLE_CONFIG_URL_SCHEME=https' not in inspect['Config']['Env']
    assert 'CATTLE_CONFIG_URL_PATH=/a/path' not in inspect['Config']['Env']
    assert 'CATTLE_CONFIG_URL_PORT={0}'.format(port) not in \
        inspect['Config']['Env']
    assert 'ENV1=value1' in inspect['Config']['Env']
def on_message(ws, message):
    # Websocket message callback. Closure: `self`, `drops` (shared dict of
    # drop counters) and `ppid` come from the enclosing scope.
    #
    # Routes ping lines to the ping queue and other event lines to the
    # worker queue; closes the socket when the drop limit is exceeded or
    # the parent process has gone away.
    line = message.strip()
    try:
        ping = '"ping' in line
        if len(line) > 0:
            # TODO Need a better approach here
            if ping:
                self._ping_queue.put(line, block=False)
                # A successful ping enqueue resets the ping drop streak
                drops['ping_drop'] = 0
            else:
                self._queue.put(line, block=False)
    except Full:
        log.info("Dropping request %s" % line)
        drops['drop_count'] += 1
        drop_max = Config.max_dropped_requests()
        drop_type = 'overall'
        drop_test = drops['drop_count']
        if ping:
            # Dropped pings are held to their own (stricter) limit
            drops['ping_drop'] += 1
            drop_type = 'ping'
            drop_test = drops['ping_drop']
            drop_max = Config.max_dropped_ping()
        if drop_test > drop_max:
            log.error('Max of [%s] dropped [%s] requests exceeded',
                      drop_max, drop_type)
            ws.close()

    if not _should_run(ppid):
        log.info("Parent process has died or stamp changed,"
                 " exiting")
        ws.close()
def ns_exec(pid, event):
    """Run the event's handler script inside the namespaces of process
    *pid* via nsenter.

    Returns (retcode, text, data) where *data* is the unmarshalled first
    '{'-prefixed line of output, and *text* the plain output before it.
    """
    script = os.path.join(Config.home(), 'events', event.name.split(';')[0])
    cmd = ['nsenter', '-F', '-m', '-u', '-i', '-n', '-p',
           '-t', str(pid), '--', script]

    marshaller = get_type(MARSHALLER)
    payload = marshaller.to_string(event)
    data = None

    env = {}
    # Some customized docker builds (e.g. alidocker) expose the host's
    # /proc at /host/proc. The previous code nested two `with open(...)
    # as f` statements, which opened BOTH paths (shadowing the first
    # handle) and raised IOError whenever /host/proc was absent. Prefer
    # the host view when present, fall back to /proc otherwise.
    environ_path = '/host/proc/{}/environ'.format(pid)
    if not os.path.exists(environ_path):
        environ_path = '/proc/{}/environ'.format(pid)
    with open(environ_path) as f:
        # environ entries are NUL-separated KEY=VALUE pairs; copy only
        # the CATTLE* variables into the child environment.
        for entry in f.read().split('\0'):
            if not len(entry):
                continue
            kv = entry.split('=', 1)
            if kv[0].startswith('CATTLE'):
                env[kv[0]] = kv[1]

    env['PATH'] = os.environ['PATH']
    env['CATTLE_CONFIG_URL'] = Config.config_url()

    for i in range(3):
        p = popen(cmd,
                  env=env,
                  stdin=subprocess.PIPE,
                  stdout=subprocess.PIPE,
                  stderr=subprocess.STDOUT)
        output, error = p.communicate(input=payload)
        retcode = p.poll()

        if retcode == 0:
            break

        # Retry only makes sense while the script is missing inside the
        # namespace (e.g. not yet synced); if it exists, the failure is
        # real — stop retrying.
        exists_cmd = cmd[:-1] + ['/usr/bin/test', '-e', script]
        if popen(exists_cmd, env=env).wait() == 0:
            break

        # Sleep and try again if missing
        time.sleep(1)

    if retcode:
        return retcode, output, None

    text = []
    for line in output.splitlines():
        if line.startswith('{'):
            data = marshaller.from_string(line)
            break
        text.append(line)

    return retcode, ''.join(text), data
def volume_exists(path):
    """Report whether *path* is an active managed-volume mount point."""
    # All three preconditions must hold before asking the mount service
    is_candidate = (enabled() and
                    path.startswith(Config.volmgr_mount_dir()) and
                    os.path.exists(path))
    if not is_candidate:
        return False
    return service.mounted(path, Config.volmgr_mount_namespace_fd())
def ns_exec(pid, event):
    """Execute the event's handler script inside the namespaces of
    process *pid* using nsenter.

    Returns (retcode, text, data): *data* is the unmarshalled first
    '{'-prefixed output line, *text* the plain output preceding it.
    """
    script = os.path.join(Config.home(), 'events', event.name.split(';')[0])
    cmd = ['nsenter', '-F', '-m', '-u', '-i', '-n', '-p',
           '-t', str(pid), '--', script]

    marshaller = get_type(MARSHALLER)
    payload = marshaller.to_string(event)
    data = None

    # Copy the CATTLE* variables out of the target process's environment
    env = {}
    with open('/proc/{}/environ'.format(pid)) as environ_file:
        for entry in environ_file.read().split('\0'):
            if not len(entry):
                continue
            key_value = entry.split('=', 1)
            if key_value[0].startswith('CATTLE'):
                env[key_value[0]] = key_value[1]

    env['PATH'] = os.environ['PATH']
    env['CATTLE_CONFIG_URL'] = Config.config_url()

    for _attempt in range(3):
        proc = popen(cmd,
                     env=env,
                     stdin=subprocess.PIPE,
                     stdout=subprocess.PIPE,
                     stderr=subprocess.STDOUT)
        output, error = proc.communicate(input=payload)
        retcode = proc.poll()
        if retcode == 0:
            break
        # Only retry while the script is missing inside the namespace;
        # if it exists, the failure is real.
        exists_cmd = cmd[:-1] + ['/usr/bin/test', '-e', script]
        if popen(exists_cmd, env=env).wait() == 0:
            break
        time.sleep(1)

    if retcode:
        return retcode, output, None

    plain_lines = []
    for out_line in output.splitlines():
        if out_line.startswith('{'):
            data = marshaller.from_string(out_line)
            break
        plain_lines.append(out_line)

    return retcode, ''.join(plain_lines), data
def _get_host_labels(self):
    """Return labels reported by the host_info collector merged with any
    configured labels.

    Never raises on collector failure; the error is logged and only the
    configured labels (if any) are returned.
    """
    # Bug fix: 'labels' was previously unbound when host_labels() raised,
    # turning the handled exception into a NameError below.
    labels = {}
    try:
        labels = self.host_info.host_labels()
    except:
        log.exception("Error getting host labels")
    if Config.labels():
        # NOTE(review): assumes host_labels() returns a dict when it
        # succeeds — verify against the collector.
        labels.update(Config.labels())
    return labels
def on_startup(self):
    """Launch cadvisor in the background, entering the host mount
    namespace when the host's /proc is exposed."""
    base_cmd = [cadvisor_bin(),
                '-logtostderr=true',
                '-ip', Config.cadvisor_ip(),
                '-port', str(Config.cadvisor_port())]
    if os.path.exists('/host/proc/1/ns/mnt'):
        base_cmd = ['nsenter', '--mount=/host/proc/1/ns/mnt',
                    '--'] + base_cmd
    background(base_cmd)
def __init__(self, docker_client=None):
    """Stats collector backed by cadvisor and (optionally) docker."""
    self.unit = 1048576  # bytes per MiB
    self.cadvisor = CadvisorAPIClient(Config.cadvisor_ip(),
                                      Config.cadvisor_port())
    self.docker_client = docker_client
    # Storage driver is only discoverable when a docker client was given
    if self.docker_client:
        self.docker_storage_driver = \
            self.docker_client.info().get("Driver", None)
    else:
        self.docker_storage_driver = None
def on_startup(self):
    """Start host-api pointed at the local cadvisor endpoint."""
    env = dict(os.environ)
    env['HOST_API_CONFIG_FILE'] = host_api_config()
    cadvisor_url = 'http://{0}:{1}'.format(Config.cadvisor_ip(),
                                           Config.cadvisor_port())
    background(['host-api',
                '-cadvisor-url', cadvisor_url,
                '-logtostderr=true',
                '-ip', Config.host_api_ip(),
                '-port', str(Config.host_api_port())],
               env=env)
def _args():
    """Parse agent CLI arguments; defaults come from CATTLE_* env vars
    via Config."""
    parser = argparse.ArgumentParser(add_help=True)
    # (flag, default, env var named in the help text)
    for flag, default, env_var in (
            ("--access-key", Config.access_key(), 'CATTLE_ACCESS_KEY'),
            ("--secret-key", Config.secret_key(), 'CATTLE_SECRET_KEY'),
            ("--url", Config.api_url(), 'CATTLE_URL'),
            ("--workers", Config.workers(), 'CATTLE_WORKERS')):
        parser.add_argument(flag, default=default,
                            help='Default value from {0}'.format(env_var))
    parser.add_argument("--agent-id")
    return parser.parse_args()
def on_startup(self):
    """Start cadvisor, preferring an explicit wrapper command over
    entering the host mount namespace."""
    cmd = ['cadvisor',
           '-logtostderr=true',
           '-listen_ip', Config.cadvisor_ip(),
           '-port', str(Config.cadvisor_port())]
    wrapper = Config.cadvisor_wrapper()
    if len(wrapper):
        cmd.insert(0, wrapper)
    elif os.path.exists('/host/proc/1/ns/mnt'):
        cmd = ['nsenter', '--mount=/host/proc/1/ns/mnt', '--'] + cmd
    background(cmd)
def post(req, resp):
    # Test hook: after instance activation, verify the container's
    # environment was wired with the plain cattle config URL and NOT the
    # localhost/proxy-style variables.
    id = resp['data']['instanceHostMap']['instance']
    id = id['+data']['dockerContainer']['Id']
    inspect = docker_client().inspect_container(id)
    instance_activate_common_validation(resp)

    port = Config.api_proxy_listen_port()

    assert 'CATTLE_CONFIG_URL={0}'.format(Config.config_url()) in \
        inspect['Config']['Env']
    # Proxy variables must only be set when the config URL is localhost
    assert 'CATTLE_CONFIG_URL_SCHEME=https' not in inspect['Config']['Env']
    assert 'CATTLE_CONFIG_URL_PATH=/a/path' not in inspect['Config']['Env']
    assert 'CATTLE_CONFIG_URL_PORT={0}'.format(port) not in \
        inspect['Config']['Env']
    assert 'ENV1=value1' in inspect['Config']['Env']
def on_startup(self):
    """Run cadvisor in the background; an explicit wrapper command takes
    precedence over nsenter into the host mount namespace."""
    command = [
        'cadvisor',
        '-logtostderr=true',
        '-listen_ip', Config.cadvisor_ip(),
        '-port', str(Config.cadvisor_port()),
    ]
    wrapper = Config.cadvisor_wrapper()
    if len(wrapper):
        command.insert(0, wrapper)
    elif os.path.exists('/host/proc/1/ns/mnt'):
        command = ['nsenter', '--mount=/host/proc/1/ns/mnt',
                   '--'] + command
    background(command)
def _record_state(self, client, instance, docker_id=None):
    """Persist the marshalled *instance* under the container state dir,
    keyed by docker id; written via a temp file and rename."""
    if docker_id is None:
        container = self.get_container(client, instance)
        if container is not None:
            docker_id = container['Id']
    if docker_id is None:
        return

    state_dir = Config.container_state_dir()
    tmp_path = path.join(state_dir, 'tmp-%s' % docker_id)
    final_path = path.join(state_dir, docker_id)

    # Clear leftovers from any previous (possibly interrupted) write
    for stale in (tmp_path, final_path):
        if path.exists(stale):
            remove(stale)

    if not path.exists(state_dir):
        makedirs(state_dir)

    marshaller = get_type(MARSHALLER)
    with open(tmp_path, 'w') as outfile:
        outfile.write(marshaller.to_string(instance))
    rename(tmp_path, final_path)
def state_file_exists(docker_id):
    """Best-effort check for a recorded state file; False on any error."""
    try:
        state_path = path.join(Config.container_state_dir(), docker_id)
        return os.path.exists(state_path)
    except:
        return False
def check_output(*popenargs, **kwargs):
    # Run a command and return its output, lazily selecting and caching
    # one implementation in the module-level _check_output_impl:
    #   1. eventlet-friendly e_check_output when eventlet mode is on
    #   2. subprocess32.check_output when that backport is installed
    #   3. stdlib subprocess.check_output (python >= 2.7)
    #   4. local _check_output fallback otherwise
    global _check_output_impl
    if _check_output_impl is None:
        from cattle import Config
        if Config.is_eventlet() and 'e_check_output' in globals():
            _check_output_impl = e_check_output
        else:
            try:
                import subprocess32
                _check_output_impl = subprocess32.check_output
            except:
                if 'check_output' in dir(subprocess):
                    _check_output_impl = subprocess.check_output
                else:
                    _check_output_impl = _check_output

    try:
        return _check_output_impl(*popenargs, **kwargs)
    except subprocess.CalledProcessError as e:
        # Already the stdlib exception type callers expect: re-raise as-is
        raise e
    except Exception as e:
        # eventlets seems to throw a CalledProcessError that isn't the same
        # as the subprocess package exception
        try:
            raise subprocess.CalledProcessError(e.returncode, e.cmd,
                                                e.output)
        except subprocess.CalledProcessError as e1:
            raise e1
        except:
            # This is in case CallProcessError doesn't have returncode, cmd,
            # or output
            raise e
def update_managed_volume(instance, config, start_config):
    # Rewrite rancher-prefixed bind-mount sources in
    # start_config['binds'] to real volmgr mount points. Supported forms:
    #   <RANCHER_PREFIX><vol_name>                       -> create/reuse
    #   <RANCHER_PREFIX><vol_name>/restore/<vol>/<snap>  -> restore
    if not enabled():
        return
    if 'binds' not in start_config:
        return
    binds_map = start_config['binds']
    instance_name = config['name']
    new_binds_map = {}
    # Placeholder owner used when the container defines no user
    user = "******"
    if 'user' in config and config['user'] is not None:
        user = config['user']
    for bind in binds_map:
        src = bind
        dst = binds_map[bind]
        if src.startswith(RANCHER_PREFIX):
            vol_command = src[len(RANCHER_PREFIX):]
            words = vol_command.split("/")
            vol_name = words[0]
            command = ""
            old_volume_uuid = ""
            snapshot_uuid = ""
            if len(words) > 1:
                command = words[1]
                if command == "restore":
                    # restore requires exactly: name/restore/<vol>/<snap>
                    assert len(words) == 4
                    old_volume_uuid = words[2]
                    snapshot_uuid = words[3]
                else:
                    # Unknown sub-command: fall through to plain create
                    log.error(
                        "unsupported command %s, \
ignore and create volume", command)
            if command == "restore":
                log.info("About to restore snapshot")
                mount_point = _restore_snapshot(
                    vol_name, old_volume_uuid,
                    Config.volmgr_default_volume_size(),
                    snapshot_uuid,
                    instance_name, user)
                new_binds_map[mount_point] = dst
            else:
                mount_point = _get_volume(
                    vol_name,
                    Config.volmgr_default_volume_size(),
                    instance_name, user)
                new_binds_map[mount_point] = dst
        else:
            # Unmanaged bind: pass through untouched
            new_binds_map[src] = binds_map[src]
    start_config['binds'] = new_binds_map
def _add_resources(self, ping, pong):
    """Answer a ping by attaching host, storage pool and IP resources."""
    if not utils.ping_include_resources(ping):
        return

    stats = None
    if utils.ping_include_stats(ping):
        try:
            stats = self.host_info.collect_data()
        except:
            log.exception("Error getting host info stats")

    physical_host = Config.physical_host()
    host_uuid = DockerConfig.docker_uuid()

    compute = {
        'type': 'host',
        'kind': 'docker',
        'hostname': Config.hostname(),
        'createLabels': self._get_host_create_labels(),
        'labels': self._get_host_labels(),
        'physicalHostUuid': physical_host['uuid'],
        'uuid': host_uuid,
        'info': stats,
    }
    # Only advertise a proxy when one is actually configured
    proxy = Config.host_proxy()
    if proxy is not None:
        compute['apiProxy'] = proxy

    pool = {
        'type': 'storagePool',
        'kind': 'docker',
        'name': compute['hostname'] + ' Storage Pool',
        'hostUuid': host_uuid,
        'uuid': host_uuid + '-pool',
    }

    resolved_ip = socket.gethostbyname(DockerConfig.docker_host_ip())
    ip = {
        'type': 'ipAddress',
        'uuid': resolved_ip,
        'address': resolved_ip,
        'hostUuid': host_uuid,
    }

    utils.ping_add_resources(pong, physical_host, compute, pool, ip)
def _add_resources(self, ping, pong):
    """Publish physical-host, compute, storage-pool and ip resources in
    reply to a ping."""
    if not utils.ping_include_resources(ping):
        return

    host_stats = None
    if utils.ping_include_stats(ping):
        try:
            host_stats = self.host_info.collect_data()
        except:
            log.exception("Error getting host info stats")

    physical_host = Config.physical_host()
    host_uuid = DockerConfig.docker_uuid()

    compute = {
        'type': 'host',
        'kind': 'docker',
        'name': Config.hostname(),
        'createLabels': self._get_host_create_labels(),
        'labels': self._get_host_labels(),
        'physicalHostUuid': physical_host['uuid'],
        'uuid': host_uuid,
        'info': host_stats,
    }
    proxy = Config.host_proxy()
    if proxy is not None:
        compute['apiProxy'] = proxy

    pool = {
        'type': 'storagePool',
        'kind': 'docker',
        'name': compute['name'] + ' Storage Pool',
        'hostUuid': host_uuid,
        'uuid': host_uuid + '-pool',
    }

    resolved_ip = socket.gethostbyname(DockerConfig.docker_host_ip())
    ip = {
        'type': 'ipAddress',
        'uuid': resolved_ip,
        'address': resolved_ip,
        'hostUuid': host_uuid,
    }

    utils.ping_add_resources(pong, physical_host, compute, pool, ip)
def update_managed_volume(instance, config, start_config):
    # Rewrite rancher-prefixed bind-mount sources in
    # start_config['binds'] to real volmgr mount points. Supported forms:
    #   <RANCHER_PREFIX><vol_name>                       -> create/reuse
    #   <RANCHER_PREFIX><vol_name>/restore/<vol>/<snap>  -> restore
    #
    # NOTE(review): near-duplicate of another update_managed_volume in
    # this codebase — consider consolidating.
    if not enabled():
        return
    if 'binds' not in start_config:
        return
    binds_map = start_config['binds']
    instance_name = config['name']
    new_binds_map = {}
    # Placeholder owner used when the container defines no user
    user = "******"
    if 'user' in config and config['user'] is not None:
        user = config['user']
    for bind in binds_map:
        src = bind
        dst = binds_map[bind]
        if src.startswith(RANCHER_PREFIX):
            vol_command = src[len(RANCHER_PREFIX):]
            words = vol_command.split("/")
            vol_name = words[0]
            command = ""
            old_volume_uuid = ""
            snapshot_uuid = ""
            if len(words) > 1:
                command = words[1]
                if command == "restore":
                    # restore requires exactly: name/restore/<vol>/<snap>
                    assert len(words) == 4
                    old_volume_uuid = words[2]
                    snapshot_uuid = words[3]
                else:
                    # Unknown sub-command: fall through to plain create
                    log.error("unsupported command %s, \
ignore and create volume", command)
            if command == "restore":
                log.info("About to restore snapshot")
                mount_point = _restore_snapshot(
                    vol_name, old_volume_uuid,
                    Config.volmgr_default_volume_size(),
                    snapshot_uuid,
                    instance_name, user)
                new_binds_map[mount_point] = dst
            else:
                mount_point = _get_volume(
                    vol_name,
                    Config.volmgr_default_volume_size(),
                    instance_name, user)
                new_binds_map[mount_point] = dst
        else:
            # Unmanaged bind: pass through untouched
            new_binds_map[src] = binds_map[src]
    start_config['binds'] = new_binds_map
def remove_state_file(container):
    """Delete the recorded state file for *container*; errors ignored."""
    if not container:
        return
    try:
        state_path = path.join(Config.container_state_dir(),
                               container['Id'])
        if os.path.exists(state_path):
            os.remove(state_path)
    except:
        # Best-effort cleanup; state files are advisory
        pass
def _get_docker_client(host, version_override=None):
    # Build a docker client for *host*. When the host carries an https
    # clusterConnection plus cert/key material, write those under a
    # per-account directory and construct a TLS config from them.
    #
    # Hosts without clusterConnection data fall through to a plain
    # (local) docker client.
    cluster_connection = None
    tls_config = None
    try:
        cluster_connection = host['clusterConnection']
        if cluster_connection.startswith('https'):
            try:
                account_id = host['accountId']
                ca_crt = host['caCrt']
                client_crt = host['clientCrt']
                client_key = host['clientKey']

                client_certs_dir = Config.client_certs_dir()
                acct_client_cert_dir = \
                    path.join(client_certs_dir, str(account_id))
                if not path.exists(acct_client_cert_dir):
                    log.debug('Creating client cert directory: %s',
                              acct_client_cert_dir)
                    makedirs(acct_client_cert_dir)

                if ca_crt:
                    log.debug('Writing cert auth')
                    with open(path.join(acct_client_cert_dir, 'ca.crt'),
                              'w') as f:
                        f.write(ca_crt)
                if client_crt:
                    log.debug('Writing client cert')
                    with open(path.join(acct_client_cert_dir,
                                        'client.crt'), 'w') as f:
                        f.write(client_crt)
                if client_key:
                    log.debug('Writing client key')
                    with open(path.join(acct_client_cert_dir,
                                        'client.key'), 'w') as f:
                        f.write(client_key)

                # TLS verification is only possible with the full set
                if ca_crt and client_crt and client_key:
                    tls_config = tls.TLSConfig(
                        client_cert=(
                            path.join(acct_client_cert_dir, 'client.crt'),
                            path.join(acct_client_cert_dir, 'client.key')
                        ),
                        verify=path.join(acct_client_cert_dir, 'ca.crt'),
                        # Cluster endpoints use IPs, not cert hostnames
                        assert_hostname=False
                    )
            except (KeyError, AttributeError) as e:
                raise Exception(
                    'Unable to process cert/keys for cluster',
                    cluster_connection, e)
    except (KeyError, AttributeError):
        # No clusterConnection on this host: use the default client
        pass
    return docker_client(
        version=version_override,
        base_url_override=cluster_connection,
        tls_config=tls_config)
def before_start(self, instance, host, config, start_config):
    """Inject cattle config-url environment for agent instances."""
    if instance.get('agentId') is None:
        return

    url = Config.config_url()
    if url is None:
        return

    parsed = urlparse(url)
    if parsed.hostname == 'localhost':
        # Localhost URLs are rewritten to go through the api proxy
        add_to_env(config,
                   CATTLE_AGENT_INSTANCE='true',
                   CATTLE_CONFIG_URL_SCHEME=parsed.scheme,
                   CATTLE_CONFIG_URL_PATH=parsed.path,
                   CATTLE_CONFIG_URL_PORT=Config.api_proxy_listen_port())
    else:
        add_to_env(config, CATTLE_CONFIG_URL=url)
def _setup_logger():
    """Configure root logging: a rotating file handler plus a WARN-level
    stderr handler, both sharing one format."""
    formatter = logging.Formatter(
        '%(asctime)s %(levelname)s %(name)s [%(thread)s] '
        '[%(filename)s:%(lineno)s] %(message)s ')

    logging.root.setLevel(
        logging.DEBUG if Config.debug() else logging.INFO)

    file_handler = RotatingFileHandler(Config.log(),
                                       maxBytes=_LOG_SIZE,
                                       backupCount=_LOG_COUNT)
    file_handler.setFormatter(formatter)

    std_err_handler = logging.StreamHandler(sys.stderr)
    std_err_handler.setFormatter(formatter)
    std_err_handler.setLevel(logging.WARN)

    logging.root.addHandler(file_handler)
    logging.root.addHandler(std_err_handler)
def on_startup(self):
    """When the config URL points at localhost, proxy it via socat so
    containers can reach it."""
    url = Config.config_url()
    if "localhost" not in url:
        return

    parsed = urlparse.urlparse(url)
    source_host = Config.api_proxy_listen_host()
    source_port = Config.api_proxy_listen_port()
    target_ip = socket.gethostbyname(parsed.hostname)
    target_port = get_url_port(url)

    log.info("Proxying %s:%s -> %s:%s", source_host, source_port,
             target_ip, target_port)

    background(["socat",
                "TCP4-LISTEN:{0},fork,bind={1},reuseaddr".format(
                    source_port, source_host),
                "TCP:{0}:{1}".format(target_ip, target_port)])
def _add_resources(self, ping, pong):
    """Attach physical-host, compute, pool and ip resources to pong."""
    if not utils.ping_include_resources(ping):
        return

    host_stats = None
    if utils.ping_include_stats(ping):
        try:
            host_stats = self.host_info.collect_data()
        except:
            log.exception("Error geting host info stats")

    physical_host = Config.physical_host()
    host_uuid = DockerConfig.docker_uuid()
    host_ip = DockerConfig.docker_host_ip()

    compute = {
        "type": "host",
        "kind": "docker",
        "name": Config.hostname(),
        "labels": Config.labels(),
        "physicalHostUuid": physical_host["uuid"],
        "uuid": host_uuid,
        "info": host_stats,
    }
    proxy = Config.host_proxy()
    if proxy is not None:
        compute["apiProxy"] = proxy

    pool = {
        "type": "storagePool",
        "kind": "docker",
        "name": compute["name"] + " Storage Pool",
        "hostUuid": host_uuid,
        "uuid": host_uuid + "-pool",
    }

    ip = {
        "type": "ipAddress",
        "uuid": host_ip,
        "address": host_ip,
        "hostUuid": host_uuid,
    }

    utils.ping_add_resources(pong, physical_host, compute, pool, ip)
def lock(obj):
    """Return a LockWrapper around a file lock for *obj*.

    obj is either a lock-name string or a dict with 'type' and 'id',
    which are combined into "<type>-<id>".
    """
    if isinstance(obj, basestring):
        lock_name = obj
    else:
        lock_name = "{0}-{1}".format(obj["type"], obj["id"])

    lock_dir = Config.lock_dir()
    try:
        # makedirs inside try/except fixes the check-then-create race of
        # the old `if not exists: mkdir` when two processes lock at once
        # (and also creates missing parent directories).
        os.makedirs(lock_dir)
    except OSError:
        # Re-raise unless the directory now exists (lost the race)
        if not os.path.isdir(lock_dir):
            raise
    return LockWrapper(lock_name,
                       portalocker.Lock(os.path.join(lock_dir, lock_name)))
def execute(self, event):
    """Reply to a ping event, letting registered types annotate it."""
    if not _should_handle(self, event):
        return

    resp = utils.reply(event)
    if Config.do_ping():
        # Any registered type exposing on_ping may add data to the reply
        for handler in types():
            if hasattr(handler, 'on_ping'):
                handler.on_ping(event, resp)
    return resp
def on_startup(self):
    """Start a socat proxy for localhost-addressed config URLs."""
    url = Config.config_url()
    if 'localhost' not in url:
        return

    parsed = urlparse.urlparse(url)
    bind_host = Config.api_proxy_listen_host()
    bind_port = Config.api_proxy_listen_port()
    dest_ip = socket.gethostbyname(parsed.hostname)
    dest_port = get_url_port(url)

    log.info('Proxying %s:%s -> %s:%s', bind_host, bind_port,
             dest_ip, dest_port)

    background(['socat',
                'TCP4-LISTEN:{0},fork,bind={1},reuseaddr'.format(
                    bind_port, bind_host),
                'TCP:{0}:{1}'.format(dest_ip, dest_port)])
def _check_ts():
    """True while the stamp file's mtime matches the first value seen.

    The initial mtime is cached in the module-level _STAMP_TS; a missing
    stamp file always passes.
    """
    global _STAMP_TS
    stamp_file = Config.stamp()
    if not os.path.exists(stamp_file):
        return True
    current_ts = os.path.getmtime(stamp_file)
    if _STAMP_TS is None:
        _STAMP_TS = current_ts
    return _STAMP_TS == current_ts
def execute(self, event):
    # Handle a config-update event: run config.sh for the requested
    # items and reply with the script output.
    if not _should_handle(self, event):
        return

    # Nothing to update: acknowledge immediately
    if len(event.data.items) == 0:
        return utils.reply(event)

    item_names = []

    for item in event.data.items:
        # For development, don't let the server kill your agent
        if item.name != 'pyagent' or Config.config_update_pyagent():
            item_names.append(item.name)

    home = Config.home()
    env = dict(os.environ)
    env['CATTLE_ACCESS_KEY'] = Config.access_key()
    env['CATTLE_SECRET_KEY'] = Config.secret_key()
    env['CATTLE_CONFIG_URL'] = Config.config_url()
    env['CATTLE_HOME'] = home

    args = [Config.config_sh()] + item_names

    try:
        output = utils.get_command_output(args, cwd=home, env=env)
        return utils.reply(event, {'exitCode': 0, 'output': output})
    except subprocess.CalledProcessError as e:
        # On failure, report progress instead of replying; the handler
        # then implicitly returns None.
        Progress(event).update('Update Failed', data={
            'exitCode': e.returncode,
            'output': e.output
        })
def on_startup(self):
    """Start cadvisor with the configured docker root, extra options,
    and either a wrapper command or nsenter into the host namespace."""
    cmd = ['cadvisor',
           '-logtostderr=true',
           '-listen_ip', Config.cadvisor_ip(),
           '-port', str(Config.cadvisor_port()),
           '-housekeeping_interval', Config.cadvisor_interval()]

    docker_root = Config.cadvisor_docker_root()
    if docker_root:
        cmd.extend(["-docker_root", docker_root])

    cadvisor_opts = Config.cadvisor_opts()
    if cadvisor_opts:
        try:
            cmd.extend(shlex.split(cadvisor_opts))
        except ValueError:
            # Malformed opts (unbalanced quote): log and start without
            log.exception(
                "Error missing closing `'` in: {0}".format(cadvisor_opts))

    wrapper = Config.cadvisor_wrapper()
    if len(wrapper):
        cmd.insert(0, wrapper)
    elif os.path.exists('/host/proc/1/ns/mnt'):
        cmd = ['nsenter', '--mount=/host/proc/1/ns/mnt', '--'] + cmd

    background(cmd)
def on_startup(self):
    """Start host-api with authentication wired to cattle credentials."""
    env = dict(os.environ)
    env['HOST_API_CATTLE_ACCESS_KEY'] = Config.access_key()
    env['HOST_API_CATTLE_SECRET_KEY'] = Config.secret_key()

    cadvisor_url = 'http://{0}:{1}'.format(Config.cadvisor_ip(),
                                           Config.cadvisor_port())
    background(['host-api',
                '-cadvisor-url', cadvisor_url,
                '-logtostderr=true',
                '-ip', Config.host_api_ip(),
                '-port', str(Config.host_api_port()),
                '-auth=true',
                '-host-uuid', DockerConfig.docker_uuid(),
                '-public-key', Config.jwt_public_key_file(),
                '-cattle-url', Config.api_url(),
                '-cattle-state-dir', Config.container_state_dir()],
               env=env)
def _get_docker_client(host):
    # Build a docker client for *host*. When the host carries an https
    # clusterConnection plus cert/key material, write those to a
    # per-account directory and build a TLS config from them; hosts
    # without clusterConnection data get a plain (local) client.
    cluster_connection = None
    tls_config = None
    try:
        cluster_connection = host['clusterConnection']
        if cluster_connection.startswith('https'):
            try:
                account_id = host['accountId']
                ca_crt = host['caCrt']
                client_crt = host['clientCrt']
                client_key = host['clientKey']
                client_certs_dir = Config.client_certs_dir()
                acct_client_cert_dir = \
                    path.join(client_certs_dir, str(account_id))
                if not path.exists(acct_client_cert_dir):
                    log.debug('Creating client cert directory: %s',
                              acct_client_cert_dir)
                    makedirs(acct_client_cert_dir)
                if ca_crt:
                    log.debug('Writing cert auth')
                    with open(path.join(acct_client_cert_dir, 'ca.crt'),
                              'w') as f:
                        f.write(ca_crt)
                if client_crt:
                    log.debug('Writing client cert')
                    with open(
                            path.join(acct_client_cert_dir, 'client.crt'),
                            'w') as f:
                        f.write(client_crt)
                if client_key:
                    log.debug('Writing client key')
                    with open(
                            path.join(acct_client_cert_dir, 'client.key'),
                            'w') as f:
                        f.write(client_key)
                # TLS verification needs the complete cert/key set
                if ca_crt and client_crt and client_key:
                    tls_config = tls.TLSConfig(
                        client_cert=(path.join(acct_client_cert_dir,
                                               'client.crt'),
                                     path.join(acct_client_cert_dir,
                                               'client.key')),
                        verify=path.join(acct_client_cert_dir, 'ca.crt'),
                        # Cluster endpoints are addressed by IP, so the
                        # certificate hostname cannot be asserted
                        assert_hostname=False)
            except (KeyError, AttributeError) as e:
                raise Exception('Unable to process cert/keys for cluster',
                                cluster_connection, e)
    except (KeyError, AttributeError):
        # No clusterConnection on this host: fall through to defaults
        pass
    return docker_client(base_url_override=cluster_connection,
                         tls_config=tls_config)
def __init__(self, url, auth=None, workers=20, agent_id=None,
             queue_depth=None):
    """Event client bound to the server's /subscribe endpoint.

    url: API URL; a trailing "/schemas" suffix is stripped.
    auth: credentials passed through to requests / the Authorization
        header.
    workers: worker count (coerced to int).
    agent_id: agent identifier included in subscriptions.
    queue_depth: bound for the event/ping queues. Defaults to
        Config.queue_depth() resolved at call time — the old signature
        evaluated Config.queue_depth() once at class-definition time,
        freezing the value before configuration could be applied.
    """
    if queue_depth is None:
        queue_depth = Config.queue_depth()
    if url.endswith("/schemas"):
        url = url[0:len(url) - len("/schemas")]

    self._url = url + "/subscribe"
    self._auth = auth
    self._workers = int(workers)
    self._children = []
    self._agent_id = agent_id
    self._queue = Queue(queue_depth)
    self._ping_queue = Queue(queue_depth)

    type_manager.register_type(type_manager.PUBLISHER,
                               Publisher(url + "/publish", auth))