def _stage_changes(staged_resources, conn_graph,
                   commited_resources, staged_log):
    try:
        srt = nx.topological_sort(conn_graph)
    except nx.NetworkXUnfeasible:
        # the graph has at least one cycle; log each one before failing
        for cycle in nx.simple_cycles(conn_graph):
            log.debug('CYCLE: %s', cycle)
        raise

    for res_uid in srt:
        commited_data = commited_resources.get(res_uid, {})
        staged_data = staged_resources.get(res_uid, {})

        df = create_diff(staged_data, commited_data)

        if df:
            action = guess_action(commited_data, staged_data)
            log_item = data.LogItem(
                utils.generate_uuid(),
                res_uid,
                '{}.{}'.format(res_uid, action),
                df)
            staged_log.append(log_item)
    return staged_log

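# A standalone sketch of the guess_action() helper used above. This is an
# assumption, not the real implementation: it classifies the change by which
# side of the diff is empty.
def guess_action_sketch(commited_data, staged_data):
    if not commited_data:
        return 'run'     # nothing committed yet: first run
    if not staged_data:
        return 'remove'  # staged copy gone: resource was deleted
    return 'update'      # both sides present: the diff is a change

# guess_action_sketch({}, {'ip': '10.0.0.2'}) -> 'run'
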
def notify(self, emitter):
    log.debug('Notify from %s value %s', emitter, emitter.value)
    # Copy emitter's values to receiver
    self.value = emitter.value
    for receiver in self.receivers:
        receiver.notify(self)
    self.attached_to.set_args_from_dict({self.name: self.value})

def build_edges(changes_graph, events):
    """
    :param changes_graph: nx.DiGraph object with actions to be executed
    :param events: {res: [controls.Event objects]}
    """
    events_graph = nx.MultiDiGraph()
    for res_evts in events.values():
        for ev in res_evts:
            events_graph.add_edge(ev.parent_node, ev.child_node, event=ev)

    stack = changes_graph.nodes()
    visited = set()
    while stack:
        event_name = stack.pop(0)

        if event_name in events_graph:
            log.debug('Next events after %s are %s',
                      event_name, events_graph.successors(event_name))
        else:
            log.debug('No outgoing events based on %s', event_name)

        if event_name not in visited:
            for parent, child, data in events_graph.edges(event_name,
                                                          data=True):
                succ_ev = data['event']
                succ_ev.insert(stack, changes_graph)
            visited.add(event_name)
    return changes_graph

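# A hypothetical sketch of the controls.Event contract that build_edges()
# relies on: an event names its parent/child changes, and its insert() hook
# decides whether to push the child onto the traversal stack and wire it
# into changes_graph. The real event classes add state checks here.
class EventSketch(object):
    def __init__(self, parent_node, child_node):
        self.parent_node = parent_node
        self.child_node = child_node

    def insert(self, stack, changes_graph):
        # schedule the dependent change and record the ordering edge
        stack.append(self.child_node)
        if self.child_node in changes_graph:
            changes_graph.add_edge(self.parent_node, self.child_node)
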
def copy(self, resource, _from, _to, use_sudo=False):
    log.debug("RSYNC: %s -> %s", _from, _to)

    if use_sudo:
        rsync_path = "sudo rsync"
    else:
        rsync_path = "rsync"

    rsync_props = self._rsync_props(resource)
    ssh_cmd = ' '.join(self._ssh_cmd(rsync_props))
    rsync_cmd = ('rsync -az -e "%(ssh_cmd)s" '
                 '--rsync-path="%(rsync_path)s" %(_from)s '
                 '%(rsync_host)s:%(_to)s') % dict(
                     rsync_path=rsync_path,
                     ssh_cmd=ssh_cmd,
                     rsync_host=rsync_props['host_string'],
                     _from=_from,
                     _to=_to)

    if rsync_props.get('ssh_password'):
        env = os.environ.copy()
        env['SSHPASS'] = rsync_props['ssh_password']
    else:
        env = os.environ

    rsync_executor = lambda transport: execute(
        rsync_cmd, shell=True, env=env)

    log.debug("RSYNC CMD: %r", rsync_cmd)

    executor = Executor(resource=resource,
                        executor=rsync_executor,
                        params=(_from, _to, use_sudo))
    self.executors.append(executor)

def copy(self, resource, _from, _to, use_sudo=False):
    log.debug("RSYNC: %s -> %s", _from, _to)

    if use_sudo:
        rsync_path = "sudo rsync"
    else:
        rsync_path = "rsync"

    rsync_props = self._rsync_props(resource)
    rsync_cmd = ('rsync -az -e "ssh -i %(ssh_key)s" '
                 '--rsync-path="%(rsync_path)s" %(_from)s '
                 '%(rsync_host)s:%(_to)s') % dict(
                     rsync_path=rsync_path,
                     ssh_key=rsync_props['ssh_key'],
                     rsync_host=rsync_props['host_string'],
                     _from=_from,
                     _to=_to)

    rsync_executor = lambda transport: fabric_api.local(rsync_cmd)

    log.debug("RSYNC CMD: %r", rsync_cmd)

    executor = Executor(resource=resource,
                        executor=rsync_executor,
                        params=(_from, _to, use_sudo))
    self.executors.append(executor)

def action(self, resource, action_name):
    action_file = self._compile_action_file(resource, action_name)
    log.debug('action_file: %s', action_file)

    action_file_name = os.path.join(self.dirs[resource.name], action_file)
    action_file_name = action_file_name.replace(
        SOLAR_TEMP_LOCAL_LOCATION, '/tmp/')

    self._copy_templates_and_scripts(resource, action_name)
    self.transport_sync.copy(resource, self.dst, '/tmp')
    self.transport_sync.sync_all()

    cmd = self.transport_run.run(
        resource,
        'bash', action_file_name,
        use_sudo=True,
        warn_only=True)
    if cmd.return_code:
        raise errors.SolarError(
            'Bash execution for {} failed with {}'.format(
                resource.name, cmd.return_code))
    return cmd

def verify_run_result(self, cmd, result):
    rc, out, err = result.return_code, result.stdout, result.stderr
    log.debug('CMD %r RC %s OUT %s ERR %s', cmd, rc, out, err)
    if not result.success:
        message = 'CMD %r failed RC %s ERR %s' % (cmd, rc, err)
        log.error(message)
        # raise with the same message that was logged
        raise errors.SolarError(message)

def action(self, resource, action_name):
    log.debug('Executing Puppet manifest %s %s', action_name, resource)
    action_file = self._compile_action_file(resource, action_name)
    log.debug('action_file: %s', action_file)

    self.upload_hiera_resource(resource)
    self.upload_manifests(resource)

    self.prepare_templates_and_scripts(resource, action_file, '')
    self.transport_sync.copy(resource, action_file, '/tmp/action.pp')
    self.transport_sync.sync_all()

    cmd = self.transport_run.run(
        resource,
        'puppet', 'apply', '-vd', '/tmp/action.pp', '--detailed-exitcodes',
        env={
            'FACTER_resource_name': resource.name,
        },
        use_sudo=True,
        warn_only=True)
    # exit code 0 - no changes, 2 - successful changes
    if cmd.return_code not in [0, 2]:
        raise errors.SolarError(
            'Puppet for {} failed with {}'.format(
                resource.name, cmd.return_code))
    return cmd

def validate_token(
        keystone_host=None, keystone_port=None,
        user=None, tenant=None, password=None):
    token_data = requests.post(
        'http://%s:%s/v2.0/tokens' % (keystone_host, keystone_port),
        json.dumps({
            'auth': {
                'tenantName': tenant,
                'passwordCredentials': {
                    'username': user,
                    'password': password,
                },
            },
        }),
        headers={'Content-Type': 'application/json'})

    token_body = token_data.json()
    token = token_body['access']['token']['id']
    log.debug('%s TOKEN: %s', user, token)
    return token, token_body

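# Usage sketch: fetch a Keystone v2.0 token. Host, port, and credentials
# below are illustrative placeholders, not real endpoints.
token, token_info = validate_token(
    keystone_host='10.0.0.2',
    keystone_port=5000,
    user='admin',
    tenant='admin',
    password='secret')
# token_info['access']['serviceCatalog'] lists the catalog endpoints.
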
def action(self, resource, action_name):
    api = HTTPClient(KubeConfig.from_file("~/.kube/config"))
    log.debug('Executing %s %s', action_name, resource.name)

    # XXX: self._configs is used in _compile_action_file via _make_args.
    # It has to be set before the action file is compiled.
    self._configs = self.prepare_configs(resource)
    action_file = self._compile_action_file(resource, action_name)
    log.debug('action_file: %s', action_file)

    # XXX: seems hacky
    with open(action_file) as f:
        obj = yaml.load(f.read())
    k8s_class = obj['kind']

    if action_name == 'run':
        k8s_class = getattr(pykube.objects, k8s_class)
        k8s_obj = k8s_class(api, obj)
        k8s_obj.create()
        self._wait_for(k8s_obj)
    elif action_name == 'update':
        k8s_class = getattr(pykube.objects, k8s_class)
        k8s_obj = k8s_class(api, obj)
        k8s_obj.reload()
        # generate new data
        new_data = self._compile_action_file(resource, 'run')
        with open(new_data) as f:
            new_obj = yaml.load(f.read())
        _update_obj(k8s_obj.obj, new_obj)
        # hacky: swap in our own patch generator
        pykube.objects.jsonpatch.make_patch = jsondiff.make
        k8s_obj.update()
        self._wait_for(k8s_obj)
    elif action_name == 'delete':
        raise NotImplementedError(action_name)
    else:
        raise NotImplementedError(action_name)

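# A hedged sketch of the _update_obj() helper used by the 'update' branch,
# assuming (not confirmed here) that it deep-merges the freshly rendered
# spec into the live object before the patch is computed:
def _update_obj_sketch(live, new):
    for key, value in new.items():
        if isinstance(value, dict) and isinstance(live.get(key), dict):
            _update_obj_sketch(live[key], value)  # recurse into nested specs
        else:
            live[key] = value  # scalars and lists are replaced wholesale
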
def run(self, resource, *args, **kwargs): log.debug("RAW SSH: %s", args) commands = [] prefix = [] if kwargs.get('use_sudo', False): prefix.append('sudo') if kwargs.get('cwd'): cmd = prefix + ['cd', kwargs['cwd']] commands.append(' '.join(cmd)) env = [] if 'env' in kwargs: for key, value in kwargs['env'].items(): env.append('{}={}'.format(key, value)) cmd = prefix + env + list(args) commands.append(' '.join(cmd)) remote_cmd = '\"%s\"' % ' && '.join(commands) settings = self.settings(resource) ssh_cmd = self._ssh_cmd(settings) ssh_cmd += (self._ssh_command_host(settings), remote_cmd) log.debug("RAW SSH CMD: %r", ssh_cmd) # TODO convert it to SolarRunResult return execute(' '.join(ssh_cmd), shell=True)
def connect_single(emitter, src, receiver, dst):
    if ':' in dst:
        return connect_multi(emitter, src, receiver, dst)

    # Disconnect all receiver inputs
    # Check if receiver input is of list type first
    emitter_input = emitter.resource_inputs()[src]
    receiver_input = receiver.resource_inputs()[dst]

    if emitter_input.id == receiver_input.id:
        raise Exception(
            'Trying to connect {} to itself, this is not possible'.format(
                emitter_input.id))

    if not receiver_input.is_list:
        receiver_input.receivers.delete_all_incoming(receiver_input)

    # Check for cycles
    # TODO: change to get_paths after it is implemented in drivers
    if emitter_input in receiver_input.receivers.as_set():
        raise Exception('Prevented creating a cycle on %s::%s' %
                        (emitter.name, emitter_input.name))

    log.debug('Connecting {}::{} -> {}::{}'.format(
        emitter.name, emitter_input.name,
        receiver.name, receiver_input.name))
    emitter_input.receivers.add(receiver_input)

def action(self, resource, action_name):
    log.debug('Executing Puppet manifest %s %s', action_name, resource)
    action_file = self._compile_action_file(resource, action_name)
    log.debug('action_file: %s', action_file)
    self.upload_manifests(resource)

    self._scp_command(resource, action_file, '/tmp/action.pp')

    cmd = self._ssh_command(
        resource,
        'puppet', 'apply', '-vd', '/tmp/action.pp', '--detailed-exitcodes',
        env={
            'FACTER_resource_name': resource.name,
        },
        use_sudo=True,
        warn_only=True)
    # exit code 0 - no changes, 2 - successful changes
    if cmd.return_code not in [0, 2]:
        raise errors.SolarError(
            'Puppet for {} failed with {}'.format(
                resource.name, cmd.return_code))
    return cmd

def _create_torrent(self, resource, fs, root='.', use_sudo=False):
    t = lt.create_torrent(fs)
    transports = resource.transports()
    torrent_transport = next(
        (x for x in transports if x['name'] == 'torrent'))
    trackers = torrent_transport['trackers']
    for tracker in trackers:
        t.add_tracker(tracker)
    lt.set_piece_hashes(t, os.path.join(root, '..'))
    torrent = t.generate()
    torrent['priv'] = True  # private torrent, no DHT, only trackers
    name = self._create_torrent_name()
    try:
        # not checking for path existence
        with open(name, 'wb') as f:
            f.write(lt.bencode(torrent))
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        # torrent directory is missing; create it and retry the write
        os.makedirs(self._torrent_path)
        with open(name, 'wb') as f:
            f.write(lt.bencode(torrent))
    log.debug("Created torrent file %s", name)
    magnet_uri = lt.make_magnet_uri(lt.torrent_info(name))
    # self._torrents[root] = (name, magnet_uri)
    if not use_sudo:
        self._torrents.append((name, magnet_uri, root))
    else:
        self._sudo_torrents.append((name, magnet_uri, root))
    return name

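# Usage sketch for the fs argument above, using libtorrent's file_storage
# API (assuming lt is the libtorrent module, as the handler suggests; the
# payload path is an illustrative placeholder):
fs = lt.file_storage()
lt.add_files(fs, '/var/lib/solar/payload')
# torrent_file = self._create_torrent(resource, fs,
#                                     root='/var/lib/solar/payload')
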
def _ssh_command(resource, *args, **kwargs):
    log.debug('SSH: %s', args)

    executor = fabric_api.run
    if kwargs.get('use_sudo', False):
        executor = fabric_api.sudo

    managers = [
        fabric_api.settings(**ResourceSSHMixin._fabric_settings(resource)),
    ]

    if 'cwd' in kwargs:
        managers.append(fabric_api.cd(kwargs['cwd']))

    if 'env' in kwargs:
        managers.append(fabric_api.shell_env(**kwargs['env']))

    if 'warn_only' in kwargs:
        managers.append(fabric_api.warn_only())

    with nested(*managers):
        return executor(' '.join(args))

def connect_multi(emitter, src, receiver, dst):
    receiver_input_name, receiver_input_key = dst.split(':')
    if '|' in receiver_input_key:
        receiver_input_key, receiver_input_tag = \
            receiver_input_key.split('|')
    else:
        receiver_input_tag = None

    emitter_input = emitter.resource_inputs()[src]
    receiver_input = receiver.resource_inputs()[receiver_input_name]

    if not receiver_input.is_list or receiver_input_tag:
        receiver_input.receivers.delete_all_incoming(
            receiver_input,
            destination_key=receiver_input_key,
            tag=receiver_input_tag)

    # We can add default tag now
    receiver_input_tag = receiver_input_tag or emitter.name

    # NOTE: make sure that receiver.args[receiver_input] is of dict type
    if not receiver_input.is_hash:
        raise Exception(
            'Receiver input {} must be a hash or a list of hashes'.format(
                receiver_input_name))

    log.debug('Connecting {}::{} -> {}::{}[{}], tag={}'.format(
        emitter.name, emitter_input.name,
        receiver.name, receiver_input.name,
        receiver_input_key, receiver_input_tag))
    emitter_input.receivers.add_hash(
        receiver_input, receiver_input_key, tag=receiver_input_tag)

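# A standalone sketch of the dst syntax parsed above: 'name:key' addresses a
# key inside a hash input, and an optional '|tag' suffix namespaces the value
# per emitter (falling back to the emitter's name when absent):
def parse_multi_dst(dst):
    name, key = dst.split(':')
    tag = None
    if '|' in key:
        key, tag = key.split('|')
    return name, key, tag

# parse_multi_dst('hosts:ip|node1') -> ('hosts', 'ip', 'node1')
# parse_multi_dst('hosts:name')     -> ('hosts', 'name', None)
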
def __enter__(self):
    lk = self._acquire(self.uid, self.identity, self.stamp)
    if not lk.am_i_locking(self.identity):
        log.debug(
            'Lock %s acquired by another identity %s != %s, lockers %s',
            self.uid, self.identity, lk.who_is_locking(), lk.lockers)
        while self.retries:
            self._before_retry(self.uid, self.identity)
            if lk.key in DBLock._c.obj_cache:
                del DBLock._c.obj_cache[lk.key]
            self.waiter.wait(self.uid, self.identity)
            lk = self._acquire(self.uid, self.identity, self.stamp)
            self.retries -= 1
            if lk.am_i_locking(self.identity):
                break
            else:
                # reset stamp mark
                self.stamp = str(uuid4())
        else:
            if not lk.am_i_locking(self.identity):
                raise RuntimeError(
                    'Failed to acquire {},'
                    ' owned by identity {}'.format(
                        lk.key, lk.who_is_locking()))
    self._after_acquire(self.uid, self.identity)
    log.debug('Lock for %s acquired by %s', self.uid, self.identity)
    return lk

def prepare(self, resource, action):
    action_file = os.path.join(
        resource.db_obj.actions_path,
        resource.actions[action])
    self._copy_templates_and_scripts(resource, action)
    ansible_library_path = self._copy_ansible_library(resource)

    files = self._make_all(resource, action, action_file)
    playbook_file, inventory_file, extra_vars_file = files
    remote_playbook_file = self.adjust_path(playbook_file)
    remote_inventory_file = self.adjust_path(inventory_file)
    remote_extra_vars_file = self.adjust_path(extra_vars_file)

    # TODO: clarify this for ansible template handler
    variables = resource.args
    if 'roles' in variables:
        self.download_roles(variables['roles'])

    call_args = self.make_ansible_command(remote_playbook_file,
                                          remote_inventory_file,
                                          remote_extra_vars_file,
                                          ansible_library_path)
    log.debug('Prepared ansible command: %s', ' '.join(call_args))
    return call_args

def copy(self, resource, _from, _to, use_sudo=False): log.debug("TORRENT: %s -> %s", _from, _to) executor = Executor(resource=resource, executor=None, params=(_from, _to, use_sudo)) self.executors.append(executor)
def action(self, resource, action_name): log.debug("Executing Puppet manifest %s %s", action_name, resource) action_file = self._compile_action_file(resource, action_name) log.debug("action_file: %s", action_file) self.upload_manifests(resource) self.prepare_templates_and_scripts(resource, action_file, "") self.transport_sync.copy(resource, action_file, "/tmp/action.pp") self.transport_sync.sync_all() cmd = self.transport_run.run( resource, "puppet", "apply", "-vd", "/tmp/action.pp", "--detailed-exitcodes", env={"FACTER_resource_name": resource.name}, use_sudo=True, warn_only=True, ) # 0 - no changes, 2 - successfull changes if cmd.return_code not in [0, 2]: raise errors.SolarError("Puppet for {} failed with {}".format(resource.name, cmd.return_code)) return cmd
def run(self, resource, *args, **kwargs):
    log.debug('SSH: %s', args)

    executor = fabric_api.run
    if kwargs.get('use_sudo', False):
        executor = fabric_api.sudo

    managers = [
        fabric_api.settings(**self._fabric_settings(resource)),
    ]

    cwd = kwargs.get('cwd')
    if cwd:
        managers.append(fabric_api.cd(cwd))

    env = kwargs.get('env')
    if env:
        managers.append(fabric_api.shell_env(**env))

    if kwargs.get('warn_only', False):
        managers.append(fabric_api.warn_only())

    with nested(*managers):
        res = executor(' '.join(args))
        return self.get_result(res)

def notify(self, emitter):
    log.debug('Notify from %s value %s', emitter, emitter.value)
    # Copy emitter's values to receiver
    idx = self._emitter_idx(emitter)
    self.value[idx] = self._format_value(emitter)
    for receiver in self.receivers:
        receiver.notify(self)
    self.attached_to.set_args_from_dict({self.name: self.value})

def run(self, resource, *args, **kwargs): log.debug("Solard run: %s", args) client = self.get_client(resource) try: res = client.run(' '.join(args), **kwargs) return self.get_result(res, failed=False) except Exception as ex: log.exception("Exception during solard run") return self.get_result(ex, failed=True)
def action(self, resource, action):
    call_args = self.prepare(resource, action)
    log.debug('EXECUTING: %s', ' '.join(call_args))

    ret, out, err = execute(call_args)
    if ret == 0:
        return
    else:
        # ansible returns errors on stdout
        raise errors.SolarError(out)

def run(self, transport):
    if self.valid:
        result = self._executor(transport)
        if isinstance(result, tuple) and len(result) == 3:
            # TODO Include file information in result
            rc, out, err = result
            log.debug('RC %s OUT %s ERR %s', rc, out, err)
            if rc:
                raise errors.SolarError(err)

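# A minimal executor satisfying the contract checked above: a callable that
# takes the transport and returns an (rc, out, err) triple. Any other return
# value is treated as success. Names below are illustrative.
def noop_executor(transport):
    return 0, 'ok', ''  # pretend the work succeeded

# Executor(resource=some_resource, executor=noop_executor,
#          params=()).run(transport)
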
def copy(self, resource, _from, _to, use_sudo=False): log.debug("Solard copy: %s -> %s", _from, _to) client = self.get_client(resource) executor = lambda transport: client.copy(_from, _to, use_sudo) executor = Executor(resource=resource, executor=executor, params=(_from, _to, use_sudo)) self.executors.append(executor)
def _render_action(self, resource, action):
    log.debug('Rendering %s %s', resource.name, action)

    action_file = resource.actions[action]
    log.debug('action file: %s', action_file)
    args = self._make_args(resource)

    with open(action_file) as f:
        tpl = Template(f.read())
    return tpl.render(str=str, zip=zip, **args)

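# A standalone sketch of the rendering step, assuming Template here is
# jinja2's (template text and argument values are illustrative):
from jinja2 import Template

tpl = Template('service {{ name }} on {{ ip }}')
print(tpl.render(str=str, zip=zip, name='keystone', ip='10.0.0.2'))
# -> service keystone on 10.0.0.2
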
def _pod_status(self, pod):
    statuses = pod.obj['status']['containerStatuses']
    rc = None  # guard against an empty status list
    for status in statuses:
        rc = status['restartCount']
        log.debug("Checking container status %r for job", status)
        terminated = status.get('terminated')
        if terminated:
            reason = terminated['reason']
            return rc, reason
    return rc, None

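# A hedged sketch of how _pod_status() might be polled until a container
# terminates (loop shape and interval are assumptions, not the real
# _wait_for() implementation):
import time

def wait_for_termination(handler, pod, interval=5):
    while True:
        restarts, reason = handler._pod_status(pod)
        if reason is not None:
            return restarts, reason  # e.g. (0, 'Completed')
        time.sleep(interval)
        pod.reload()  # refresh the pykube object from the API server
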
def _copy_ansible_library(self, resource):
    base_path = resource.db_obj.base_path
    src_ansible_library_dir = os.path.join(base_path, 'ansible_library')
    trg_ansible_library_dir = None
    if os.path.exists(src_ansible_library_dir):
        log.debug("Adding ansible_library for %s", resource.name)
        trg_ansible_library_dir = os.path.join(
            self.dirs[resource.name], 'ansible_library')
        shutil.copytree(src_ansible_library_dir, trg_ansible_library_dir)
    return trg_ansible_library_dir

def _render_action(self, resource, action):
    log.debug('Rendering %s %s', resource.name, action)

    action_file = resource.metadata['actions'][action]
    action_file = os.path.join(
        resource.metadata['actions_path'], action_file)
    log.debug('action file: %s', action_file)
    args = self._make_args(resource)

    with open(action_file) as f:
        tpl = Template(f.read())
    return tpl.render(str=str, zip=zip, **args)

def test(resource):
    log.debug('Testing cinder_scheduler_puppet')

def __exit__(self, type, value, traceback):
    log.debug(self.dst)
    # rmtree() returns None, so exceptions inside the block still propagate
    shutil.rmtree(self.dst)

def wrap_session(extension, clients):
    log.debug('DB session for %r', extension)
    extension.for_all.before(lambda ctxt: ModelMeta.session_start())
    extension.for_all.after(lambda ctxt: ModelMeta.session_end())