def create_docker_network(self):
    """
    Create a new Docker network named after the swarm's UUID
    :return: name of the created network
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    interface = docker_interface.DockerInterface()
    created_network = interface.create_network(network_name=self.swarm.uuid)
    llogger.debug("Network created: %s", jsonpickle.encode(created_network.attrs))
    return created_network.attrs.get("Name")
def get_mode(self):
    """
    Returns the mode of the node (master/worker) encoded as JSON
    :return: JSON-encoded mode of the node
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    if self._master is not None:
        node_mode = mode.Mode.MASTER
    elif self._swarm_list_of_worker:
        node_mode = mode.Mode.WORKER
    else:
        node_mode = mode.Mode.NOT_DEFINED
    return jsonpickle.encode(node_mode)
def get_open_allocations(self):
    """
    Returns the services that are not allocated yet
    :return: list of service keys with no worker assigned
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    # A service is "open" while its allocation entry is still None.
    # Comprehension replaces the manual append loop and the needless list() copy.
    return [service_key for service_key, worker in self._allocation.items()
            if worker is None]
def clear_name_service(name_service):
    """
    Clears all the registered objects of the name service
    :param name_service: NameServer object
    :return:
    """
    llogger = local_logger.LocalLogger()
    llogger.log_call(sys._getframe().f_code.co_name)
    registered = name_service.list()
    for registered_name in registered:
        llogger.debug("Remove %s from nameserver", registered_name)
        name_service.remove(name=registered_name)
    # NOTE(review): this trailing no-arg remove() looks like a no-op — confirm intent
    name_service.remove()
def parse_service_property(srv, property_key, property_value):
    """
    Apply one property of the definition file to the service via the
    property_options dispatch table; unsupported keys are logged and skipped.
    :param srv: service object to update
    :param property_key: key of the property
    :param property_value: value of the property
    :return: the (possibly updated) service object
    """
    llogger = local_logger.LocalLogger()
    llogger.log_call(sys._getframe().f_code.co_name)
    llogger.debug("Key of property name: %s --> Value of property name: %s",
                  property_key, property_value)
    try:
        parsed_service = property_options[property_key](property_value, srv)
        llogger.debug("Parsed Service: %s", jsonpickle.encode(parsed_service))
    except KeyError:
        llogger.debug("Key is not supported: %s", property_key)
    return srv
def has_worker_with_name(self, name):
    """
    Checks if a worker with the given name is already registered
    :param name: name of the worker
    :return: True when a worker with that name has been found
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    # any() short-circuits exactly like the original loop; no list() copy needed.
    return any(worker.hostname == name for worker in self._worker_list.values())
def __init__(self, uuid, hostname, advertise_address=None, swarm_uuid=None):
    """
    Initialize the worker's identity and network attributes
    :param uuid: unique id of the worker
    :param hostname: hostname of the worker
    :param advertise_address: address the worker advertises (optional)
    :param swarm_uuid: UUID of the swarm the worker belongs to (optional)
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    self.uuid = uuid
    self.hostname = hostname
    self.advertise_address = advertise_address
    self.swarm_uuid = swarm_uuid
    # Services allocated to this worker; starts empty.
    self.services = []
def get_running_containers(self):
    """
    Creates a list of all running containers run by the worker and returns it
    :return: List of all containers whose status is not 'exited'
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    # Refresh the container list before filtering.
    self.reload_containers()
    # Comprehension replaces the manual append loop (same filter, same order).
    return [container for container in self if str(container.status) != 'exited']
def assign_worker_to_service(self, service_key, worker_object):
    """
    Assign worker to service
    :param service_key: Key of the service
    :param worker_object: Object of the worker
    :return:
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    # The allocation table stores the worker's UUID (as a string), not the object.
    self._allocation[str(service_key)] = str(worker_object.uuid)
    llogger.debug("Worker: %s allocated to service %s", worker_object.uuid, service_key)
def unregister_worker_in_swarm(self, swarm_uuid, worker_uuid):
    """
    Unregister worker at the swarm
    :param swarm_uuid: UUID of the swarm
    :param worker_uuid: UUID of the worker that should be removed
    :return:
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    # Lazy %-style args instead of eager string concatenation (same output).
    llogger.debug("Remove worker: %s from swarm: %s", worker_uuid, swarm_uuid)
    self.swarm.remove_worker_from_list(worker_uuid)
def worker_status():
    """
    The status of a specific worker. The worker can be defined by --worker_uuid
    and represented as a table.

    Resolves the host interface, connects to the Swarmrob daemon over Pyro,
    then looks the worker up in the swarm status and prints two tables
    (worker status and its service list). Prints an error and returns early
    when the interface, the daemon connection, or the worker lookup fails.
    :return:
    """
    llogger = local_logger.LocalLogger()
    llogger.log_call(sys._getframe().f_code.co_name)
    # Parse --interface and --worker_uuid from the command line.
    params = CMDParser(program_path="master worker_status",
                       description="Print the status of a single worker.",
                       arguments=[Argument.INTERFACE, Argument.WORKER_UUID]).parse_arguments()
    try:
        network_info = network.NetworkInfo(params.interface)
    except NetworkException:
        # Invalid interface: list valid options and bail out.
        puts(
            colored.red(
                "Host interface not valid. Specify a different host interface."
            ))
        puts(
            colored.red("Possible options are: " +
                        " ".join(network.get_interface_list())))
        llogger.debug(
            "Missing host interface. Add one with option --interface.")
        return
    try:
        swarmrob_daemon_proxy = pyro_interface.get_daemon_proxy(
            network_info.ip_address)
    except NetworkException as e:
        # Daemon unreachable: report and (presumably) hint how to start it
        # -- NOTE(review): confirm check_daemon_running only prints a hint.
        puts(colored.red(str(e)))
        daemon.check_daemon_running(network_info.interface)
        return
    swarm_info = jsonpickle.decode(
        swarmrob_daemon_proxy.get_swarm_status_as_json())
    # NOTE(review): reaches into the private _worker_list of the decoded swarm.
    worker_list = list(dict(swarm_info._worker_list).items())
    worker_info = None
    # Linear scan for the worker whose UUID matches the requested one.
    for _, worker_list_val in worker_list:
        worker = jsonpickle.decode(worker_list_val.get_info_as_json())
        if str(worker.uuid) == params.worker_uuid:
            worker_info = worker
    if worker_info is None:
        puts(colored.red("No worker found with id " + params.worker_uuid))
        return
    print(table_builder.worker_status_to_table(worker_info))
    print(table_builder.service_list_to_table(worker_info))
def add_deployment(self, deployment_key, deployment_value):
    """
    Add a deployment configuration to the service, expanding environment
    variables in both key and value
    :param deployment_key: Key of the deployment configuration
    :param deployment_value: Value of the deployment configuration
    :return:
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    expanded_key = os.path.expandvars(str(deployment_key))
    expanded_value = os.path.expandvars(str(deployment_value))
    self._deploy[expanded_key] = expanded_value
def unregister_worker_at_master(self, swarm_uuid_as_json, worker_uuid_as_json):
    """
    RPC method for unregistering a worker in the swarm
    :param swarm_uuid_as_json: UUID of the swarm as JSON
    :param worker_uuid_as_json: UUID of the worker as JSON
    :return: Swarm status as JSON
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    swarm_uuid = jsonpickle.decode(swarm_uuid_as_json)
    worker_uuid = jsonpickle.decode(worker_uuid_as_json)
    llogger.debug("Try to unregister worker: %s", worker_uuid)
    swarm_engine.SwarmEngine().unregister_worker_in_swarm(swarm_uuid, worker_uuid)
    return self.get_swarm_status_as_json()
def __init__(self, interface=None):
    """
    Initialises NetworkInfo by collecting network information
    :param interface: interface used by the network; falls back to the
        default interface when None
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    if interface is None:
        self._interface = get_default_interface()
        # Lazy %-style logging instead of eager string concatenation.
        llogger.debug("Using default interface %s", self._interface)
    else:
        self._interface = interface
    self._ip_address = get_ip_of_interface(self._interface)
def allocate_static(services, workers):
    """
    Statically assign every service to the single available worker.
    :param services: sequence of services to allocate
    :param workers: mapping of worker keys to worker objects; must contain
        exactly one entry
    :return: dict mapping the single worker to the list of all services,
        or None when there is not exactly one worker
    """
    llogger = local_logger.LocalLogger()
    llogger.log_call(sys._getframe().f_code.co_name)
    if len(workers) != 1:
        return None
    llogger.debug("Only one worker available. Allocation mode = static")
    # Hoist the loop-invariant single worker out of the loop; iterate the
    # services directly instead of via range(len(...)).
    worker = list(workers.values())[0]
    service_list = list()
    for service in services:
        service_list.append(service)
        llogger.debug('Worker %s assigned to service %s.', worker, service)
    return {worker: service_list}
def __init__(self):
    """
    Initialization of an empty Service object with no id, tag or configuration
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    self._id = None
    self._tag = None
    # Configuration containers all start out empty.
    self._environment = {}
    self._deploy = {}
    self._dependsOn = set()
    self._volumes = {}
    self._devices = []
def add_device(self, device_source, device_dest, mode='rwm'):
    """
    Add a device to the service (e.g. /dev/usb1:/dev/usb1)
    :param device_source: Source of the device on the host system
    :param device_dest: Destination of the device on the virtualized system
    :param mode: Access permissions of the device (Default: rwm)
    :return:
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    # Build "source:dest:mode" and expand any environment variables in it.
    mapping = "{}:{}:{}".format(str(device_source), str(device_dest), str(mode))
    self._devices.append(os.path.expandvars(mapping))
def get_bandwidth(self, repository):
    """
    RPC method for returning the network bandwidth of the worker
    :param repository: repository that should be used to get the network bandwidth
    :return: download bandwidth, or 0 when the measurement fails
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    try:
        measurement = network.check_network_bandwidth_of_repository(repository)
        return measurement.get("download")
    except NetworkException:
        # Treat an unreachable repository as zero bandwidth.
        return 0
def create_new_swarm(self, new_master, predefined_uuid=None):
    """
    Factory method for creating a new swarm. Initialises the underlying
    Docker swarm on the master's interface and stores the result on self.
    :param new_master: Object of the new master
    :param predefined_uuid: Predefined UUID of the swarm
    :return: Object of the new swarm
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    docker_iface = docker_interface.DockerInterface()
    docker_iface.init_docker_swarm(new_master.interface)
    self.swarm = swarm.Swarm(predefined_uuid, new_master)
    return self.swarm
def generate_service_sink_arcs(wc, worker_count, service_count):
    """
    Build the arc arrays connecting every service node to the sink node of
    the min-cost-flow graph: each arc has capacity 1 and cost 0.
    :param wc: first service node index (next free index after the worker
        nodes) -- TODO confirm against generate_source_worker_arcs
    :param worker_count: number of worker nodes in the graph
    :param service_count: number of service nodes in the graph
    :return: tuple (start_nodes, end_nodes, capacities, costs) of the
        service->sink arcs
    """
    llogger = local_logger.LocalLogger()
    llogger.log_call(sys._getframe().f_code.co_name)
    # Seed arc: end node is service_count + 1 + worker_count here, while the
    # loop below uses service_count + wc -- NOTE(review): confirm both
    # expressions denote the same sink node index.
    en_arcs_leading_into_sink = [service_count + 1 + worker_count]
    cap_arcs_leading_into_sink = [1]
    costs_arcs_leading_into_sink = [0]
    sn_arcs_leading_into_sink = [wc]
    # One unit-capacity, zero-cost arc from each remaining service node into
    # the sink node.
    for sc in range(wc + 1, service_count + wc):
        sn_arcs_leading_into_sink.append(sc)
        en_arcs_leading_into_sink.append(service_count + wc)
        cap_arcs_leading_into_sink.append(1)
        costs_arcs_leading_into_sink.append(0)
    return sn_arcs_leading_into_sink, en_arcs_leading_into_sink, cap_arcs_leading_into_sink,\
        costs_arcs_leading_into_sink
def main():
    """
    Main method of the worker. Dispatches the first command-line argument to
    the matching command and prints usage hints for unknown commands.
    :return:
    """
    llogger = local_logger.LocalLogger()
    llogger.log_call(sys._getframe().f_code.co_name)
    args = Args()
    command = str(args.get(1))
    try:
        switch_command(command)
    except KeyError:
        with indent(4, quote='>>'):
            puts(colored.red(command + " is not a valid command"))
            puts(colored.red("Type 'swarmrob help' for a command list"))
def join_docker_swarm(self, master_address, interface, join_token):
    """
    Join this node to an existing docker swarm.
    :param master_address: address of the swarm master
    :param interface: address to advertise on
    :param join_token: token authorizing the join
    :raises DockerException: when the join fails or the docker API errors
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    try:
        success = self._docker_env.swarm.join(
            remote_addrs=[master_address],
            join_token=join_token,
            advertise_addr=interface)
    except docker.errors.APIError as api_error:
        # Chain the original API error instead of discarding it with a bare raise.
        raise DockerException("Unable to join docker swarm") from api_error
    if not success:
        raise DockerException(
            "Unable to join docker swarm with token " + join_token)
def unregister_daemon_at_nameservice(self, host_ip):
    """
    RPC method for unregistering the daemon at the nameservice
    :param host_ip: IP address where the Pyro4 name server is located
    :return:
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    try:
        name_server = Pyro4.locateNS(host_ip)
        name_server.remove(SWARMROBD_IDENTIFIER)
    except Pyro4.errors.NamingError:
        # No name server reachable means there is nothing to unregister.
        llogger.debug(
            "Status Daemon: Daemon is not running (Pyro4 NamingError)")
def switch_command(cmd):
    """
    Switch command function of the Swarmrob Master CLI
    :param cmd: name of the command to run
    :return: return value of the command handler
    :raises KeyError: when cmd is not a known command
    """
    llogger = local_logger.LocalLogger()
    llogger.log_call(sys._getframe().f_code.co_name)
    # Dict dispatch: unknown commands raise KeyError for the caller to handle.
    handler = {
        'init': init_swarm,
        'swarm_status': swarm_status,
        'worker_status': worker_status,
        'start_swarm': start_swarm_by_compose_file,
        'help': show_help,
    }[cmd]
    return handler()
def add_env(self, env_key, env_value):
    """
    Add a key/value pair of an environment variable to the service,
    expanding environment references in both key and value
    :param env_key: Key of the environment variable
    :param env_value: Value of the environment variable
    :return:
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    expanded_key = os.path.expandvars(str(env_key))
    expanded_value = os.path.expandvars(str(env_value))
    self._environment[expanded_key] = expanded_value
def parse_service_env(env_set, srv):
    """
    Parse environment variables of the service
    :param env_set: Set of "NAME=VALUE" environment variable strings
    :param srv: Current single service of the EDF
    :return: Updated service object
    """
    llogger = local_logger.LocalLogger()
    llogger.log_call(sys._getframe().f_code.co_name)
    for env_entry in env_set:
        parsed = parse("{env.name}={env.value}", env_entry)
        env_name = parsed['env.name']
        env_value = parsed['env.value']
        srv.add_env(env_name, env_value)
        llogger.debug("Parsed Environment Variable: %s=%s", env_name, env_value)
    return srv
def generate_source_worker_arcs(worker_count):
    """
    Build the arc arrays connecting the source node (node 0) to every worker
    node: one unit-capacity, zero-cost arc per worker, with worker nodes
    numbered 1..worker_count.
    :param worker_count: number of worker nodes in the graph
    :return: tuple (next_node_index, start_nodes, end_nodes, capacities, costs)
    """
    llogger = local_logger.LocalLogger()
    llogger.log_call(sys._getframe().f_code.co_name)
    # At least one arc is always emitted, even when worker_count < 2.
    arc_count = max(1, worker_count)
    sn_arcs_out_of_source = [0] * arc_count
    en_arcs_out_of_source = list(range(1, arc_count + 1))
    cap_arcs_out_of_source = [1] * arc_count
    costs_arcs_out_of_source = [0] * arc_count
    # Next free node index after the worker nodes.
    wc = arc_count + 1
    return wc, sn_arcs_out_of_source, en_arcs_out_of_source, cap_arcs_out_of_source, costs_arcs_out_of_source
def add_service(self, service_key, service_object):
    """
    Add service to service composition and mark it as not yet allocated
    :param service_key: Key of the service
    :param service_object: Object of the service
    :return:
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    llogger.debug("Add service: %s", service_key)
    llogger.debug("\n" + service_object.format_service_definition_as_table())
    key = str(service_key)
    self._services[key] = service_object
    # No worker assigned yet: None marks the service as an open allocation.
    self._allocation[key] = None
def get_boolean(self, section, option):
    """
    Returns the value under section and option in the config file as a boolean
    :param section: Section Enum
    :param option: Option Enum
    :return: True for 'true' (any case) or '1'; False otherwise or when unset
    """
    llogger = local_logger.LocalLogger()
    llogger.log_method_call(self.__class__.__name__, sys._getframe().f_code.co_name)
    val = self.get(section, option)
    # Missing values are treated as False.
    return val is not None and (val.lower() == 'true' or val == '1')
def allocate_dynamic(services, workers, hardware_matrix, cost_matrix, capacity_matrix):
    """
    Allocate services to workers by solving a min-cost-flow problem.
    :param services: sequence of services to allocate
    :param workers: mapping of available workers; must be non-empty
    :param hardware_matrix: hardware suitability matrix (services x workers)
    :param cost_matrix: cost matrix (services x workers)
    :param capacity_matrix: capacity matrix, or None for an all-ones default
    :return: allocation produced by the solver, or None when no worker exists
    """
    llogger = local_logger.LocalLogger()
    llogger.log_call(sys._getframe().f_code.co_name)
    if len(workers) < 1:
        return None
    if capacity_matrix is None:
        # numpy.int was removed in NumPy 1.24; the builtin int is the
        # documented replacement and produces the same default integer dtype.
        capacity_matrix = numpy.ones((len(services), len(workers)),
                                     dtype=int).tolist()
    start_nodes, end_nodes, capacities, costs, supply = get_min_cost_flow_params(
        len(workers), len(services), hardware_matrix, cost_matrix,
        capacity_matrix)
    min_cost_flow = create_simple_min_flow_cost(start_nodes, end_nodes,
                                                capacities, costs, supply)
    return solve_using_min_flow_cost(min_cost_flow, services, workers)