def save_def(self, path=None):
    if path is None:
        path = self.project["path"]

    annotations = self.project.get("annotations")
    if annotations is None:
        annotations = {}

    p = {
        "id": self.project["id"],
        "assembly": self.project["assembly"],
        "files": [os.path.relpath(fpath, path) for fpath in self.project["files"]],
        "storage_objects": [o for o in self.project["storage_objects"]],
        "db": os.path.relpath(self.project["db"], path),
        "annotations": annotations
    }

    for key in self.project:
        if key not in ["id", "assembly", "files", "db", "annotations"]:
            p[key] = self.project[key]

    with open(os.path.join(path, "project.conf"), "w") as f:
        json.dump(p, f, indent=4, sort_keys=True)

    temp_path = self.project["temp_path"]
    if os.path.exists(temp_path):  # for debugging purposes
        with open(os.path.join(temp_path, "project.conf"), "w") as f:
            json.dump(Data.create(self.project).to_native(), f, indent=4, sort_keys=True)
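
# Illustration (not from the source): save_def() stores the "files" and "db"
# entries relative to the project path via os.path.relpath. The paths below
# are made up.
import os.path

project_path = "/data/projects/p1"             # hypothetical project path
db_path = "/data/projects/p1/project.db"       # hypothetical absolute db path
print(os.path.relpath(db_path, project_path))  # -> "project.db"
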
def __init__(self, rule, base_path=None, platform=None):
    rule = Data.create(rule)

    self.on = rule.get("on", {})
    if isinstance(self.on, basestring):
        self.on = dict(task=self.on)
    if platform is not None:
        self.on["platform"] = platform

    self.dels = rule.get("del", default=Data.list)
    if not Data.is_list(self.dels):
        raise Exception("Expected a list of strings for del operations of rule: {}".format(repr(rule)))
    for k in self.dels:
        if not isinstance(k, basestring):
            raise Exception("Expected a list of strings for del operations of rule: {}".format(repr(rule)))

    self.set = rule.get("set", default=Data.list)
    if not Data.is_list(self.set):
        raise Exception("Expected a list of tuples [key, value] for set operations of rule: {}".format(repr(rule)))
    for s in self.set:
        if not Data.is_list(s) or len(s) != 2:
            raise Exception("Expected a list of tuples [key, value] for set operations of rule: {}".format(repr(rule)))

    self.merge = rule.get("merge")
    if isinstance(self.merge, basestring):
        if not os.path.isabs(self.merge):
            if base_path is None:
                raise Exception("Configuration rule merge path should be absolute path: {}".format(self.merge))
            else:
                self.merge = os.path.join(base_path, self.merge)
        if not os.path.isfile(self.merge):
            raise Exception("Configuration rule merge path not found: {}".format(self.merge))
        self.merge = ConfigLoader(os.path.join(base_path or "", self.merge)).load()

    if self.merge is not None and not Data.is_element(self.merge):
        raise Exception("Expected a dictionary for merge operation of rule: {}".format(repr(rule)))
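
# Illustration (not from the source): the shape of rule mapping this constructor
# accepts. The key names "on", "del", "set" and "merge" come from the code above;
# the concrete values are made up.
example_rule = {
    "on": {"task": "align"},                   # or just the task name as a string
    "del": ["wok.platform.jobs.max_cores"],    # configuration keys to delete
    "set": [["log.level", "debug"]],           # [key, value] pairs to set
    "merge": "conf/extra.conf"                 # absolute path, or relative to base_path
}
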
def initialize(conf=None, format=None, datefmt=None, level=None):
    """
    Initialize the logging system.

    If conf is a dictionary then the parameters considered for configuration are:
    - format: logger format
    - datefmt: date format
    - loggers: list of tuples (name, conf) used to configure loggers

    If conf is a list then only the loggers are configured.
    If conf is a string then the default logger is configured for that logging level.
    """
    global _initialized

    if conf is None:
        conf = Data.element()
    elif not isinstance(conf, basestring):
        conf = Data.create(conf)

    if Data.is_list(conf):
        loggers_conf = conf
        conf = Data.element()
    elif Data.is_element(conf):
        loggers_conf = conf.get("loggers", default=Data.list)
    elif isinstance(conf, basestring):
        loggers_conf = Data.list([["", conf]])
        conf = Data.element()

    format = format or conf.get("format", _DEFAULT_FORMAT)
    datefmt = datefmt or conf.get("datefmt", _DEFAULT_DATEFMT)

    logging.basicConfig(format=format, datefmt=datefmt)

    for (log_name, log_conf) in loggers_conf:
        init_logger(log_name, conf=log_conf)

    if level is not None:
        init_logger("", conf=level)

    _initialized = True
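
# Illustration (not from the source): the three accepted forms of `conf`,
# per the docstring above. Logger names and levels are made up.
initialize("info")                                        # string: level for the default logger
initialize([["wok", "debug"], ["sqlalchemy", "warn"]])    # list: only loggers are configured
initialize({                                              # dictionary: full form
    "format": "%(asctime)s %(name)s %(levelname)s: %(message)s",
    "datefmt": "%Y-%m-%d %H:%M:%S",
    "loggers": [["wok", "debug"]]
})
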
def init_logger(logger, conf):
    """
    Initialize a logger from configuration. Configuration can be:
    - A string referring to the log level
    - A dictionary with the following parameters:
      - level: log level
      - handlers: list of log handlers or just a handler.
        Each handler has the following parameters:
        - type
        - ...: each handler type has its own set of parameters

    Supported handlers:
    - smtp: send logs by email. Parameters:
      - host
      - port (optional)
      - user
      - pass
      - from
      - to
      - subject
      - level
      - format: can be a simple string or a list of strings that will be joined with '\n'
    """
    if isinstance(logger, basestring):
        logger = get_logger(logger)

    if isinstance(conf, basestring):
        conf = Data.element(dict(level=conf))
    else:
        conf = Data.create(conf)

    level = conf.get("level")
    if level is not None:
        logger.setLevel(get_level(level))

    handlers_conf = conf.get("handlers", default=Data.list)
    if Data.is_element(handlers_conf):
        handlers_conf = Data.list([handlers_conf])

    for handler_conf in handlers_conf:
        handler = get_handler(logger, handler_conf)
        logger.addHandler(handler)
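
# Illustration (not from the source): a handler configuration using the smtp
# parameters listed in the docstring above. The host, addresses and logger name
# are made up.
init_logger("wok", conf={
    "level": "info",
    "handlers": {                       # a single handler element is also accepted
        "type": "smtp",
        "host": "smtp.example.org",
        "from": "wok@example.org",
        "to": "admin@example.org",
        "subject": "[wok] error report",
        "level": "error",
        "format": ["%(asctime)s %(name)s", "%(levelname)s: %(message)s"]
    }
})
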
def _partition_task(self, task):
    """
    Partition the input data for a task into work items.
    It is an iterator of WorkItems.
    """

    # Calculate input sizes and the minimum wsize
    psizes = []
    mwsize = sys.maxint
    for port in task.in_ports:
        psize = 0
        for data_ref in port.data.refs:
            port_data = task.platform.data.open_port_data(self.name, data_ref)
            data_ref.size = port_data.size()
            psize += data_ref.size
        port.data.size = psize
        psizes += [psize]
        pwsize = port.wsize
        self._log.debug("[{}] {}: size={}, wsize={}".format(self.name, port.cname, psize, pwsize))
        if pwsize < mwsize:
            mwsize = pwsize

    if len(psizes) == 0:
        # Submit a task for the module without input ports information
        workitem = WorkItemNode(parent=task, index=0, namespace=task.namespace)

        out_ports = []
        for port in task.out_ports:
            port_data = port.data.partition()
            out_ports += [dict(name=port.name, data=port_data.to_native())]

        workitem.partition["ports"] = Data.create({"out": out_ports})

        yield workitem
    else:
        # Check whether all inputs have the same size
        psize = psizes[0]
        for i in xrange(1, len(psizes)):
            if psizes[i] != psize:
                psize = -1
                break

        # Partition the data on input ports
        if psize == -1:
            num_partitions = 1
            self._log.warn("[{}] Unable to partition a task with input ports of different size".format(task.cname))
        else:
            if mwsize == 0:
                num_partitions = 1
                self._log.warn("[{}] Empty port, no partitioning".format(task.cname))
            else:
                num_partitions = int(math.ceil(psize / float(mwsize)))
                maxpar = task.maxpar
                self._log.debug("[{}] {}: maxpar={}".format(self.name, task.cname, maxpar))
                if maxpar > 0 and num_partitions > maxpar:
                    mwsize = int(math.ceil(psize / float(maxpar)))
                    num_partitions = int(math.ceil(psize / float(mwsize)))
                self._log.debug("[{}] {}: num_par={}, psize={}, mwsize={}".format(
                    self.name, task.cname, num_partitions, psize, mwsize))

        start = 0
        for i in xrange(num_partitions):
            workitem = WorkItemNode(parent=task, index=i, namespace=task.namespace)

            end = min(start + mwsize, psize)
            size = end - start

            in_ports = []
            for port in task.in_ports:
                #workitem.in_port_data.append((port.name, port.data.slice(start, size)))
                port_data = port.data.slice(start, size)
                in_ports += [dict(name=port.name, data=port_data.to_native())]

            out_ports = []
            for port in task.out_ports:
                #workitem.out_port_data.append((port.name, port.data.partition()))
                port_data = port.data.partition()
                out_ports += [dict(name=port.name, data=port_data.to_native())]

            workitem.partition["ports"] = Data.create({"in": in_ports, "out": out_ports})

            self._log.debug("[{}] {}[{:04d}]: start={}, end={}, size={}".format(
                self.name, task.cname, i, start, end, size))

            start += mwsize

            yield workitem
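
# Illustration (not from the source): the partitioning arithmetic used above,
# with made-up numbers. psize is the total input size, mwsize the minimum
# work-item size across ports, and maxpar an optional cap on parallelism.
import math

psize, mwsize, maxpar = 1000, 30, 8

num_partitions = int(math.ceil(psize / float(mwsize)))       # 34 work items of ~30 units
if maxpar > 0 and num_partitions > maxpar:
    mwsize = int(math.ceil(psize / float(maxpar)))           # grow each slice to 125 units
    num_partitions = int(math.ceil(psize / float(mwsize)))   # 8 work items
print(num_partitions, mwsize)  # -> 8 125
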
def process_result_value(self, value, dialect):
    return Data.create(json.loads(value)) if value is not None else None
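
# Illustration (not from the source): process_result_value above matches the
# decoding half of a SQLAlchemy TypeDecorator. A minimal self-contained sketch
# of such a JSON column type follows; the class name, impl type and the
# bind-side encoding are assumptions, not taken from the source.
import json
from sqlalchemy import types

class JsonType(types.TypeDecorator):
    impl = types.Text

    def process_bind_param(self, value, dialect):
        # encode on the way into the database
        return json.dumps(value) if value is not None else None

    def process_result_value(self, value, dialect):
        # decode on the way out (the original additionally wraps the result with Data.create)
        return json.loads(value) if value is not None else None
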
def unmarshall(self, raw):
    value = json.loads(raw)
    if self.__enhanced and isinstance(value, (list, dict)):
        value = Data.create(value)
    return value
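
# Illustration (not from the source): effect of the __enhanced flag. Decoded
# lists and dicts are wrapped with Data.create so callers get the Data API used
# throughout these snippets (get with defaults, expand_vars, to_native);
# scalars are returned as-is. The example value is made up.
#
#   unmarshall('{"wok": {"log": {"level": "debug"}}}')
#       enhanced     -> Data element wrapping the nested dict
#       not enhanced -> plain dict
#   unmarshall('42') -> 42 in both cases
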
def unmarshall(self, raw):
    raw = raw.replace(r"\n", "\n")
    value = pickle.loads(raw)
    if self.__enhanced and isinstance(value, (list, dict)):
        value = Data.create(value)
    return value
def __init__(self):
    # Get task key and storage configuration
    cmd_conf = OptionsConfig(required=["case", "task", "index", "data.type", "storage.type"])

    # Register signals
    self._signal_names = {}
    for signame in [x for x in dir(signal) if x.startswith("SIG")]:
        try:
            signum = getattr(signal, signame)
            signal.signal(signum, self.__signal_handler)
            self._signal_names[signum] = signame
        except:
            pass

    # command line configuration
    case_name = cmd_conf["case"]
    task_cname = cmd_conf["task"]
    workitem_index = cmd_conf["index"]

    # initialize the data provider
    provider_conf = cmd_conf["data"]
    self._provider = data_provider_factory.create(provider_conf)
    self._provider.start()

    # initialize storage
    storage_conf = cmd_conf["storage"]
    self.storage = storage_factory.create(storage_conf)
    self.storage = self.storage.get_container(case_name)

    # load the module and task descriptors
    task_desc = self._provider.load_task(case_name, task_cname)
    workitem_desc = self._provider.load_workitem(case_name, task_cname, workitem_index)
    partition = workitem_desc["partition"]

    # setup task configuration
    self.conf = Data.create(task_desc["conf"])
    self.conf["__task_index"] = workitem_index
    self.conf.expand_vars()

    # setup task attributes
    self.case = workitem_desc["case"]
    self.task = workitem_desc["task"]
    self.id = workitem_desc["cname"]
    self.name = workitem_desc["name"]
    self.index = workitem_index

    # initialize decorators
    self._main = None
    self._sources = []
    self._foreach = None
    self._begin = None
    self._end = None

    self._start_time = 0
    self._end_time = self._start_time

    # initialize task logging
    log_conf = self.conf.get("logging")
    logger.initialize(log_conf)
    self.logger = logger.get_logger(self.name)

    self.logger.debug("Task descriptor: {}".format(Data.create(task_desc)))
    self.logger.debug("WorkItem descriptor: {}".format(Data.create(workitem_desc)))

    # Initialize input stream
    self._stream = Stream(self._provider, task_desc["stream"])

    # Initialize ports
    self._ports = {}
    self._in_ports = []
    self._out_ports = []
    self._open_ports = {}
    if "ports" in task_desc and "ports" in partition:
        port_descriptors = Data.create(task_desc["ports"])

        for port_desc in port_descriptors.get("in", default=list):
            port_desc["mode"] = PORT_MODE_IN
            self._ports[port_desc["name"]] = port_desc
            self._in_ports += [port_desc]

        for port_desc in port_descriptors.get("out", default=list):
            port_desc["mode"] = PORT_MODE_OUT
            self._ports[port_desc["name"]] = port_desc
            self._out_ports += [port_desc]

        port_descriptors = Data.create(partition["ports"])

        for port_desc in port_descriptors.get("in", default=list):
            task_port_desc = self._ports[port_desc["name"]]
            task_port_desc["data"] = port_desc["data"]

        for port_desc in port_descriptors.get("out", default=list):
            task_port_desc = self._ports[port_desc["name"]]
            task_port_desc["data"] = port_desc["data"]

    # Get hostname
    try:
        import socket
        self.hostname = socket.gethostname()
    except:
        self.hostname = "unknown"

    # The context field is free to be used by the task user to
    # save variables related with the whole task life cycle.
    # By default it is initialized with a dictionary but can be
    # overwritten with any value by the user. Wok will never use it.
    self.context = {}
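
# Illustration (not from the source): this constructor runs inside the work-item
# process launched by the command builder shown further below, which passes the
# required options as -D key=value pairs, roughly:
#
#   python <platform_script_path> -D case=case1 -D task=align -D index=0 \
#       -D data.type=... -D storage.type=...
#
# OptionsConfig collects them, so cmd_conf["case"], cmd_conf["data"] and
# cmd_conf["storage"] are available here. "case1" and "align" are made-up names.
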
def get_project_conf(conf, project, key, default=None, dtype=None):
    value = conf.get(key, default=default, dtype=dtype)
    if not Data.is_element(project):
        project = Data.create(project)
    return project.get(key, default=value, dtype=dtype)
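
# Illustration (not from the source): lookup precedence, with made-up keys and
# values. conf must support get(key, default=..., dtype=...), i.e. a Data element.
#
#   conf = Data.create({"annotations": "global.db"})
#
#   get_project_conf(conf, {"id": "p1"}, "annotations")
#       -> "global.db"   (project has no value, falls back to conf)
#
#   get_project_conf(conf, {"id": "p1", "annotations": "p1.db"}, "annotations")
#       -> "p1.db"       (the project value wins over conf)
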
class NativeCommmandBuilder(CommmandBuilder):

    def _plain_conf(self, value, path=None):
        if path is None:
            path = []

        if not Data.is_element(value):
            yield (".".join(path), value)
        else:
            for key in value.keys():
                for k, v in self._plain_conf(value[key], path + [key]):
                    yield (k, v)

    def prepare(self, case, task, index):
        execution = task.execution
        exec_conf = execution.conf
        if exec_conf is None:
            exec_conf = Data.element()

        if "script_path" not in exec_conf:
            raise MissingValueError("script_path")

        script_path = exec_conf["script_path"]
        lang = exec_conf.get("language", "python")

        case_conf = case.conf.clone().expand_vars()

        # Environment variables
        env = Data.element()
        #for k, v in os.environ.items():
        #    env[k] = v
        env.merge(task.conf.get(rtconf.TASK_ENV))
        env.merge(exec_conf.get("env"))

        # Default module script path
        platform_project_path = task.conf.get(rtconf.PROJECT_PATH, case.project.path)
        flow_path = os.path.abspath(os.path.dirname(task.flow_path))
        flow_rel_path = os.path.relpath(flow_path, case.project.path)
        platform_script_path = os.path.join(platform_project_path, flow_rel_path, script_path)

        env[ENV_PROJECT_PATH] = platform_project_path
        env[ENV_FLOW_PATH] = flow_rel_path
        env[ENV_SCRIPT_PATH] = script_path
        env[ENV_PLATFORM_SCRIPT_PATH] = platform_script_path

        script = []

        sources = task.conf.get(rtconf.TASK_SOURCES, default=Data.list)
        if isinstance(sources, basestring):
            sources = Data.list([sources])

        for source in sources:
            script += ['source "{}"'.format(source)]

        if lang == "python":
            virtualenv = task.conf.get(rtconf.TASK_PYTHON_VIRTUALENV)
            if virtualenv is not None:
                #script += ["set -x"]
                #script += ["echo Activating virtualenv {} ...".format(virtualenv)]
                script += ['source "{}"'.format(os.path.join(virtualenv, "bin", "activate"))]
                #script += ["set +x"]
                #script += ["echo Running workitem ..."]

            cmd = [task.conf.get(rtconf.TASK_PYTHON_BIN, "python")]
            cmd += ["${}".format(ENV_PLATFORM_SCRIPT_PATH)]

            lib_path = task.conf.get(rtconf.TASK_PYTHON_LIBS)
            if lib_path is not None:
                if Data.is_list(lib_path):
                    lib_path = ":".join(lib_path)

                if "PYTHONPATH" in env:
                    env["PYTHONPATH"] = lib_path + ":" + env["PYTHONPATH"]
                else:
                    env["PYTHONPATH"] = lib_path
        else:
            raise LanguageError(lang)

        cmd += ["-D", "case={}".format(case.name),
                "-D", "task={}".format(task.cname),
                "-D", "index={}".format(index)]

        #for key, value in self._storage_conf(workitem.case.engine.storage.basic_conf):
        #    cmd += ["-D", "storage.{}={}".format(key, value)]

        for key, value in self._plain_conf(Data.create(task.platform.data.context_conf(CTX_EXEC))):
            cmd += ["-D", "data.{}={}".format(key, value)]

        for key, value in self._plain_conf(task.platform.storage.context_conf(CTX_EXEC)):
            cmd += ["-D", "storage.{}={}".format(key, value)]

        script += [" ".join(cmd)]

        return "\n".join(script), env.to_native()
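
# Illustration (not from the source): what _plain_conf does to a nested
# configuration, shown with a simplified plain-dict analogue (the real method
# walks Data elements). Keys and values are made up.
def plain_conf(value, path=()):
    if not isinstance(value, dict):
        yield ".".join(path), value
    else:
        for key, child in value.items():
            for pair in plain_conf(child, path + (key,)):
                yield pair

print(list(plain_conf({"type": "files", "files": {"path": "/tmp/data"}})))
# -> [('type', 'files'), ('files.path', '/tmp/data')]  (ordering may vary)

# These dotted pairs become the "-D data.<key>=<value>" and
# "-D storage.<key>=<value>" options on the work-item command line above.
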
def build_conf(self):
    "Called from run() to prepare configuration before expansion of values"

    conf_files = self.required_conf_files

    user_conf_file = os.path.join(self.conf_path, "user.conf")
    if os.path.exists(user_conf_file):
        conf_files += [user_conf_file]

    conf_files += self.user_conf_files

    # Check that configuration files exist
    missing_conf_files = [cf for cf in conf_files if not os.path.exists(cf)]
    if len(missing_conf_files) > 0:
        self.log.error("Configuration files not found:\n{}".format(
            "\n".join("  {}".format(cf) for cf in missing_conf_files)))
        exit(-1)

    # Build the configuration
    self.conf_builder = ConfigBuilder()

    if self.instance_name is None:
        if self.workspace is not None:
            self.instance_name = self.workspace
        else:
            self.instance_name = datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f")

    self.conf_builder.add_value("wok.instance.name", self.instance_name)

    # Files
    for cf in conf_files:
        self.conf_builder.add_file(cf)

    # General conf
    self.conf_builder.add_value("workspace", self.workspace)
    self.conf_builder.add_value("workflows_path", self.workflows_path)
    if self.max_cores > 0:
        self.conf_builder.add_value("wok.platform.jobs.max_cores", self.max_cores)
    if self.args.log_level is not None:
        self.conf_builder.add_value("log.level", self.args.log_level)
        self.conf_builder.add_value("wok.engine", self.args.log_level)

    # Incorporate user and extra configuration data
    for data in self.extra_conf_data + self.user_conf_data:
        try:
            pos = data.index("=")
            key = data[0:pos]
            value = data[pos + 1:]
            try:
                v = json.loads(value)
            except:
                v = value
            self.conf_builder.add_value(key, Data.create(v))
        except:
            raise Exception("Wrong configuration data: KEY=VALUE expected but found '{}'".format(data))
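
# Illustration (not from the source): the KEY=VALUE strings accepted by the loop
# above. Values are parsed as JSON when possible, otherwise kept as raw strings.
# The keys appear in this method; the values are made up.
extra_conf_data = [
    "wok.platform.jobs.max_cores=4",        # json.loads -> int 4
    "workspace=default",                    # not valid JSON -> raw string "default"
    'log.level="debug"'                     # JSON string -> "debug"
]
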