Example #1
    def _process_leases(cls, leases):
        from datetime import datetime
        import time
        import dateutil.parser 
        import calendar
        ret = list()
        try:
            for lease in leases:
                lease['resource'] = lease.pop('component_id')
                lease['slice']    = lease.pop('slice_id')

                # UTC YYYY-MM-DD hh:mm:ss -> timestamp
                Log.tmp("PARSING - convert UTC %s to timestamp", lease['start_time'])
                lease['start_time'] = calendar.timegm(dateutil.parser.parse(lease['start_time']).utctimetuple())
                Log.tmp("PARSING - convert UTC to timestamp %s", lease['start_time'])
                if 'duration' in lease:
                    lease['duration'] = int(lease['duration'])
                if 'end_time' in lease:
                    lease['end_time'] = int(lease['end_time'])
                if 'end_time' not in lease and set(['start_time', 'duration']) <= set(lease.keys()):
                    lease['end_time'] = lease['start_time'] + lease['duration'] * cls.get_grain()
                elif 'duration' not in lease and set(['start_time', 'end_time']) <= set(lease.keys()):
                    lease['duration'] = (lease['end_time'] - lease['start_time']) / cls.get_grain()

                # XXX GRANULARITY Hardcoded for the moment
                if 'granularity' not in lease:
                    lease['granularity'] = cls.get_grain() 

                ret.append(lease)
        except Exception, e:
            print "EEE::", e
            import traceback
            traceback.print_exc()
        return ret
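
The two time conversions used in this parser (UTC string to timestamp here, and the reverse in Example #9 below) can be exercised in isolation. A minimal sketch, assuming python-dateutil is available; the sample date and the 60-second grain are illustrative values only:

import calendar
from datetime import datetime
import dateutil.parser

# UTC "YYYY-MM-DD hh:mm:ss" -> timestamp, as in _process_leases above
start_time = calendar.timegm(dateutil.parser.parse("2014-01-01 00:00:00").utctimetuple())
assert start_time == 1388534400

# timestamp -> UTC "YYYY-MM-DD hh:mm:ss", as in manifold_to_sfa_leases (Example #9)
assert datetime.utcfromtimestamp(start_time).strftime('%Y-%m-%d %H:%M:%S') == "2014-01-01 00:00:00"

# end_time completion: with a hypothetical grain of 60 seconds, a lease holding
# start_time and duration = 30 grains gets end_time = start_time + 30 * 60
assert start_time + 30 * 60 == calendar.timegm(dateutil.parser.parse("2014-01-01 00:30:00").utctimetuple())
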
Example #2
 def send(self, record):
     """
     \brief Calls the parent callback with the record passed as parameter
     """
     if self.identifier:
         Log.record("[#%04d] [ %r ]" % (self.identifier, record))
     self.callback(record)
Example #3
    def import_file_h(self, directory, platform, gateway_type):
        """
        Import a .h file (see manifold.metadata/*.h)
        Args:
            directory: A String instance containing the directory storing the .h files
                Example: STATIC_ROUTES_DIR = "/usr/share/manifold/metadata/"
            platform: A String instance containing the name of the platform
                Examples: "ple", "senslab", "tdmi", "omf", ...
            gateway_type: A String instance containing the type of the Gateway
                Examples: "SFA", "XMLRPC", "MaxMind", "tdmi"
                See:
                    sqlite3 /var/myslice/db.sqlite
                    > select gateway_type from platform;
        Returns:
            A list of Announce instances, each Announce embeds a Table instance.
            This list may be empty.
        """
        # Check path
        filename = os.path.join(directory, "%s.h" % gateway_type)
        if not os.path.exists(filename):
            filename = os.path.join(directory,
                                    "%s-%s.h" % (gateway_type, platform))
            if not os.path.exists(filename):
                Log.debug(
                    "Metadata file '%s' not found (platform = %r, gateway_type = %r)"
                    % (filename, platform, gateway_type))
                return []

        # Read input file
        Log.debug("Platform %s: Processing %s" % (platform, filename))
        return import_file_h(filename, platform)
Example #4
    def parse_manifest(cls, rspec, rspec_version = 'GENI 3', slice_urn = None, start_time = None):
        rspec = RSpec(rspec, version=rspec_version)

        _resources   = cls._get_resources(rspec)
        _nodes       = cls._get_nodes(rspec)
        # XXX Not supported yet
        #_channels    = cls._get_channels(rspec)
        #_links       = cls._get_links(rspec)
        _leases      = cls._get_leases(rspec)

        # XXX Until WiLab supports Leases
        end_time     = cls._get_expiration(rspec)
        if start_time is None:
            start_time = 1388530800


        resources = list()
        resources.extend(cls._process_resources(_resources))
        resources.extend(cls._process_nodes(_nodes))
        #resources.extend(cls._process_channels(_channels))
        #resources.extend(cls._process_links(_links))

        Log.warning("XXX Until WiLab supports Leases")
        # XXX Generate Leases based on the Resources instead of Leases
        leases = cls._process_leases(resources, slice_urn, start_time, end_time)
        return {'resource': resources, 'lease': leases }
Example #5
    def _process_lease(cls, lease):
        # Keep only necessary information in leases
        new_lease = dict()
        authority = 'urn:publicid:IDN+wilab2.ilabt.iminds.be+authority+cm'
        if (not 'component_manager_id' in lease) or (lease['component_manager_id'] != authority):
            Log.warning("Authority is not WiLab - Ignore lease = ",lease)
            #return None
        new_lease['resource'] = lease.pop('component_id')
        new_lease['lease_id'] = None
        new_lease['slice']    = lease.pop('slice_urn')
        new_lease['start_time'] = int(lease['start_time'])
        if 'duration' in lease:
            new_lease['duration'] = int(lease['duration'])
        if 'end_time' in lease:
            new_lease['end_time'] = int(lease['end_time'])
        if 'end_time' not in lease and set(['start_time', 'duration']) <= set(lease.keys()):
            new_lease['end_time'] = lease['start_time'] + lease['duration'] * cls.get_grain()
        elif 'duration' not in lease and set(['start_time', 'end_time']) <= set(lease.keys()):
            new_lease['duration'] = (lease['end_time'] - lease['start_time']) / cls.get_grain()

        # XXX GRANULARITY Hardcoded for the moment
        if 'granularity' not in lease:
            new_lease['granularity'] = cls.get_grain()
        else:
            new_lease['granularity'] = lease['granularity']

        return new_lease
Example #6
def make_sub_graph(metadata, relevant_fields):
    """
    \brief Create a reduced graph based on metadata.graph.
        We only keep vertices having a key in relevant_fields.
    \param metadata An object exposing a "graph" attribute: a DiGraph instance (the full 3nf graph)
    \param relevant_fields A dictionary {Table: Fields}
        indicating for each Table which Field(s) are relevant.
    \return The corresponding sub-3nf-graph
    """
    g = metadata.graph
    sub_graph = DiGraph()
    copy = dict()
    vertices_to_keep = set(relevant_fields.keys())

    # Copy relevant vertices from g
    for u in vertices_to_keep:
        copy_u = Table.make_table_from_fields(u, relevant_fields[u])
        copy[u] = copy_u
        sub_graph.add_node(copy_u)  # no data on nodes

    # Copy relevant arcs from g
    for u, v in g.edges():
        try:
            copy_u, copy_v = copy[u], copy[v]
        except KeyError:
            continue

        sub_graph.add_edge(copy_u, copy_v, deepcopy(g.edge[u][v]))
        Log.debug("Adding copy of : %s" % metadata.print_arc(u, v))

    return sub_graph
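
The copy-and-filter pattern of make_sub_graph can be reproduced with plain networkx objects. A minimal sketch, assuming networkx is installed and using string nodes in place of Table instances (make_sub_graph_sketch and the node names are hypothetical):

from copy import deepcopy
import networkx as nx

def make_sub_graph_sketch(g, vertices_to_keep):
    # Keep only the listed vertices, and the arcs joining two kept vertices.
    sub_graph = nx.DiGraph()
    for u in vertices_to_keep:
        sub_graph.add_node(u)
    for u, v, data in g.edges(data=True):
        if u in vertices_to_keep and v in vertices_to_keep:
            sub_graph.add_edge(u, v, **deepcopy(data))
    return sub_graph

g = nx.DiGraph()
g.add_edge("user", "slice", relation="1..N")
g.add_edge("slice", "resource", relation="N..N")
sub = make_sub_graph_sketch(g, set(["slice", "resource"]))
assert list(sub.edges()) == [("slice", "resource")]
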
Example #7
    def __init__(self, allowed_capabilities = None):
        """
        Create an Interface instance.
        Args:
            allowed_capabilities: A Capabilities instance, or None.
        """
        # Register the list of Gateways
        Log.info("Registering gateways")
        register_gateways()

        # self.platforms is list(dict) where each dict describes a platform.
        # See platform table in the Storage.
        self.platforms = Storage.execute(Query().get("platform").filter_by("disabled", "=", False)) #, format = "object")

        # self.allowed_capabilities is a Capabilities instance (or None)
        self.allowed_capabilities = allowed_capabilities

        # self.metadata is a {String : list(Announce)} dictionary mapping each
        # platform name (= namespace) to its corresponding Announces.
        self.metadata = dict() 

        # self.gateways is a {String : Gateway} which maps a platform name to
        # the appropriate Gateway instance.
        self.gateways = dict()

        self.policy = Policy(self)

        self.boot()
Example #8
    def get_dialect_and_field_info(self, table):
        t = self.config[table]
        filename = t['filename']

        with open(filename, 'rb') as f:
            sample = f.read(1024)
            dialect = csv.Sniffer().sniff(sample)
            self.has_headers[table] = csv.Sniffer().has_header(sample)

        HAS_FIELDS_OK, HAS_FIELDS_KO, HAS_FIELDS_ERR = range(1, 4)
        HAS_TYPES_OK, HAS_TYPES_KO, HAS_TYPES_ERR = range(1, 4)

        has_fields = HAS_FIELDS_KO
        has_types = HAS_TYPES_KO

        if isinstance(t, dict):
            if 'fields' in t:
                try:
                    field_names, field_types = [], []
                    for name, type in t['fields']:
                        field_names.append(name)
                        has_fields = HAS_FIELDS_OK
                        field_types.append(type)
                        has_types = HAS_TYPES_OK
                except Exception, e:
                    Log.warning(
                        "Wrong format for fields in platform configuration")
                    has_fields = HAS_FIELDS_ERR
                    has_types = HAS_TYPES_ERR
Example #9
    def manifold_to_sfa_leases(cls, leases, slice_urn):
        from datetime import datetime
        sfa_leases = []
        for lease in leases:
            sfa_lease = dict()
            # sfa_lease_id = 
            sfa_lease['component_id'] = lease['resource']
            sfa_lease['slice_id']     = slice_urn
            sfa_lease['start_time']   = lease['start_time']
            
            grain = cls.get_grain() # in seconds
            min_duration = cls.get_min_duration() # in seconds
            
            # We either need end_time or duration
            # end_time is chosen if both are specified!
            if 'end_time' in lease:
                sfa_lease['end_time'] = lease['end_time']

                duration =  (int(lease['end_time']) - int(lease['start_time'])) / grain
                if duration < min_duration:
                    raise Exception, 'duration < min_duration'
                sfa_lease['duration'] = duration
            elif 'duration' in lease:
                sfa_lease['duration'] = lease['duration']
                sfa_lease['end_time'] = lease['start_time'] + lease['duration']
            else:
                raise Exception, 'Lease specifies neither end_time nor duration'
            # timestamp -> UTC YYYY-MM-DD hh:mm:ss
            Log.tmp("manifold to sfa - convert timestamp %s to UTC", sfa_lease['start_time'])
            sfa_lease['start_time'] = datetime.utcfromtimestamp(int(sfa_lease['start_time'])).strftime('%Y-%m-%d %H:%M:%S')
            Log.tmp("manifold to sfa - convert timestamp to UTC %s", sfa_lease['start_time'])
            sfa_lease['end_time'] = datetime.utcfromtimestamp(int(sfa_lease['end_time'])).strftime('%Y-%m-%d %H:%M:%S')
            sfa_leases.append(sfa_lease)
        return sfa_leases
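
The duration arithmetic shared by this parser and Examples #1/#5 can be checked with concrete numbers. A small sketch, assuming a hypothetical grain of 60 seconds (the real value comes from cls.get_grain()):

grain      = 60                      # seconds per grain (illustrative value)
start_time = 1388570400              # 2014-01-01 10:00:00 UTC
end_time   = 1388574000              # 2014-01-01 11:00:00 UTC

duration = (end_time - start_time) / grain
assert duration == 60                # 60 grains of one minute each

# Conversely, given start_time and duration, Example #1 recovers end_time:
assert start_time + duration * grain == end_time
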
Example #10
    def optimize_projection(self, fields):
        """
        Propagate a SELECT clause through a FROM Node.
        Args:
            fields: A set of String instances (queried fields).
        """
        if self.capabilities.projection or self.capabilities.fullquery:
            self.query.select().select(fields)

        if self.capabilities.projection:
            # Push fields into the From node
            return self
        else:
            # Provided fields is set to None if it corresponds to SELECT *
            provided_fields = self.get_query().get_select()

            # Test whether this From node can return every queried Fields.
            if provided_fields and fields - provided_fields:
                Log.warning(
                    "From::optimize_projection: some requested fields (%s) are not provided by {%s} From node. Available fields are: {%s}"
                    % (', '.join(list(fields - provided_fields)),
                       self.get_query().get_from(), ', '.join(
                           list(provided_fields))))

            # If this From node returns more Fields than those explicitly queried
            # (because the projection capability is not enabled), create an additional
            # Projection Node above this From Node in order to guarantee that
            # we only return queried fields
            if not provided_fields or provided_fields - fields:
                return Projection(self, fields)
                #projection.query = self.query.copy().filter_by(filter) # XXX
            return self
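
The branching at the end of optimize_projection reduces to two set differences. A minimal sketch with plain sets (the field names are illustrative):

fields          = set(["hostname", "asn", "city"])      # queried fields
provided_fields = set(["hostname", "asn", "country"])   # what the From node can return

# Queried fields not provided by the From node -> a warning is logged
assert fields - provided_fields == set(["city"])

# Fields returned but not queried -> a Projection node is added above the From node
assert provided_fields - fields == set(["country"])
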
Example #11
    def execute_query(self, namespace, query, annotations, is_deferred=False):
        if annotations:
            user = annotations.get('user', None)
        else:
            user = None

        # Code duplication with Interface() class
        if namespace is not None:
            allowed_platforms = [
                p['platform'] for p in self.platforms
                if p['platform'] == namespace
            ]
        else:
            allowed_platforms = [p['platform'] for p in self.platforms]

        qp = QueryPlan()
        qp.build(query, self.g_3nf, allowed_platforms,
                 self.allowed_capabilities, user)
        Log.tmp("QUERY PLAN")
        qp.dump()

        self.instanciate_gateways(qp, user)
        Log.info("QUERY PLAN:\n%s" % (qp.dump()))

        return self.execute_query_plan(namespace, query, annotations, qp,
                                       is_deferred)
Example #12
 def register(package):
     prefix = package.__name__ + "."
     for importer, modname, ispkg in pkgutil.iter_modules(package.__path__, prefix):
         try:
             module = __import__(modname, fromlist="dummy")
         except Exception, e:
             Log.info("Could not load %s : %s" % (modname, e))
Example #13
    def right_callback(self, record):
        """
        \brief Process records received from the right child
        \param record A dictionary representing the received record 
        """
        if record.is_last():
            self._on_right_done()
            return

        # Skip records missing information necessary to join
#DEPRECATED|        if self.predicate.value not in record or not record[self.predicate.value]:
#Log.tmp("%s <= %s" %(set(self.predicate.get_value()) , set(record.keys())))
        if not set([self.predicate.get_value()]) <= set(record.keys()) \
        or Record.is_empty_record(record, set([self.predicate.get_value()])):
            Log.warning("Missing LEFTJOIN predicate %s in right record %r: ignored" % \
                    (self.predicate, record))
            return

        # We expect to receive information about keys we asked, and only these,
        # so we are confident the key exists in the map
        # XXX Dangers of duplicates ?
        key = Record.get_value(record, self.predicate.value)
        left_records = self.left_map.get(key, None)
        if left_records:
            for left_record in self.left_map[key]:
                left_record.update(record)
                self.send(left_record)

            del self.left_map[key]
Example #14
    def boot(self):
        """
        Boot the Interface (prepare metadata, etc.).
        """
        assert isinstance(self.platforms, list), "Invalid platforms"

        for platform in self.platforms:
            # Get platform configuration
            platform_config = platform['config']
            if platform_config:
                platform_config = json.loads(platform_config)

            platform_name = platform['platform']
            args = [None, platform_name, None, platform_config, {}, None]
            gateway = Gateway.get(platform['gateway_type'])(*args)
            try:
                announces = gateway.get_metadata()
            except Exception, e:
                # ROUTERV2
                Log.warning("Cannot get metadata for platform %s: %s" % (platform_name, e))
                # XXX Disable platform ?
                announces = list()
            self.metadata[platform_name] = list() 
            for announce in announces:
                self.metadata[platform_name].append(announce)
Example #15
 def dump(self):
     """
     (Debug function). Dump the ExploreTask embedded in this Stack
     using the logger.
     """
     for priority in [TASK_11, TASK_1Nsq, TASK_1N]:
         Log.tmp("PRIO %d : %r" % (priority, self.tasks[priority]))
Example #16
    def __init__(self,
                 router=None,
                 platform=None,
                 query=None,
                 config=None,
                 user_config=None,
                 user=None,
                 format='record'):

        assert format in ['record', 'object'
                          ], 'Unknown return format for gateway SQLAlchemy'
        if format == 'object':
            Log.tmp("Objects should not be used")
        self.format = format

        super(SQLAlchemyGateway, self).__init__(router, platform, query,
                                                config, user_config, user)

        from manifold.models.base import Base
        Base = declarative_base(cls=Base)

        # Models
        from manifold.models.platform import Platform as DBPlatform
        from manifold.models.user import User as DBUser
        from manifold.models.account import Account as DBAccount
        from manifold.models.session import Session as DBSession

        engine = create_engine(config['url'], echo=False, pool_recycle=3600)

        Base.metadata.create_all(engine)

        Session = sessionmaker(bind=engine)
        self.db = Session()
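
The engine/Base/Session wiring above can be reproduced standalone. A minimal sketch, assuming SQLAlchemy is installed, using an in-memory SQLite database and a hypothetical Platform model in place of the real manifold.models.* classes:

from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Platform(Base):
    # Hypothetical model standing in for manifold.models.platform.Platform
    __tablename__ = 'platform'
    platform_id = Column(Integer, primary_key=True)
    platform    = Column(String)

engine = create_engine('sqlite://', echo=False)   # in-memory database
Base.metadata.create_all(engine)                  # same call as in the gateway

Session = sessionmaker(bind=engine)
db = Session()
db.add(Platform(platform='ple'))
db.commit()
assert db.query(Platform).count() == 1
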
Example #17
    def local_query_get(self, query):
        #
        # XXX How are we handling subqueries
        #

        fields = query.fields
        # XXX else tap into metadata

        cls = self.map_object[query.object]

        # Transform a Filter into a sqlalchemy expression
        _filters = get_sqla_filters(cls, query.filters)
        _fields = xgetattr(cls, query.fields) if query.fields else None

        res = db.query(*_fields) if _fields else db.query(cls)
        if query.filters:
            for _filter in _filters:
                res = res.filter(_filter)

        # Do we need to limit to the user's own results
        try:
            if self.user and cls.restrict_to_self and self.user[
                    'email'] != ADMIN_USER:
                res = res.filter(cls.user_id == self.user['user_id'])
        except AttributeError:
            pass
        try:
            tuplelist = res.all()
            return tuplelist
        except SQLAlchemyError, e:
            Log.error("SQLAlchemyError trying to rollback db session: %s" % e)
            db.rollback()
            self.local_query_get(query)
            return list()
Example #18
 def remove_pid_file(self):
     """
     \brief Remove the pid file (internal usage)
     """
     # The lock file is implicitly released while removing the pid file
     Log.debug("Removing %s" % Options().pid_filename)
     if os.path.exists(Options().pid_filename):
         os.remove(Options().pid_filename)
Example #19
 def xrn_hook(resource):
     urn = resource.get('component_id')
     if not urn:
         Log.warning('No urn !!!')
         return resource
     resource['urn'] = urn
     resource['hrn'] = urn_to_hrn(urn)[0]
     return resource
Example #20
def main():
    XMLRPCDaemon.init_options()
    Log.init_options()
    Daemon.init_options()
    DBStorage.init_options()
    Options().parse()
    
    XMLRPCDaemon().start()
Example #22
 def callback_error(self, error):
     """
     (Internal usage) See ManifoldGateway::receive_impl.
     Args:
         error: The corresponding error message.
     """
     Log.error("Error during Manifold call: %r" % error)
     self.send(LastRecord())
Example #23
 def inject_at(self, query):
     """
     Update From Nodes of the QueryPlan in order to take into account AT
     clause involved in a user Query.
     Args:
         query: The Query issued by the user.
     """
     Log.warning("HARDCODED: AT injection in FROM Nodes: %r" % self.froms)
     for from_node in self.froms:
         from_node.query.timestamp = query.get_timestamp()
Example #24
 def delete_cache(self, annotations=None):
     try:
         Log.tmp("----------> DELETE CACHE PER USER <------------")
         Log.tmp(annotations)
         if annotations is not None:
             user_id = annotations['user']['user_id']
             if user_id in self._cache_user:
                 del self._cache_user[user_id]
     except:
         import traceback
         traceback.print_exc()
Example #25
 def get_slicename(self, filters):
     # XXX If slicename is not in WHERE of the Query it will cause an error
     filters = {'value': filters['slice_hrn']}
     fields = ['name']
     plc_api = xmlrpclib.ServerProxy(API_URL, allow_none=True)
     result = plc_api.GetSliceTags(self._get_auth(), filters, fields)
     if not result:
         Log.warning("No Slice name for this hrn ", filters)
         return None
     else:
         return result[0]['name']
Example #26
    def _process_resources(cls, resources):
        ret = list()

        for resource in resources:
            Log.tmp("LOIC - SFAWrap parser type = %s , resource = %r" % (type(resource),resource))
            new_resource = cls._process_resource(resource)
            if not new_resource:
                continue
            # We suppose we have children of dict that cannot be serialized
            # with xmlrpc, let's make dict
            ret.append(cls.make_dict_rec(new_resource))
        return ret
Example #27
    def start(self):
        """
        Fetch records stored in the postgresql database according to self.query
        """

        Log.tmp("Received: %s" % self.query)

        # Results of the query (TODO)
        rows = []

        # Sending rows to parent processing node in the AST
        map(self.send, Records(rows))
        self.send(LastRecord())
Example #28
 def build_rspec(cls,
                 slice_hrn,
                 resources,
                 leases,
                 flowspace,
                 vms,
                 rspec_version=None):
     Log.warning("NitosBroker Parser build")
     rspec = []
     cls.rspec_add_header(rspec)
     lease_map = cls.rspec_add_leases(rspec, leases)
     cls.rspec_add_resources(rspec, resources, lease_map)
     cls.rspec_add_footer(rspec)
     return "\n".join(rspec)
Example #29
    def get_cache(self, annotations=None):
        user = annotations.get('user') if annotations else None
        user_id = user.get('user_id') if user else None

        if not user_id:
            # Use global cache
            Log.warning("Use of global cache for query, annotations=%r" %
                        (annotations, ))
            return self._cache

        # Use per-user cache
        if user_id not in self._cache_user:
            self._cache_user[user_id] = Cache()
        return self._cache_user[user_id]
Example #30
 def get_location(cls, city):
     location = None
     try:
         #from geopy.geocoders import Nominatim
         #geolocator = Nominatim()
         #from geopy.geocoders import GeoNames
         #geolocator = GeoNames()
         from geopy.geocoders import GoogleV3
         geolocator = GoogleV3()
       
         location = geolocator.geocode(city)
     except Exception, e:
         Log.warning("geopy.geocoders failed to get coordinates for city = ", city)
         Log.warning(e)
     return location
Example #31
 def make_lock_file(self):
     """
     \brief Prepare the lock file required to manage the pid file
         Initialize Options().lock_file
     """
     if Options().pid_filename and not Options().no_daemon:
         Log.debug("Daemonizing using pid file '%s'" % Options().pid_filename)
         Options().lock_file = lockfile.FileLock(Options().pid_filename)
         if Options().lock_file.is_locked():
             Log.error("'%s' is already running ('%s' is locked)." % (Options().get_name(), Options().pid_filename))
             self.terminate()
         Options().lock_file.acquire()
     else:
         Options().lock_file = None
Example #32
    def left_callback(self, record):
        """
        \brief Process records received by the left child
        \param record A dictionary representing the received record 
        """
        if record.is_last():
            # left_done. Injection is not the right way to do this.
            # We need to insert a filter on the key in the right member
            predicate = Predicate(self.predicate.get_value(), included,
                                  self.left_map.keys())

            if self.right.get_query().action == ACTION_CREATE:
                # XXX If multiple insert, we need to match the right ID with the
                # right inserted items
                if len(self.left_map.keys()) > 1:
                    raise NotImplementedError

                # Pass the id as a param
                keys = self.left_map.keys()
                if not keys:
                    # No JOIN possible
                    self.left_done = True
                    self._on_right_done()
                    return
                key = self.left_map.keys()[0]
                query = self.right.get_query()
                query.params[self.predicate.get_value()] = key
            else:  # pass the id as a filter which is the normal behaviour
                self.right = self.right.optimize_selection(
                    Filter().filter_by(predicate))
                self.right.set_callback(
                    self.right_callback)  # already done in __init__ ?

            self.left_done = True
            self.right.start()
            return

        # Directly send records missing information necessary to join
        # XXXX !!! XXX XXX XXX
        if not Record.has_fields(record, self.predicate.get_field_names()):
            Log.warning("Missing LEFTJOIN predicate %s in left record %r : forwarding" % \
                    (self.predicate, record))
            self.send(record)
            return

        # Store the result in a hash for joining later
        hash_key = Record.get_value(record, self.predicate.key)
        if not hash_key in self.left_map:
            self.left_map[hash_key] = []
        self.left_map[hash_key].append(record)
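
Taken together, left_callback (here) and right_callback (Example #13) implement a hash join: left records are buffered in left_map, keyed on the predicate value, and each right record is then merged into the matching left records. A minimal sketch of that flow with plain dictionaries (field names and values are illustrative):

left_records  = [{"slice": "ple.upmc.myslice", "user": "A"},
                 {"slice": "ple.upmc.other",   "user": "B"}]
right_records = [{"slice": "ple.upmc.myslice", "resource": "node1"}]
key = "slice"   # the predicate value

# left_callback: buffer left records, hashed on the join key
left_map = {}
for record in left_records:
    left_map.setdefault(record[key], []).append(record)

# right_callback: merge each right record into the matching left records
joined = []
for record in right_records:
    for left_record in left_map.get(record[key], []):
        left_record.update(record)
        joined.append(left_record)

assert joined == [{"slice": "ple.upmc.myslice", "user": "A", "resource": "node1"}]
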
Example #33
 def check_python_daemon(self):
     """
     \brief Check whether python-daemon is properly installed
     \return True if everything is fine, False otherwise
     """
     # http://www.python.org/dev/peps/pep-3143/    
     ret = False 
     try:
         import daemon
         getattr(daemon, "DaemonContext")
         ret = True 
     except AttributeError, e:
         print e
         # daemon and python-daemon conflict with each other
         Log.critical("Please install python-daemon instead of daemon. Remove daemon first.")
     return ret
Example #34
    def start(self):
        """
        \brief Start the daemon
        """
        # Check whether daemon module is properly installed
        if not self.check_python_daemon():
            self.terminate()
        import daemon

        # Prepare Options().lock_file
        self.make_lock_file()

        # Prepare the daemon context
        dcontext = daemon.DaemonContext(
            detach_process     = (not Options().no_daemon),
            working_directory  = Options().working_directory,
            pidfile            = Options().lock_file if not Options().no_daemon else None,
            stdin              = sys.stdin,
            stdout             = sys.stdout,
            stderr             = sys.stderr,
            uid                = Options().uid,
            gid                = Options().gid,
            files_preserve     = Log().files_to_keep
        )

        # Prepare signal handling to stop properly if the daemon is killed 
        # Note that signal.SIGKILL can't be handled:
        # http://crunchtools.com/unixlinux-signals-101/
        dcontext.signal_map = {
            signal.SIGTERM : self.signal_handler,
            signal.SIGQUIT : self.signal_handler,
            signal.SIGINT  : self.signal_handler
        }

        if Options().debugmode:
            self.main()
        else:
            with dcontext:
                self.make_pid_file()
                try:
                    self.main()
                except Exception, why:
                    Log.error("Unhandled exception in start: %s" % why)
Example #35
 def stop(self):
     Log.debug("Stopping '%s'" % self.daemon_name)