    def advance_lcs(self, resource_id, transition_event):
        """
        attempt to advance the lifecycle state of a resource
        @resource_id the resource id
        @new_state the new lifecycle state
        """

        assert (type("") == type(resource_id))
        assert (type(LCE.PLAN) == type(transition_event))

        if LCE.RETIRE == transition_event:
            log.debug("Using RR.retire")
            ret = self.RR.retire(resource_id)
            return ret
        else:
            log.debug("Moving resource life cycle with transition event=%s",
                      transition_event)

            ret = self.RR.execute_lifecycle_transition(
                resource_id=resource_id, transition_event=transition_event)

            log.info("lifecycle transition=%s resulted in lifecycle state=%s",
                     transition_event, str(ret))

        return ret
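A minimal usage sketch for advance_lcs above, assuming a service instance `svc` that provides this method and the LCE constants from the source (the resource id is illustrative):

    resource_id = "0123456789abcdef"          # hypothetical id
    new_state = svc.advance_lcs(resource_id, LCE.DEVELOP)
    svc.advance_lcs(resource_id, LCE.RETIRE)  # RETIRE takes the RR.retire path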
Example #2
 def close(self):
     """
     Close any connections required for this datastore.
     """
     log.info("Closing connection to CouchDB")
     # Close every open connection in every session pool, then drop the pool.
     for conns in self.server.resource.session.conns.values():
         for conn in conns:
             conn.close()
     self.server.resource.session.conns = {}  # just in case we try to reuse this, for some reason
            def ret_fn(obj_id, subj_id):
                log.info("Dynamically creating association %s -> %s -> (1)%s",
                         isubj, ipred, iobj)

                # see if there are any other objects of this type and pred on this subject
                existing_objs, _ = self.RR.find_objects(subj_id,
                                                        ipred,
                                                        iobj,
                                                        id_only=True)

                if len(existing_objs) > 1:
                    raise Inconsistent(
                        "Multiple %s-%s objects found with the same %s subject with id='%s'"
                        % (ipred, iobj, isubj, subj_id))

                if len(existing_objs) > 0:
                    try:
                        log.debug("get_association gives")
                        log.debug(
                            self.RR.get_association(subj_id, ipred, obj_id))
                    except NotFound:
                        raise BadRequest(
                            "Attempted to add a second %s-%s association to a %s with id='%s'"
                            % (ipred, iobj, isubj, subj_id))
                    else:
                        log.debug(
                            "Create %s Association (single object): ALREADY EXISTS",
                            ipred)
                        return

                self.RR.create_association(subj_id, ipred, obj_id)
Example #5
    def prepare(self, py_b64, dest_filename=None):
        """
        perform syntax check and return uploader object
        """

        mytempfile = self.modules["tempfile"]
        myos = self.modules["os"]
        mysubprocess = self.modules["subprocess"]

        try:
            contents = base64.decodestring(py_b64)
        except Exception as e:
            return None, str(e)

        log.debug("creating tempfile with contents")
        f_handle, tempfilename = mytempfile.mkstemp()
        log.debug("writing contents to disk at '%s'", tempfilename)
        myos.write(f_handle, contents)
        myos.close(f_handle)  # release the descriptor once contents are written

        log.info("syntax checking file")
        py_proc = mysubprocess.Popen(
            ["python", "-m", "py_compile", tempfilename], stdout=mysubprocess.PIPE, stderr=mysubprocess.PIPE
        )

        py_out, py_err = py_proc.communicate()

        # clean up
        log.debug("removing tempfile at '%s'", tempfilename)
        myos.unlink(tempfilename)

        if 0 != py_proc.returncode:
            return None, ("Syntax check failed.  (STDOUT: %s) (STDERR: %s)" % (py_out, py_err))

        ret = self.uploader_object_factory(py_b64, dest_filename or tempfilename)

        return ret, ""
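A hedged sketch of how prepare() pairs with the uploader object it returns (compare upload() in Example #35; the service handle `svc` and the payload name are illustrative):

    uploader, err = svc.prepare(py_b64, dest_filename="driver_patch.py")
    if uploader is None:
        raise BadRequest("Syntax check failed: %s" % err)
    success, msg = uploader.upload()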
    def flush(self):
        if self.is_dirty(True):
            try:
                with HDFLockingFile(self.file_path, 'a') as f:
                    for k in list(self._dirty):
                        v = getattr(self, k)
    #                    log.debug('FLUSH: key=%s  v=%s', k, v)
                        if isinstance(v, Dictable):
                            prefix='DICTABLE|{0}:{1}|'.format(v.__module__, v.__class__.__name__)
                            value = prefix + pack(v.dump())
                        else:
                            value = pack(v)

                        f.attrs[k] = np.array([value])

                        # Update the hash_value in _hmap
                        self._hmap[k] = utils.hash_any(v)
                        # Remove the key from the _dirty set
                        self._dirty.remove(k)
            except IOError as ex:
                if "unable to create file (File accessability: Unable to open file)" in ex.message:
                    log.info('Issue writing to hdf file during master_manager.flush - this is not likely a huge problem: %s', ex.message)
                else:
                    raise

            super(BaseManager, self).__setattr__('_is_dirty',False)
Example #7
    def run(self):
        """
        Process entry point. Construct driver and start messaging loops.
        Periodically check messaging is going and parent exists if
        specified.
        """

        from mi.core.log import LoggerManager
        LoggerManager()

        log.info('Driver process started.')
        
        def shand(signum, frame):
            log.info('mi/core/instrument/driver_process.py DRIVER GOT SIGINT and is ignoring it...')
        signal.signal(signal.SIGINT, shand)

        if self.construct_driver():
            self.start_messaging()
            while self.messaging_started:
                if self.check_parent():
                    time.sleep(2)
                else:
                    self.stop_messaging()
                    break
            
        self.shutdown()
        time.sleep(1)
        os._exit(0)
Example #8
    def _match_devices(self, device_id, device_tree, site_ref_designator_map):

        # there will not be a port assignment for the top device
        if device_id == self.top_device._id:
            self._validate_models(self.top_site._id, self.top_device._id)
            self.match_list.append((self.top_site._id, self.top_device._id))

        tuple_list = device_tree[device_id]

        for (pt, child_id, ct) in tuple_list:
            log.debug("  tuple  - pt: %s  child_id: %s  ct: %s", pt, child_id, ct)

            # match this child device then if it has children, call _match_devices with this id

            # check that this device is represented in device tree and in port assignments
            if child_id in self.device_resources and child_id in self.deployment_obj.port_assignments:
                platform_port = self.deployment_obj.port_assignments[child_id]
                log.debug("device platform_port: %s", platform_port)

                # validate PlatformPort info for this device
                self._validate_port_assignments(child_id, platform_port)

                if platform_port.reference_designator in site_ref_designator_map:
                    matched_site = site_ref_designator_map[platform_port.reference_designator]
                    self._validate_models(matched_site, child_id)
                    log.info("match_list append site: %s  device: %s", matched_site, child_id)
                    self.match_list.append((matched_site, child_id))

                    #recurse on the children of this device
                    self._match_devices(child_id, device_tree, site_ref_designator_map)

            # otherwise this device can't be matched to a site
            else:
                self.unmatched_device_list.append(child_id)
Example #9
 def publish_callback(self, particle):
     for p in particle:
         try:
             p_obj = p.generate()
             log.info("Particle received: %s", p_obj)
             self._async_driver_event_sample(p_obj, None)
         except Exception:
             log.error("Error logging particle", exc_info=True)
Example #10
    def __init__(self, datastore_name=None, config=None, scope=None, profile=None, **kwargs):
        super(CouchbaseDataStore, self).__init__(datastore_name=datastore_name, config=config, scope=scope, profile=profile)

        if self.config.get("type", None) and self.config['type'] != "couchbase":
            raise BadRequest("Datastore server config is not couchbase: %s" % self.config)
        if self.datastore_name and self.datastore_name != self.datastore_name.lower():
            raise BadRequest("Invalid Couchbase datastore name: '%s'" % self.datastore_name)
        if self.scope and self.scope != self.scope.lower():
            raise BadRequest("Invalid Couchbase scope name: '%s'" % self.scope)

        # Connection
        self.username = self.username or ""
        self.password = self.password or ""
        if self.port == 5984:
            self.port = 8091
        self.api_port = get_safe(self.config, "api_port", "8092")

        connection_str = '%s:%s' % (self.host, self.port)
        log.info("Connecting to Couchbase server: %s (datastore_name=%s)", connection_str, self.datastore_name)
        self.server = Couchbase(connection_str, username=self.username, password=self.password)

        # Just to test existence of the datastore
        if self.datastore_name:
            try:
                ds, dsn = self._get_datastore()
            except NotFound:
                self.create_datastore()
                ds, _ = self._get_datastore()
Example #11
    def __init__(self,
                 failure_callback,
                 num_workers=1,
                 pidantic_dir=None,
                 working_dir=None):
        self.guid = create_guid()
        self.prep_queue = queue.Queue()
        self.work_queue = queue.Queue()
        self._pending_work = {}
        self._stashed_work = {}
        self._active_work = {}
        self._failures = {}
        self._do_stop = False
        self._count = -1
        self._shutdown = False
        self._failure_callback = failure_callback

        self.context = zmq.Context(1)
        self.prov_sock = self.context.socket(zmq.REP)
        self.prov_port = self._get_port(self.prov_sock)
        log.info('Provisioning url: tcp://*:{0}'.format(self.prov_port))

        self.resp_sock = self.context.socket(zmq.SUB)
        self.resp_port = self._get_port(self.resp_sock)
        self.resp_sock.setsockopt(zmq.SUBSCRIBE, '')
        log.info('Response url: tcp://*:{0}'.format(self.resp_port))

        self.num_workers = num_workers if num_workers > 0 else 1
        self.is_single_worker = self.num_workers == 1
        self.working_dir = working_dir or '.'
        self.pidantic_dir = pidantic_dir or './pid_dir'
        self.workers = []

        self._configure_workers()
Example #12
 def _run(self):
     try:
         args = self.srcname, self.select, self.reject
         # TODO Review this queue size
         # TODO Review reasoning behind using OrbreapThr vs. normal ORB API
         # I think it had something to do with orb.reap() blocking forever
         # on comms failures; maybe we could create our own orbreapthr
         # implementation?
         with OrbreapThr(*args, timeout=1, queuesize=10000) as orbreapthr:
             log.info("Connected to ORB %s %s %s" % (self.srcname, self.select,
                                                     self.reject, self.after))
             threadpool = ThreadPool(maxsize=1)
             try:
                 while True:
                     try:
                         success, value = threadpool.spawn(
                                 wrap_errors, (Exception,), orbreapthr.get, [], {}).get()
                         timestamp = ntp.now()
                         if not success:
                             raise value
                     except (Timeout, NoData) as e:
                         log.debug("orbreapthr.get exception %r" % type(e))
                     else:
                         if value is None:
                             raise Exception('Nothing to publish')
                         self._publish(value, timestamp)
             finally:
                 # This blocks until all threads in the pool return. That's
                 # critical; if the orbreapthr dies before the get thread,
                 # segfaults ensue.
                 threadpool.kill()
     except Exception:
         log.error("OrbPktSrc terminating due to exception", exc_info=True)
         raise
Example #13
    def _create_driver_plugin(self):
        try:
            # Ensure the egg cache directory exists. ooi.reflections will fail
            # somewhat silently when this directory doesn't exist.
            if not os.path.isdir(EGG_CACHE_DIR):
                os.makedirs(EGG_CACHE_DIR)

            log.debug("getting plugin config")
            uri = get_safe(self._dvr_config, 'dvr_egg')
            module_name = self._dvr_config['dvr_mod']
            class_name = self._dvr_config['dvr_cls']
            config = self._dvr_config['startup_config']
        except Exception:
            log.error('error in configuration', exc_info=True)
            raise

        egg_name = None
        egg_repo = None
        memento = self._get_state(DSA_STATE_KEY)


        log.warn("Get driver object: %s, %s, %s, %s, %s", class_name, module_name, egg_name, egg_repo, memento)
        if uri:
            egg_name = uri.split('/')[-1] if uri.startswith('http') else uri
            egg_repo = uri[0:len(uri)-len(egg_name)-1] if uri.startswith('http') else None

        log.info("instantiate driver plugin %s.%s", module_name, class_name)
        params = [config, memento, self.publish_callback, self.persist_state_callback, self.exception_callback]
        return EGG_CACHE.get_object(class_name, module_name, egg_name, egg_repo, params)
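To make the egg URI handling above concrete, a worked example (the repository host is illustrative):

    # uri      = 'http://sddevrepo.example.org/releases/driver-0.1-py2.7.egg'
    # egg_name = 'driver-0.1-py2.7.egg'                   (last path segment)
    # egg_repo = 'http://sddevrepo.example.org/releases'  (uri minus '/' + egg_name)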
Example #14
        def recv_evt_messages(driver_client):
            """
            A looping function that monitors a ZMQ SUB socket for asynchronous
            driver events. Can be run as a thread or greenlet.
            @param driver_client The client object that launches the thread.
            """
            context = zmq.Context()
            sock = context.socket(zmq.SUB)
            sock.connect(driver_client.event_host_string)
            sock.setsockopt(zmq.SUBSCRIBE, '')
            log.info('Driver client event thread connected to %s.' %
                     driver_client.event_host_string)

            driver_client.stop_event_thread = False
            #last_time = time.time()
            while not driver_client.stop_event_thread:
                try:
                    evt = sock.recv_pyobj(flags=zmq.NOBLOCK)
                    log.debug('got event: %s' % str(evt))
                    if driver_client.evt_callback:
                        driver_client.evt_callback(evt)
                except zmq.ZMQError:
                    time.sleep(.5)
                except Exception as e:
                    log.error(
                        'Driver client error reading from zmq event socket: ' +
                        str(e))
                    log.error('Driver client error type: ' + str(type(e)))
Example #15
def main():
    """
    This main routine will get the AMQP command-line parameters for the topic
    to watch.  It will make the connection to the host, bind the queue to the
    exchange and routing key passed in, and define the callback for logging.
    It will then listen for messages published to the queue and log them.
    """

    options = docopt(__doc__)
    host = options['<host>']
    exchange = options['<exchange>']
    routing_key = options['<routing_key>']

    connection = pika.BlockingConnection(pika.ConnectionParameters(host=host))
    channel = connection.channel()
    result = channel.queue_declare(exclusive=True)
    queue_name = result.method.queue

    channel.queue_bind(exchange=exchange,
                       queue=queue_name,
                       routing_key=routing_key)

    channel.basic_consume(callback,
                          queue=queue_name,
                          no_ack=True)

    log.info('Consuming and logging ' + routing_key + ' messages. ' +
             'To exit press CTRL+C')

    channel.start_consuming()
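The `callback` passed to basic_consume above is defined elsewhere; a minimal sketch, assuming pika's standard consumer signature:

    def callback(channel, method, properties, body):
        # Log every message delivered on the bound queue.
        log.info('Received message with routing key %s: %s', method.routing_key, body)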
Example #16
    def __init__(self, root, guid, name=None, tdom=None, sdom=None, bricking_scheme=None, auto_flush_values=True, **kwargs):
        """
        Constructor for Persistence Layer
        @param root: Where to save/look for HDF5 files
        @param guid: CoverageModel GUID
        @param name: CoverageModel Name
        @param tdom: Temporal Domain
        @param sdom: Spatial Domain
        @param kwargs:
        @return:
        """
        log.debug('Persistence GUID: %s', guid)
        root = root or '.'  # '' and None both mean the current directory

        self.master_manager = MasterManager(root, guid, name=name, tdom=tdom, sdom=sdom, global_bricking_scheme=bricking_scheme)

        self.auto_flush_values = auto_flush_values
        self.value_list = {}

        self.parameter_metadata = {} # {parameter_name: [brick_list, parameter_domains, rtree]}

        for pname in self.param_groups:
            log.debug('parameter group: %s', pname)
            self.parameter_metadata[pname] = ParameterManager(os.path.join(self.root_dir, self.guid, pname), pname)

        if self.master_manager.is_dirty():
            self.master_manager.flush()

        self.brick_dispatcher = BrickWriterDispatcher(self.write_failure_callback)
        self.brick_dispatcher.run()

        self._closed = False

        log.info('Persistence Layer Successfully Initialized')
Example #17
    def _generate_skeleton_config_block(self):
        log.info("Generating skeleton config block for %s",
                 self.agent_instance_obj.name)

        # merge the agent config into the default config
        agent_config = dict_merge(self._get_agent().agent_default_config,
                                  self.agent_instance_obj.agent_config, True)

        org_obj = self._generate_org()

        # Create agent_config.
        agent_config['instance_id'] = self.agent_instance_obj._id
        agent_config['instance_name'] = self.agent_instance_obj.name
        agent_config['org_governance_name'] = org_obj.org_governance_name if org_obj else ''
        agent_config['provider_id'] = org_obj._id if org_obj else ''
        agent_config['actor_id'] = self.actor_id
        agent_config['device_type'] = self._generate_device_type()
        agent_config['driver_config'] = self._generate_driver_config()
        agent_config['stream_config'] = self._generate_stream_config()
        agent_config['agent'] = self._generate_agent_config()
        agent_config['aparam_alerts_config'] = self._generate_alerts_config()
        agent_config['startup_config'] = self._generate_startup_config()
        agent_config['children'] = self._generate_children()

        log.info("DONE generating skeleton config block for %s",
                 self.agent_instance_obj.name)

        return agent_config
Example #18
        def recv_evt_messages(driver_client):
            """
            A looping function that monitors a ZMQ SUB socket for asynchronous
            driver events. Can be run as a thread or greenlet.
            @param driver_client The client object that launches the thread.
            """
            context = zmq.Context()
            sock = context.socket(zmq.SUB)
            sock.connect(driver_client.event_host_string)
            sock.setsockopt(zmq.SUBSCRIBE, '')
            log.info('Driver client event thread connected to %s.' %
                  driver_client.event_host_string)

            driver_client.stop_event_thread = False
            #last_time = time.time()
            while not driver_client.stop_event_thread:
                try:
                    evt = sock.recv_pyobj(flags=zmq.NOBLOCK)
                    log.debug('got event: %s' % str(evt))
                    if driver_client.evt_callback:
                        driver_client.evt_callback(evt)
                except zmq.ZMQError:
                    time.sleep(.5)
                except Exception as e:
                    log.error('Driver client error reading from zmq event socket: ' + str(e))
                    log.error('Driver client error type: ' + str(type(e)))
Example #19
    def publish_callback(self, particle):
        """
        Publish particles to the agent.

        TODO: currently we are generating JSON serialized objects;
        we should be able to send objects directly because we don't have
        the zmq boundary issue in this client.

        @return: number of records published
        """
        publish_count = 0
        try:
            for p in particle:
                # Can we use p.generate_dict() here?
                p_obj = p.generate()
                log.info("Particle received: %s", p_obj)
                self._async_driver_event_sample(p_obj, None)
                publish_count += 1
        except Exception as e:
            log.error("Error logging particle: %s", e, exc_info=True)

            # Reset the connection id because we can not ensure contiguous
            # data.
            self._asp.reset_connection()

            log.debug("Publish ResourceAgentErrorEvent from publisher_callback")
            self._event_publisher.publish_event(
                error_msg="Sample Parsing Exception: %s" % e,
                event_type='ResourceAgentErrorEvent',
                origin_type=self.ORIGIN_TYPE,
                origin=self.resource_id
            )

        return publish_count
Example #20
    def _find_existing_relationship(self, site_id, device_id, site_type=None, device_type=None):
        # look for an existing relationship between the site_id and another device.
        # if this site/device pair already exists, we leave it alone
        assert type("") == type(site_id) == type(device_id)

        log.debug("checking %s/%s pair for deployment", site_type, device_type)
        # return a pair that should be REMOVED, or None

        if site_type is None:
            site_type = self.resource_collector.get_resource_type(site_id)

        if device_type is None:
            device_type = self.resource_collector.get_resource_type(device_id)

        log.debug("checking existing %s hasDevice %s links", site_type, device_type)

        ret_remove = None
        ret_ignore = None

        try:
            found_device_id = self.RR2.find_object(site_id, PRED.hasDevice, device_type, True)

            if found_device_id == device_id:
                ret_ignore = (site_id, device_id)
            else:
                ret_remove = (site_id, found_device_id)
                log.info("%s '%s' already hasDevice %s", site_type, site_id, device_type)

        except NotFound:
            pass

        return ret_remove, ret_ignore
    def flush(self):
        if self.is_dirty(True):
            try:
                with h5py.File(self.file_path, 'a') as f:
                    for k in list(self._dirty):
                        v = getattr(self, k)
    #                    log.debug('FLUSH: key=%s  v=%s', k, v)
                        if isinstance(v, Dictable):
                            prefix='DICTABLE|{0}:{1}|'.format(v.__module__, v.__class__.__name__)
                            value = prefix + pack(v.dump())
                        else:
                            value = pack(v)

                        f.attrs[k] = value

                        # Update the hash_value in _hmap
                        self._hmap[k] = utils.hash_any(v)
                        # Remove the key from the _dirty set
                        self._dirty.remove(k)
            except IOError as ex:
                if "unable to create file (File accessability: Unable to open file)" in ex.message:
                    log.info('Issue writing to hdf file during master_manager.flush - this is not likely a huge problem: %s', ex.message)
                else:
                    raise

            super(BaseManager, self).__setattr__('_is_dirty',False)
Example #22
    def __init__(self,
                 url=None,
                 open_file=None,
                 parse_after=0,
                 *args,
                 **kwargs):
        """ raise exception if file does not meet spec, or is too large to read into memory """
        self._profile_index = 0
        self._record_index = 0
        self._upload_time = time.time()

        self._profiles = []
        self._parse_after = parse_after
        with open_file or open(url, 'rb') as f:
            f.seek(0, 2)
            size = f.tell()
            if size > MAX_INMEMORY_SIZE:
                raise ParserException('file is too big')
            f.seek(0)
            profile = self._read_profile(f)
            while profile:
                if profile['end'] > self._parse_after:
                    self._profiles.append(profile)
                profile = self._read_profile(f)
        log.info('parsed %s, found %d usable profiles', url,
                 len(self._profiles))
Example #23
        def got_event(evt, *args, **kwargs):
            if not self._active:
                log.warn("%r: got_event called but manager has been destroyed",
                         self._platform_id)
                return

            if evt.type_ != event_type:
                log.trace("%r: ignoring event type %r. Only handle %r directly",
                          self._platform_id, evt.type_, event_type)
                return

            if evt.sub_type != sub_type:
                log.trace("%r: ignoring event sub_type %r. Only handle %r",
                          self._platform_id, evt.sub_type, sub_type)
                return

            state = self._agent.get_agent_state()

            statuses = formatted_statuses(self.aparam_aggstatus,
                                          self.aparam_child_agg_status,
                                          self.aparam_rollup_status)

            invalidated_children = self._agent._get_invalidated_children()

            log.info("%r/%s: (%s) status report triggered by diagnostic event:\n"
                     "%s\n"
                     "%40s : %s\n",
                     self._platform_id, state, self.resource_id, statuses,
                     "invalidated_children", invalidated_children)
    def _generate_skeleton_config_block(self):
        log.info("Generating skeleton config block for %s", self.agent_instance_obj.name)

        # merge the agent config into the default config
        agent_config = dict_merge(self._get_agent().agent_default_config, self.agent_instance_obj.agent_config, True)

        org_obj = self._generate_org()

        # Create agent_config.
        agent_config['instance_id']          = self.agent_instance_obj._id
        agent_config['instance_name']        = self.agent_instance_obj.name
        agent_config['org_governance_name']  = org_obj.org_governance_name if org_obj else ''
        agent_config['provider_id']          = org_obj._id if org_obj else ''
        agent_config['actor_id']             = self.actor_id
        agent_config['device_type']          = self._generate_device_type()
        agent_config['driver_config']        = self._generate_driver_config()
        agent_config['stream_config']        = self._generate_stream_config()
        agent_config['agent']                = self._generate_agent_config()
        agent_config['aparam_alerts_config'] = self._generate_alerts_config()
        agent_config['startup_config']       = self._generate_startup_config()
        agent_config['children']             = self._generate_children()

        log.info("DONE generating skeleton config block for %s", self.agent_instance_obj.name)

        return agent_config
    def _get_deployment_csp_solutions(self, device_models, site_models):

        log.debug("creating a CSP solver to match devices and sites")
        problem = constraint.Problem()

        log.debug("adding variables to CSP - the devices to be assigned, and their range (possible sites)")
        for device_id in device_models.keys():
            device_model = device_models[device_id]
            assert type(device_model) == type("")
            assert all([type("") == type(s) for s in site_models])
            possible_sites = [s for s in site_models.keys() if device_model in site_models[s]]

            if not possible_sites:
                log.info("Device model: %s", device_model)
                log.info("Site models: %s", site_models)
                raise BadRequest("No sites were found in the deployment")

            problem.addVariable(mk_csp_var(device_id), possible_sites)

        log.debug("adding the constraint that all the variables have to pick their own site")
        problem.addConstraint(
            constraint.AllDifferentConstraint(), [mk_csp_var(device_id) for device_id in device_models.keys()]
        )

        log.debug("performing CSP solve")
        # this will be a list of solutions, each a dict of var -> value
        return problem.getSolutions()
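For readers unfamiliar with the python-constraint API used above, a minimal standalone sketch of the same pattern (device and site names are illustrative):

    import constraint

    problem = constraint.Problem()
    # each device may be assigned to any site whose models match
    problem.addVariable('device_1', ['site_a', 'site_b'])
    problem.addVariable('device_2', ['site_a', 'site_b'])
    # no two devices may claim the same site
    problem.addConstraint(constraint.AllDifferentConstraint(), ['device_1', 'device_2'])
    solutions = problem.getSolutions()
    # two solutions (order may vary):
    #   {'device_1': 'site_a', 'device_2': 'site_b'}
    #   {'device_1': 'site_b', 'device_2': 'site_a'}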
Example #26
    def run(self):
        """
        Process entry point. Construct driver and start messaging loops.
        Periodically check messaging is going and parent exists if
        specified.
        """

        from mi.core.log import LoggerManager
        LoggerManager()

        log.info('Driver process started.')

        def shand(signum, frame):
            log.info(
                'mi/core/instrument/driver_process.py DRIVER GOT SIGINT and is ignoring it...'
            )

        signal.signal(signal.SIGINT, shand)

        if self.construct_driver():
            self.start_messaging()
            while self.messaging_started:
                if self.check_parent():
                    time.sleep(2)
                else:
                    self.stop_messaging()
                    break

        self.shutdown()
        time.sleep(1)
        os._exit(0)
Example #27
 def shand(signum, frame):
     now = time.time()
     if now - self.int_time < INTERRUPT_REPEAT_INTERVAL:
         self.stop_messaging()
     else:
         self.int_time = now
         log.info('mi/core/instrument/driver_process.py DRIVER GOT SIGINT and is ignoring it...')
Example #28
    def _publish(self, events, headers=None):
        # TODO: add headers to message
        now = time.time()
        self.channel.basic_publish('', self.queue, self.jsonify(events),
                                   pika.BasicProperties(content_type='text/plain', delivery_mode=2))

        log.info('Published %d messages to RABBIT in %.2f secs', len(events), time.time()-now)
Example #29
        def _match_devices(device_id):

            # there will not be a port assignment for the top device
            if device_id == self.top_device._id:
                self._validate_models(self.top_site._id, self.top_device._id)
                self.match_list.append((self.top_site._id, self.top_device._id))

            tuple_list = device_tree[device_id]

            for (pt, child_id, ct) in tuple_list:
                log.debug("  tuple  - pt: %s  child_id: %s  ct: %s", pt, child_id, ct)

                # match this child device then if it has children, call _match_devices with this id

                # check that this device is represented in device tree and in port assignments
                if child_id in self.device_resources and child_id in self.deployment_obj.port_assignments:
                    platform_port = self.deployment_obj.port_assignments[child_id]
                    log.debug("device platform_port: %s", platform_port)

                    # validate PlatformPort info for this device
                    self._validate_port_assignments(child_id, platform_port)

                    if platform_port.reference_designator in site_ref_designator_map:
                        matched_site = site_ref_designator_map[platform_port.reference_designator]
                        self._validate_models(matched_site, child_id)
                        log.info("match_list append site: %s  device: %s", matched_site, child_id)
                        self.match_list.append((matched_site, child_id))

                        #recurse on the children of this device
                        _match_devices(child_id)

                # otherwise this device can't be matched to a site
                else:
                    self.unmatched_device_list.append(child_id)
Example #30
    def launch(self, agent_config, process_definition_id):
        """
        schedule the launch
        """
        if isinstance(agent_config, dict) and "instance_id" in agent_config:
            agent_instance_id = agent_config.get("instance_id", None)
            log.debug("Save the agent spawn config to the object store")
            obj_id = "agent_spawncfg_%s" % agent_instance_id

            obj_store = bootstrap.container_instance.object_store
            try:
                obj_store.delete_doc(obj_id)
            except Exception:
                pass
            obj_store.create_doc(agent_config, obj_id)

            config_ref = "objects:%s/" % obj_id
            launch_config = {'process': {'config_ref': config_ref}}
        else:
            launch_config = agent_config

        log.debug("schedule agent process")
        process_schedule = ProcessSchedule(restart_mode=ProcessRestartMode.ABNORMAL,
                                           queueing_mode=ProcessQueueingMode.ALWAYS)
        process_id = self.process_dispatcher_client.schedule_process(process_definition_id=process_definition_id,
                                                                      schedule=process_schedule,
                                                                      configuration=launch_config)

        log.info("AgentLauncher got process id='%s' from process_dispatcher.schedule_process()", process_id)
        self.process_id = process_id
        return process_id
Example #31
    def __init__(self, datastore_name=None, config=None, scope=None, profile=None, **kwargs):
        super(CouchDataStore, self).__init__(datastore_name=datastore_name, config=config, scope=scope, profile=profile)

        if self.config.get("type", None) and self.config['type'] != "couchdb":
            raise BadRequest("Datastore server config is not couchdb: %s" % self.config)
        if self.datastore_name and self.datastore_name != self.datastore_name.lower():
            raise BadRequest("Invalid CouchDB datastore name: '%s'" % self.datastore_name)
        if self.scope and self.scope != self.scope.lower():
            raise BadRequest("Invalid CouchDB scope name: '%s'" % self.scope)

        # Connection
        if self.username and self.password:
            connection_str = "http://%s:%s@%s:%s" % (self.username, self.password, self.host, self.port)
            log_connection_str = "http://%s:%s@%s:%s" % ("username", "password", self.host, self.port)
            log.debug("Using username:password authentication to connect to datastore")
        else:
            connection_str = "http://%s:%s" % (self.host, self.port)
            log_connection_str = connection_str

        log.info("Connecting to CouchDB server: %s", log_connection_str)
        self.server = couchdb.Server(connection_str)

        self._id_factory = None   # TODO

        # Just to test existence of the datastore
        if self.datastore_name:
            try:
                ds, _ = self._get_datastore()
            except NotFound:
                self.create_datastore()
                ds, _ = self._get_datastore()
Example #32
 def __init__(self, allowed, max_events=None, publish_interval=None):
     self._allowed = allowed
     self._deque = deque()
     self._max_events = max_events if max_events else self.DEFAULT_MAX_EVENTS
     self._publish_interval = publish_interval if publish_interval else self.DEFAULT_PUBLISH_INTERVAL
     self._running = False
     log.info('Publisher: max_events: %d publish_interval: %d', self._max_events, self._publish_interval)
Example #33
    def advance_lcs(self, resource_id, transition_event):
        """
        attempt to advance the lifecycle state of a resource
        @resource_id the resource id
        @new_state the new lifecycle state
        """

        assert(type("") == type(resource_id))
        assert(type(LCE.PLAN) == type(transition_event))

        # no checking here.  the policy framework does the work.
        #self.check_lcs_precondition_satisfied(resource_id, transition_event)

        if LCE.RETIRE == transition_event:
            log.debug("Using RR.retire")
            ret = self.RR.retire(resource_id)
            return ret
        else:
            log.debug("Moving %s resource life cycle with transition event=%s",
                      self.iontype, transition_event)

            ret = self.RR.execute_lifecycle_transition(resource_id=resource_id,
                                                       transition_event=transition_event)

            log.info("%s lifecycle transition=%s resulted in lifecycle state=%s",
                     self.iontype, transition_event, str(ret))

        return ret
Example #34
        def recv_evt_messages(driver_client):
            """
            A looping function that monitors a ZMQ SUB socket for asynchronous
            driver events. Can be run as a thread or greenlet.
            @param driver_client The client object that launches the thread.
            """
            context = zmq.Context()
            sock = context.socket(zmq.SUB)
            sock.connect(driver_client.event_host_string)
            sock.setsockopt(zmq.SUBSCRIBE, '')
            log.info('Driver client event thread connected to %s.' %
                  driver_client.event_host_string)

            driver_client.stop_event_thread = False
            #last_time = time.time()
            while not driver_client.stop_event_thread:
                try:
                    evt = sock.recv_pyobj(flags=zmq.NOBLOCK)
                    log.debug('got event: %s' % str(evt))
                    if driver_client.evt_callback:
                        driver_client.evt_callback(evt)
                except zmq.ZMQError:
                    time.sleep(.5)
                #cur_time = time.time()
                #if cur_time - last_time > 5:
                #    log.info('event thread listening')
                #    last_time = cur_time
            sock.close()
            context.term()
            log.info('Client event socket closed.')
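A hedged sketch of the matching publisher side this SUB loop receives from, assuming pyzmq (the port and event payload are illustrative):

    context = zmq.Context()
    pub = context.socket(zmq.PUB)
    pub.bind('tcp://*:5601')  # hypothetical event port
    # recv_pyobj() in the client loop pairs with send_pyobj() here
    pub.send_pyobj({'type': 'DRIVER_ASYNC_EVENT_SAMPLE', 'value': '...'})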
Example #35
    def upload(self):
        """
        move output egg to another directory / upload it somewhere

        return boolean success of uploading, message
        """

        if self.did_upload:
            return False, "Tried to upload a file twice"

        log.debug("creating tempfile with contents")
        f_handle, tempfilename = self.tempfile.mkstemp()

        log.debug("writing contents to disk at '%s'", tempfilename)
        self.os.write(f_handle, base64.decodestring(self.dest_contents))

        log.debug("setting tempfile permissions to 664")
        self.os.fchmod(
            f_handle, stat.S_IWUSR | stat.S_IRUSR | stat.S_IWGRP | stat.S_IRGRP
            | stat.S_IROTH)
        self.os.close(f_handle)  # release the descriptor before scp reads the file

        scp_destination = "%s@%s:%s/%s" % (self.dest_user, self.dest_host,
                                           self.dest_path, self.dest_file)

        log.info("executing scp: '%s' to '%s'", tempfilename, scp_destination)
        scp_proc = self.subprocess.Popen([
            "scp", "-v", "-o", "PasswordAuthentication=no", "-o",
            "StrictHostKeyChecking=no", tempfilename, scp_destination
        ],
                                         stdout=self.subprocess.PIPE,
                                         stderr=self.subprocess.PIPE)

        scp_out, scp_err = scp_proc.communicate()

        log.info("using ssh to remotely 'chmod 644' the file")
        ssh_proc = self.subprocess.Popen([
            "ssh", ("%s@%s" % (self.dest_user, self.dest_host)), "chmod",
            "664", ("%s/%s" % (self.dest_path, self.dest_file))
        ])

        ssh_out, ssh_err = ssh_proc.communicate()

        # clean up
        log.debug("removing tempfile at '%s'", tempfilename)
        self.os.unlink(tempfilename)

        # check scp status
        if 0 != scp_proc.returncode:
            return False, (
                "Secure copy to %s:%s failed.  (STDOUT: %s) (STDERR: %s)" %
                (self.dest_host, self.dest_path, scp_out, scp_err))

        # check ssh status
        if 0 != ssh_proc.returncode:
            return False, (
                "Remote chmod on %s/%s failed.  (STDOUT: %s) (STDERR: %s)" %
                (self.dest_path, self.dest_file, ssh_out, ssh_err))

        self.did_upload = True
        return True, ""
Example #36
        def got_event(evt, *args, **kwargs):
            if not self._active:
                log.warn("%r: got_event called but manager has been destroyed",
                         self._platform_id)
                return

            if evt.type_ != event_type:
                log.trace(
                    "%r: ignoring event type %r. Only handle %r directly",
                    self._platform_id, evt.type_, event_type)
                return

            if evt.sub_type != sub_type:
                log.trace("%r: ignoring event sub_type %r. Only handle %r",
                          self._platform_id, evt.sub_type, sub_type)
                return

            state = self._agent.get_agent_state()

            statuses = formatted_statuses(self.aparam_aggstatus,
                                          self.aparam_child_agg_status,
                                          self.aparam_rollup_status)

            invalidated_children = self._agent._get_invalidated_children()

            log.info(
                "%r/%s: (%s) status report triggered by diagnostic event:\n"
                "%s\n"
                "%40s : %s\n", self._platform_id, state, self.resource_id,
                statuses, "invalidated_children", invalidated_children)
Example #37
    def _create_driver_plugin(self):
        try:
            # Ensure the egg cache directory exists. ooi.reflections will fail
            # somewhat silently when this directory doesn't exist.
            if not os.path.isdir(EGG_CACHE_DIR):
                os.makedirs(EGG_CACHE_DIR)

            log.debug("getting plugin config")
            uri = get_safe(self._dvr_config, 'dvr_egg')
            module_name = self._dvr_config['dvr_mod']
            class_name = self._dvr_config['dvr_cls']
            config = self._dvr_config['startup_config']
        except Exception:
            log.error('error in configuration', exc_info=True)
            raise

        egg_name = None
        egg_repo = None
        memento = self._get_state(DSA_STATE_KEY)

        log.warn("Get driver object: %s, %s, %s, %s", class_name, module_name,
                 egg_name, egg_repo)
        if uri:
            egg_name = uri.split('/')[-1] if uri.startswith('http') else uri
            egg_repo = uri[0:len(uri) - len(egg_name) -
                           1] if uri.startswith('http') else None

        log.info("instantiate driver plugin %s.%s", module_name, class_name)
        params = [
            config, memento, self.publish_callback,
            self.persist_state_callback, self.exception_callback
        ]
        return EGG_CACHE.get_object(class_name, module_name, egg_name,
                                    egg_repo, params)
Example #38
 def publish_callback(self, particle):
     for p in particle:
         try:
             p_obj = p.generate()
             log.info("Particle received: %s", p_obj)
             self._async_driver_event_sample(p_obj, None)
         except Exception:
             log.error("Error logging particle", exc_info=True)
Example #39
    def __init__(self, failure_callback, num_workers=1, pidantic_dir=None, working_dir=None):
        self.guid = create_guid()
        self.prep_queue = queue.Queue()
        self.work_queue = queue.Queue()
        self._pending_work = {}
        self._stashed_work = {}
        self._active_work = {}
        self._failures = {}
        self._do_stop = False
        self._count = -1
        self._shutdown = False
        self._failure_callback = failure_callback

        self.context = zmq.Context(1)
        self.prov_sock = self.context.socket(zmq.REP)
        self.prov_port = self._get_port(self.prov_sock)
        log.info('Provisioning url: tcp://*:{0}'.format(self.prov_port))

        self.resp_sock = self.context.socket(zmq.SUB)
        self.resp_port = self._get_port(self.resp_sock)
        self.resp_sock.setsockopt(zmq.SUBSCRIBE, '')
        log.info('Response url: tcp://*:{0}'.format(self.resp_port))

        self.num_workers = num_workers if num_workers > 0 else 1
        self.is_single_worker = self.num_workers == 1
        self.working_dir = working_dir or '.'
        self.pidantic_dir = pidantic_dir or './pid_dir'
        self.workers = []

        self._configure_workers()
Example #40
 def shutdown(self):
     """
     Shutdown function prior to process exit.
     """
     log.info('Driver process shutting down.')
     self.driver_module = None
     self.driver_class = None
     self.driver = None
Example #41
 def start(self):
     self._do_stop = False
     self._g = spawn(self._run, self.name)
     log.info(
         'Brick writer worker \'%s\' started: req_port=%s, resp_port=%s',
         self.name, 'tcp://localhost:{0}'.format(self.req_port),
         'tcp://localhost:{0}'.format(self.resp_port))
     return self._g
Example #42
def cov_get_by_integer():
    cov = oneparamcov()
    dat = cov._range_value.time
    for i, val in enumerate(dat):
        log.info(i)
        log.info('\t%s', val)

    return cov
 def ret_fn(obj_id):
     log.info("Dynamically finding subject %s <- %s <- %s", iobj,
              ipred, isubj)
     ret = self.find_subject(subject_type=isubj,
                             predicate=ipred,
                             object=obj_id,
                             id_only=False)
     return ret
 def ret_fn(subj_id):
     log.info("Dynamically finding object_id %s -> %s -> %s", isubj,
              ipred, iobj)
     ret = self.find_object(subject=subj_id,
                            predicate=ipred,
                            object_type=iobj,
                            id_only=True)
     return ret
Example #45
 def __init__(self, allowed, max_events=None, publish_interval=None):
     self._allowed = allowed
     self._deque = deque()
     self._max_events = max_events if max_events else self.DEFAULT_MAX_EVENTS
     self._publish_interval = publish_interval if publish_interval else self.DEFAULT_PUBLISH_INTERVAL
     self._running = False
     self._headers = {}
     log.info('Publisher: max_events: %d publish_interval: %d', self._max_events, self._publish_interval)
 def add_device_model(device_id, model_id):
     if device_id in device_models:
         log.info("Device '%s' was already collected in deployment '%s'", device_id, deployment_id)
         if model_id != device_models[device_id]:
             log.warn("Device '%s' being assigned a different model.  old='%s', new='%s'",
                      device_id, device_models[device_id], model_id)
     device_models[device_id] = model_id
     self._model_lookup[device_id] = model_id
 def add_site_models(site_id, model_ids):
     if site_id in site_models:
         log.info("Site '%s' was already collected in deployment '%s'", site_id, deployment_id)
         if model_ids != site_models[site_id]:
             log.warn("Device '%s' being assigned a different model.  old=%s, new=%s",
                      site_id, site_models[site_id], model_ids)
     site_models[site_id] = model_ids
     self._model_lookup[site_id] = model_ids
 def ret_fn(obj):
     log.info("Dynamically finding subject_ids %s <- %s <- %s",
              iobj, ipred, isubj)
     obj_id, _ = self._extract_id_and_type(obj)
     ret, _ = self.RR.find_subjects(subject_type=isubj,
                                    predicate=ipred,
                                    object=obj_id,
                                    id_only=True)
     return ret
Example #49
    def _prepare_using_csp(self):
        """
        use the previously collected resources in a CSP problem
        """
        site_tree = self.resource_collector.collected_site_tree()
        device_tree = self.resource_collector.collected_device_tree()
        device_models = self.resource_collector.collected_models_by_device()
        site_models = self.resource_collector.collected_models_by_site()

        log.debug("Collected %s device models, %s site models",
                  len(device_models), len(site_models))

        # csp solver can't handle multiple platforms, because it doesn't understand hierarchy.
        #             (parent-platformsite---hasmodel-a, child-platformsite---hasmodel-b)
        # would match (parent-platformdevice-hasmodel-b, child-platformdevice-hasmodel-a)
        #
        # we can avoid this by simply restricting the deployment to 1 platform device/site in this case

        #        n_pdev = sum(RT.PlatformDevice == self.resource_collector.get_resource_type(d) for d in device_models.keys())
        #        if 1 < n_pdev:
        #            raise BadRequest("Deployment activation without port_assignment is limited to 1 PlatformDevice, got %s" % n_pdev)
        #
        #        n_psite = sum(RT.PlatformSite == self.resource_collector.get_resource_type(d) for d in site_models.keys())
        #        if 1 < n_psite:
        #            raise BadRequest("Deployment activation without port_assignment is limited to 1 PlatformSite, got %s" % n_psite)

        solutions = self._get_deployment_csp_solutions(device_tree, site_tree,
                                                       device_models,
                                                       site_models)

        if 1 > len(solutions):
            raise BadRequest(
                "The set of devices could not be mapped to the set of sites, based on matching "
                + "models")  # and streamdefs")

        if 1 == len(solutions):
            log.info(
                "Found one possible way to map devices and sites.  Best case scenario!"
            )
        else:
            log.info("Found %d possible ways to map device and site",
                     len(solutions))
            log.trace("Here is the %s of all of them:",
                      type(solutions).__name__)
            for i, s in enumerate(solutions):
                log.trace("Option %d: %s", i + 1,
                          self._csp_solution_to_string(s))
            uhoh = (
                "The set of devices could be mapped to the set of sites in %s ways based only "
                + "on matching models, and no port assignments were specified."
            ) % len(solutions)
            #raise BadRequest(uhoh)
            log.warn(uhoh + "  PICKING THE FIRST AVAILABLE OPTION.")

        # return list of site_id, device_id
        return [(solutions[0][mk_csp_var(device_id)], device_id)
                for device_id in device_models.keys()]
Example #50
    def _publish(self, events, headers=None):
        # TODO: add headers to message
        now = time.time()
        self.channel.basic_publish(
            '', self.queue, self.jsonify(events),
            pika.BasicProperties(content_type='text/plain', delivery_mode=2))

        log.info('Published %d messages to RABBIT in %.2f secs', len(events),
                 time.time() - now)
Example #51
 def _publish(self, events, headers):
     for e in events:
         try:
             json.dumps(e)
         except (ValueError, UnicodeDecodeError) as err:
             log.exception('Unable to publish event: %r %r', e, err)
     count = len(events)
     self.total += count
     log.info('Publish %d events (%d total)', count, self.total)
Example #52
 def close(self):
     """
     Close any connections required for this datastore.
     """
     log.info("Closing connection to CouchDB")
     # Close every open connection in every session pool, then drop the pool.
     for conns in self.server.resource.session.conns.values():
         for conn in conns:
             conn.close()
     self.server.resource.session.conns = {}  # just in case we try to reuse this, for some reason
 def ret_fn(subj):
     log.info("Dynamically finding objects %s -> %s -> %s", isubj,
              ipred, iobj)
     subj_id, _ = self._extract_id_and_type(subj)
     ret, _ = self.RR.find_objects(subject=subj_id,
                                   predicate=ipred,
                                   object_type=iobj,
                                   id_only=False)
     return ret
Example #54
    def __init__(self,
                 datastore_name=None,
                 host=None,
                 port=None,
                 username=None,
                 password=None,
                 config=None,
                 newlog=None,
                 scope=None,
                 **kwargs):
        """
        @param datastore_name  Name of datastore within server. Should be scoped by caller with sysname
        @param config  A standard config dict with connection params
        @param scope  Identifier to prefix the datastore name (e.g. sysname)
        """
        global log
        if newlog:
            log = newlog

        # Connection
        self.host = host or get_safe(config, 'server.couchdb.host') or 'localhost'
        self.port = port or get_safe(config, 'server.couchdb.port') or 5984
        self.username = username or get_safe(config, 'server.couchdb.username')
        self.password = password or get_safe(config, 'server.couchdb.password')
        if self.username and self.password:
            connection_str = "http://%s:%s@%s:%s" % (
                self.username, self.password, self.host, self.port)
            log_connection_str = "http://%s:%s@%s:%s" % (
                "username", "password", self.host, self.port)
            log.debug("Using username:password authentication to connect to datastore")
        else:
            connection_str = "http://%s:%s" % (self.host, self.port)
            log_connection_str = connection_str

        # Mask credentials so the password is never emitted into the log.
        log.info('Connecting to CouchDB server: %s', log_connection_str)
        self.server = couchdb.Server(connection_str)

        self._datastore_cache = {}

        # Datastore (couch database) handling. Scope with given scope (sysname) and make all lowercase
        self.scope = scope
        if self.scope:
            self.datastore_name = ("%s_%s" % (self.scope, datastore_name)).lower() if datastore_name else None
        else:
            self.datastore_name = datastore_name.lower() if datastore_name else None

        # Just to test existence of the datastore
        if self.datastore_name:
            try:
                ds, _ = self._get_datastore()
            except NotFound:
                self.create_datastore()
                ds, _ = self._get_datastore()
Example #55
 def _spawn(self, spawnargs):
     """
     Launch a process using popen
      @param spawnargs a list of arguments for the Popen command line.
                       The first argument must be a path to a
                       program and arguments must be in additional list elements.
     @return subprocess.Popen object
     """
     log.info('spawnargs: %s', spawnargs)
     return subprocess.Popen(spawnargs, env=os.environ, close_fds=True)
Example #56
 def add_site_models(site_id, model_ids):
     if site_id in site_models:
         log.info("Site '%s' was already collected in deployment '%s'",
                  site_id, deployment_id)
         if model_ids != site_models[site_id]:
              log.warn(
                  "Site '%s' being assigned different models.  old=%s, new=%s",
                  site_id, site_models[site_id], model_ids)
     site_models[site_id] = model_ids
     self._model_lookup[site_id] = model_ids
Example #57
 def add_device_model(device_id, model_id):
     if device_id in device_models:
         log.info(
             "Device '%s' was already collected in deployment '%s'",
             device_id, deployment_id)
         if model_id != device_models[device_id]:
             log.warn(
                 "Device '%s' being assigned a different model.  old='%s', new='%s'",
                 device_id, device_models[device_id], model_id)
     device_models[device_id] = model_id
     self._model_lookup[device_id] = model_id