Example #1
0
    def execute_method_with_resource(self, resource_id, method_name, **kwargs):
        """
        Execute method_name against the resource identified by resource_id.

        @param resource_id id of the resource passed as the first argument
        @param method_name name of the method to execute
        @retval the method's return value, or None when the caller was
                unauthorized or the call raised (errors are logged, not
                propagated)
        """
        try:
            return execute_method(self, method_name, resource_id, **kwargs)

        except Unauthorized:
            # No need to do anything if the user was unauthorized. This is NOT
            # an error, just means the user does not have the proper rights.
            return None

        except Exception as e:
            # Fixed Python 2-only "except Exception, e" syntax; pass lazy
            # %-style arguments to the logger instead of formatting eagerly.
            log.error("Error executing method %s for resource id %s: %s",
                      method_name, resource_id, e)
            return None
    def _poll_until_ready(self, fetch, fail_log, fail_msg, on_timeout=None):
        """
        Poll *fetch* every 0.1s until it returns a truthy value or
        self._timeout seconds elapse.

        @param fetch callable returning the value being waited for
        @param fail_log message logged when the timeout expires
        @param fail_msg message carried by the raised PortAgentTimeout
        @param on_timeout optional cleanup callable invoked before raising
        @retval the first truthy value returned by fetch
        @raises PortAgentTimeout when the timeout expires
        """
        expire_time = time.time() + int(self._timeout)
        value = fetch()
        while not value:
            gevent.sleep(.1)
            value = fetch()
            if time.time() > expire_time:
                log.error(fail_log)
                if on_timeout is not None:
                    on_timeout()
                raise PortAgentTimeout(fail_msg)
        return value

    def launch(self):
        """
        @brief Launch the driver process and driver client.  This is used in the
        integration and qualification tests.  The port agent abstracts the physical
        interface with the instrument.
        @retval return the pid to the logger process
        @raises PortAgentTimeout if the agent never starts or never binds a port
        """
        log.info("Startup Port Agent")
        # Create port agent object.  Only hand over our own pid in test mode.
        this_pid = os.getpid() if self._test_mode else None

        log.debug(" -- our pid: %s" % this_pid)
        log.debug(" -- address: %s, port: %s" % (self._device_addr, self._device_port))

        # Working dir and delim are hard coded here because this launch process
        # will change with the new port agent.
        self.port_agent = EthernetDeviceLogger.launch_process(
            self._device_addr,
            self._device_port,
            self._working_dir,
            self._delimiter,
            this_pid)

        log.debug(" Port agent object created")

        # The two wait loops below were duplicated inline; factored into
        # _poll_until_ready.  Behavior is unchanged: poll every 0.1s for up
        # to self._timeout seconds.
        pid = self._poll_until_ready(
            self.port_agent.get_pid,
            "!!!! Failed to start Port Agent !!!!",
            'port agent could not be started')
        self._pid = pid

        # For the port we additionally stop the agent before raising, as the
        # original code did.
        port = self._poll_until_ready(
            self.port_agent.get_port,
            "!!!! Port Agent could not bind to port !!!!",
            'port agent could not bind to port',
            on_timeout=self.stop)
        self._data_port = port

        log.info('Started port agent pid %s listening at port %s' % (pid, port))
        return port
    def launch(self):
        """
        @brief Launch the driver process and driver client.  This is used in the
        integration and qualification tests.  The port agent abstracts the physical
        interface with the instrument.
        @retval return the pid to the logger process
        @raises PortAgentTimeout if the agent never reports a pid or a port
                within self._timeout seconds
        """
        log.info("Startup Port Agent")
        # Create port agent object.
        # Our own pid is only passed along in test mode.
        this_pid = os.getpid() if self._test_mode else None

        log.debug(" -- our pid: %s" % this_pid)
        log.debug(" -- address: %s, port: %s" %
                  (self._device_addr, self._device_port))

        # Working dir and delim are hard coded here because this launch process
        # will change with the new port agent.
        self.port_agent = EthernetDeviceLogger.launch_process(
            self._device_addr, self._device_port, self._working_dir,
            self._delimiter, this_pid)

        log.debug(" Port agent object created")

        # Poll every 0.1s for the agent's pid until self._timeout seconds
        # elapse.
        start_time = time.time()
        expire_time = start_time + int(self._timeout)
        pid = self.port_agent.get_pid()
        while not pid:
            gevent.sleep(.1)
            pid = self.port_agent.get_pid()
            if time.time() > expire_time:
                log.error("!!!! Failed to start Port Agent !!!!")
                raise PortAgentTimeout('port agent could not be started')
        self._pid = pid

        port = self.port_agent.get_port()

        # Same polling scheme for the data port; on timeout the agent is
        # stopped before raising.
        start_time = time.time()
        expire_time = start_time + int(self._timeout)
        while not port:
            gevent.sleep(.1)
            port = self.port_agent.get_port()
            if time.time() > expire_time:
                log.error("!!!! Port Agent could not bind to port !!!!")
                self.stop()
                raise PortAgentTimeout('port agent could not bind to port')
        self._data_port = port

        log.info('Started port agent pid %s listening at port %s' %
                 (pid, port))
        return port
 def get_client(self):
     """
     Get a python client for the driver process.
     @return an client object for the driver process
     @raises DriverLaunchException if the client cannot be created
     """
     # Start client messaging and verify messaging.  Lazily create and cache
     # the client on first use.
     if not self._driver_client:
         try:
             driver_client = ZmqDriverClient('localhost', self._command_port, self._event_port)
             self._driver_client = driver_client
         except Exception as e:
             # Fixed Python 2-only "except Exception, e" syntax.
             self.stop()
             log.error('Error starting driver client: %s', e)
             raise DriverLaunchException('Error starting driver client.')
     # Return the (possibly cached) client so the accessor matches its
     # docstring; callers that ignored the return value are unaffected.
     return self._driver_client
Example #5
0
 def get_client(self):
     """
     Get a python client for the driver process.
     @return an client object for the driver process
     @raises DriverLaunchException if the client cannot be created
     """
     # Start client messaging and verify messaging.  Lazily create and cache
     # the client on first use.
     if not self._driver_client:
         try:
             driver_client = ZmqDriverClient('localhost',
                                             self._command_port,
                                             self._event_port)
             self._driver_client = driver_client
         except Exception as e:
             # Fixed Python 2-only "except Exception, e" syntax.
             self.stop()
             log.error('Error starting driver client: %s', e)
             raise DriverLaunchException('Error starting driver client.')
     # Return the (possibly cached) client so the accessor matches its
     # docstring; callers that ignored the return value are unaffected.
     return self._driver_client
    def run_command(self, command_line):
        """
        Spawn *command_line* and return its pid.

        Waits one second, then raises PortAgentLaunchException if the
        process has already exited with a nonzero returncode.
        """
        log.debug("run command: " + str(command_line))
        proc = subprocess.Popen(command_line,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                close_fds=True)
        gevent.sleep(1)

        proc.poll()

        # We have failed!
        if proc.returncode and proc.pid:
            _, error_message = proc.communicate()
            log.error("Failed to run command: STDERR: %s", error_message)
            raise PortAgentLaunchException("failed to launch port agent")

        log.debug("command successful. pid: %d", proc.pid)

        return proc.pid
    def run_command(self, command_line):
        """
        Run *command_line* via subprocess and return its pid.

        @param command_line argv-style list handed to subprocess.Popen
        @retval pid of the spawned process
        @raises PortAgentLaunchException if the process has a nonzero
                returncode one second after launch
        """
        log.debug("run command: " + str(command_line))
        process = subprocess.Popen(command_line,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   close_fds=True)
        # Give the process a moment to fail fast before checking on it.
        gevent.sleep(1)

        process.poll()

        # We have failed!
        if (process.returncode and process.pid):
            output, error_message = process.communicate()
            log.error("Failed to run command: STDERR: %s" % (error_message))
            raise PortAgentLaunchException("failed to launch port agent")

        log.debug("command successful.  pid: %d" % (process.pid))

        return process.pid
    def launch(self):
        """
        Launch the driver process, then read the event port and command port
        for the driver from the two status files it writes.
        @raises DriverLaunchException
        """
        log.info("Launch driver process")

        launch_cmd = self._process_command()
        self._driver_process = self._spawn(launch_cmd)

        # NOTE(review): failure is flagged only when BOTH the process handle
        # is falsy AND poll() is falsy -- confirm 'and' (vs 'or') is intended.
        if not self._driver_process and not self.poll():
            log.error("Failed to launch driver: %s", launch_cmd)
            raise DriverLaunchException('Error starting driver process')

        log.debug("driver process started, pid: %s", self.getpid())

        # Pick up the ports the freshly started driver wrote to its status
        # files.
        command_port_file = self._driver_command_port_file()
        event_port_file = self._driver_event_port_file()
        self._command_port = self._get_port_from_file(command_port_file)
        self._event_port = self._get_port_from_file(event_port_file)

        log.debug("-- command port: %s, event port: %s",
                  self._command_port, self._event_port)
    def stop(self):
        """
        Stop the port agent process, waiting up to 5 seconds for it to die
        before sending SIGKILL.
        """
        log.info('Stop port agent')
        # When calling stop, IMS grabs a new port agent process via
        # PortAgentProcess.get_process, so self._pid is None and needs to be
        # initialized from the pid file.
        pid_file = PID_FILE % (PROCESS_BASE_DIR, self._command_port)
        try:
            with open(pid_file, 'r') as f:
                pid = f.read().strip('\0\n\4')
                if pid:
                    try:
                        self._pid = int(pid)
                    except ValueError:
                        # Corrupt pid file; keep whatever self._pid already
                        # holds.
                        pass
        except IOError:
            log.exception('Port agent pid file not found!')

        # Ask the port agent binary to shut itself down (-k) using the same
        # config file and command port it was started with.
        command_line = [self._binary_path,
                        "-c", self._tmp_config.name,
                        "-k",
                        "-p", "%s" % self._command_port]

        self.run_command(command_line)
        timeout = Timeout(5)
        timeout.start()
        try:
            while self.poll():
                log.warn('WAITING HERE with pid %s' % self._pid)
                gevent.sleep(1)
        except Timeout:
            # Fixed Python 2-only "except Timeout, t" syntax; t was unused.
            log.error(
                'Timed out waiting for pagent to die.  Going in for kill.')
            os.kill(self._pid, signal.SIGKILL)
        finally:
            # Cancel the gevent timeout so it cannot fire later in this
            # greenlet after a successful wait (it was previously leaked).
            timeout.cancel()
Example #10
0
    def launch(self):
        """
        Launch the driver process. Once the process is launched read the two status files that contain the event port
        and command port for the driver.
        @raises DriverLaunchException
        """
        log.info("Launch driver process")

        cmd = self._process_command()
        self._driver_process = self._spawn(cmd)

        # NOTE(review): failure is flagged only when BOTH the process handle
        # is falsy AND poll() is falsy -- confirm 'and' (vs 'or') is intended.
        if not self._driver_process and not self.poll():
            log.error("Failed to launch driver: %s", cmd)
            raise DriverLaunchException('Error starting driver process')

        log.debug("driver process started, pid: %s", self.getpid())

        # Read back the ports the driver wrote to its two status files.
        self._command_port = self._get_port_from_file(
            self._driver_command_port_file())
        self._event_port = self._get_port_from_file(
            self._driver_event_port_file())

        log.debug("-- command port: %s, event port: %s", self._command_port,
                  self._event_port)
    def stop(self):
        """
        Stop the port agent process, waiting up to 5 seconds for it to die
        before sending SIGKILL.
        """
        log.info('Stop port agent')
        # When calling stop, IMS grabs a new port agent process via
        # PortAgentProcess.get_process, so self._pid is None and needs to be
        # initialized from the pid file.
        pid_file = PID_FILE % (PROCESS_BASE_DIR, self._command_port)
        try:
            with open(pid_file, 'r') as f:
                pid = f.read().strip('\0\n\4')
                if pid:
                    try:
                        self._pid = int(pid)
                    except ValueError:
                        # Corrupt pid file; keep whatever self._pid already
                        # holds.
                        pass
        except IOError:
            log.exception('Port agent pid file not found!')

        # Ask the port agent binary to shut itself down (-k) using the same
        # config file and command port it was started with.  (Stray
        # semicolons and spacing from the original removed.)
        command_line = [self._binary_path,
                        "-c", self._tmp_config.name,
                        "-k",
                        "-p", "%s" % self._command_port]

        self.run_command(command_line)
        timeout = Timeout(5)
        timeout.start()
        try:
            while self.poll():
                log.warn('WAITING HERE with pid %s' % self._pid)
                gevent.sleep(1)
        except Timeout:
            # Fixed Python 2-only "except Timeout, t" syntax; t was unused.
            log.error('Timed out waiting for pagent to die.  Going in for kill.')
            os.kill(self._pid, signal.SIGKILL)
        finally:
            # Cancel the gevent timeout so it cannot fire later in this
            # greenlet after a successful wait (it was previously leaked).
            timeout.cancel()
Example #12
0
    def stop(self, force=False):
        """
        Stop the driver process.  We try to stop gracefully using the driver client if we can, otherwise a simple kill
        does the job.

        @param force when True, skip the graceful shutdown and kill directly
        """
        if self._driver_process:

            if not force and self._driver_client:
                try:
                    log.info('Stopping driver process.')
                    self._driver_client.done()
                    self._driver_process.wait()
                    log.info('Driver process stopped.')
                except Exception:
                    # Narrowed from a bare "except:" so KeyboardInterrupt and
                    # SystemExit are no longer swallowed here.
                    try:
                        log.error(
                            'Exception stopping driver process...killing.')
                        self._driver_client.stop_messaging()
                        self._driver_process.poll()
                        if not self._driver_process.returncode:
                            self._driver_process.kill()
                            self._driver_process.wait()
                            log.info('Driver process killed.')
                    except Exception as ex:
                        log.error('Exception killing driver process')
                        log.error(type(ex))
                        log.error(ex)

            else:
                try:
                    log.info('Killing driver process.')
                    self._driver_client.stop_messaging()
                    self._driver_process.poll()
                    if not self._driver_process.returncode:
                        self._driver_process.kill()
                        self._driver_process.wait()
                        log.info('Driver process killed.')
                except Exception as ex:
                    log.error('Exception killing driver process.')
                    log.error(type(ex))
                    log.error(ex)

        # Always clear the handles, even when no process was running.
        self._driver_process = None
        self._driver_client = None
    def __init__(self,
                 config,
                 stream_handle,
                 exception_callback):
        """
        Initialize the cg_dcl_eng_dcl parser.

        Pulls the particle class for each message type out of the
        PARTICLE_CLASSES_DICT in *config* and builds two lookup tables:
        particle class type -> particle class, and particle class type ->
        matching regex.

        @param config parser configuration dictionary; must contain
               DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT with an entry
               for every ParticleClassTypes key referenced below
        @param stream_handle handle to the input stream
        @param exception_callback exception callback
        @raises ConfigurationException if any expected particle class is
                missing from the configuration
        """

        # no sieve function since we are not using the chunker here
        # NOTE(review): the received exception_callback is NOT forwarded to
        # the superclass (exception_callback=None below) -- confirm this is
        # intentional.
        super(CgDclEngDclParser, self).__init__(config,
                                                stream_handle,
                                                exception_callback=None)

        try:
            particle_classes_dict = config[DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT]
            self._msg_counts_particle_class = particle_classes_dict[
                ParticleClassTypes.MSG_COUNTS_PARTICLE_CLASS]
            self._cpu_uptime_particle_class = particle_classes_dict[
                ParticleClassTypes.CPU_UPTIME_PARTICLE_CLASS]
            self._error_particle_class = particle_classes_dict[
                ParticleClassTypes.ERROR_PARTICLE_CLASS]
            self._gps_particle_class = particle_classes_dict[
                ParticleClassTypes.GPS_PARTICLE_CLASS]
            self._pps_particle_class = particle_classes_dict[
                ParticleClassTypes.PPS_PARTICLE_CLASS]
            self._superv_particle_class = particle_classes_dict[
                ParticleClassTypes.SUPERV_PARTICLE_CLASS]
            self._dlog_mgr_particle_class = particle_classes_dict[
                ParticleClassTypes.DLOG_MGR_PARTICLE_CLASS]
            self._dlog_status_particle_class = particle_classes_dict[
                ParticleClassTypes.DLOG_STATUS_PARTICLE_CLASS]
            self._status_particle_class = particle_classes_dict[
                ParticleClassTypes.STATUS_PARTICLE_CLASS]
            self._dlog_aarm_particle_class = particle_classes_dict[
                ParticleClassTypes.DLOG_AARM_PARTICLE_CLASS]

            # Lookup table: particle class type -> particle class.
            self._particle_classes = {
                ParticleClassTypes.MSG_COUNTS_PARTICLE_CLASS: self._msg_counts_particle_class,
                ParticleClassTypes.CPU_UPTIME_PARTICLE_CLASS: self._cpu_uptime_particle_class,
                ParticleClassTypes.ERROR_PARTICLE_CLASS: self._error_particle_class,
                ParticleClassTypes.GPS_PARTICLE_CLASS: self._gps_particle_class,
                ParticleClassTypes.PPS_PARTICLE_CLASS: self._pps_particle_class,
                ParticleClassTypes.SUPERV_PARTICLE_CLASS: self._superv_particle_class,
                ParticleClassTypes.DLOG_MGR_PARTICLE_CLASS: self._dlog_mgr_particle_class,
                ParticleClassTypes.DLOG_STATUS_PARTICLE_CLASS: self._dlog_status_particle_class,
                ParticleClassTypes.DLOG_AARM_PARTICLE_CLASS: self._dlog_aarm_particle_class,
                ParticleClassTypes.STATUS_PARTICLE_CLASS: self._status_particle_class,
            }
            # Lookup table: particle class type -> regex used to parse lines
            # of that type.
            self._particle_regex = {
                ParticleClassTypes.MSG_COUNTS_PARTICLE_CLASS: MSG_COUNTS_REGEX,
                ParticleClassTypes.CPU_UPTIME_PARTICLE_CLASS: CPU_UPTIME_REGEX,
                ParticleClassTypes.ERROR_PARTICLE_CLASS: ERROR_REGEX,
                ParticleClassTypes.GPS_PARTICLE_CLASS: GPS_REGEX,
                ParticleClassTypes.PPS_PARTICLE_CLASS: PPS_REGEX,
                ParticleClassTypes.SUPERV_PARTICLE_CLASS: SUPERV_REGEX,
                ParticleClassTypes.DLOG_MGR_PARTICLE_CLASS: DLOG_MGR_REGEX,
                ParticleClassTypes.DLOG_STATUS_PARTICLE_CLASS: DLOG_STATUS_REGEX,
                ParticleClassTypes.DLOG_AARM_PARTICLE_CLASS: DLOG_AARM_REGEX,
                ParticleClassTypes.STATUS_PARTICLE_CLASS: D_STATUS_NTP_REGEX,
            }

        except (KeyError, AttributeError):
            message = "Invalid cg_dcl_eng_dcl configuration parameters."
            log.error("Error: %s", message)
            raise ConfigurationException(message)
    def parse_file(self):
        """
        Parse the cg_dcl_eng_dcl input stream, classify each line by its
        message tokens, and collect the matching particles into the record
        buffer.
        """
        for line in self._stream_handle:
            log.trace("Line: %s", line)

            tokens = line.split()
            if len(tokens) < 6:
                continue

            tag, source, detail, subtype = tokens[2:6]

            # Map the leading tokens to a particle class; None means the
            # line is not one we extract particles from.
            particle_class = None
            if tag == 'MSG':
                if source == 'D_STATUS':
                    if detail == 'STATUS:':  # e.g. ... MSG D_STATUS STATUS: ...
                        particle_class = ParticleClassTypes.MSG_COUNTS_PARTICLE_CLASS
                    elif detail == 'CPU':  # ... MSG D_STATUS CPU ...
                        particle_class = ParticleClassTypes.CPU_UPTIME_PARTICLE_CLASS
                elif source == 'D_CTL':
                    continue  # ignore
            elif tag in ('ERR', 'ALM', 'WNG'):  # e.g. ... ALM ...
                particle_class = ParticleClassTypes.ERROR_PARTICLE_CLASS
            elif tag == 'DAT':
                if source == 'D_GPS':  # ... DAT D_GPS ...
                    particle_class = ParticleClassTypes.GPS_PARTICLE_CLASS
                elif source == 'D_PPS':  # ... DAT D_PPS D_PPS: ...
                    particle_class = ParticleClassTypes.PPS_PARTICLE_CLASS
                elif source == 'SUPERV':  # ... DAT SUPERV ...
                    particle_class = ParticleClassTypes.SUPERV_PARTICLE_CLASS
                elif source == 'DLOG_MGR':  # ... DAT DLOG_MGR ...
                    particle_class = ParticleClassTypes.DLOG_MGR_PARTICLE_CLASS
                elif 'DLOGP' in source:
                    if detail == 'istatus:':  # ... DAT DLOGP5 istatus: ...
                        particle_class = ParticleClassTypes.DLOG_STATUS_PARTICLE_CLASS
                    elif subtype == 'CB_AARM':  # ... DAT DLOGP1 3DM CB_AARM ...
                        particle_class = ParticleClassTypes.DLOG_AARM_PARTICLE_CLASS
                elif detail == 'NTP:':  # ... DAT D_STATUS NTP: ...
                    particle_class = ParticleClassTypes.STATUS_PARTICLE_CLASS

            if particle_class is None:
                log.debug("Non-match .. ignoring line: %r", line)
                continue

            # Extract the particle for the identified class.
            pattern = self._particle_regex[particle_class]
            match = re.match(pattern, line)
            if not match:
                log.error('failed to match expected particle regex: %s not in %s',
                          pattern, line)
                continue

            try:
                sample = self._extract_sample(
                    self._particle_classes[particle_class], None, match.groupdict())
                if sample:
                    self._record_buffer.append(sample)
                else:
                    log.error('failed to extract sample from line: %r', line)
            except Exception as e:
                log.exception('exception (%r) extracting sample from line: %r', e, line)

        # Set an indication that the file was fully parsed
        self._file_parsed = True
Example #15
0
        while (start_time + DEFAULT_TIMEOUT > time.time()):
            try:
                file = open(pid_file)
                pid = file.read().strip('\0\n\r')
                if (pid):
                    int(pid)
                    log.info("port agent pid: [%s]" % (pid))
                    return int(pid)
            except ValueError, e:
                log.warn("Failed to convert %s to an int '%s" % (pid, e))
                break
            except:
                log.warn("Failed to open pid file: %s" % (pid_file))
                gevent.sleep(1)

        log.error("port agent startup failed")

        return None

    def _read_config(self):
        """Rewind the temporary config file and return its full contents."""
        self._tmp_config.seek(0)
        # read() returns the same string as "".join(readlines()) without
        # building an intermediate list.
        return self._tmp_config.read()

    def stop(self):
        log.info('Stop port agent')
        # When calling stop, IMS grabs a new port agent process via PortAgentProcess.get_process
        # So self._pid is None and needs to be initialized
        # NOTE(review): this definition is truncated in this chunk -- the body
        # ends mid-try with no except/finally; see the complete stop()
        # implementation elsewhere in the file.
        pid_file = PID_FILE % (PROCESS_BASE_DIR, self._command_port)
        try:
            with open(pid_file, 'r') as f:
                pid = f.read().strip('\0\n\4')
    def stop(self, force=False):
        """
        Stop the driver process.  We try to stop gracefully using the driver client if we can, otherwise a simple kill
        does the job.

        @param force when True, skip the graceful shutdown and kill directly
        """
        if self._driver_process:

            if not force and self._driver_client:
                try:
                    log.info('Stopping driver process.')
                    self._driver_client.done()
                    self._driver_process.wait()
                    log.info('Driver process stopped.')
                except Exception:
                    # Narrowed from a bare "except:" so KeyboardInterrupt and
                    # SystemExit are no longer swallowed here.
                    try:
                        log.error('Exception stopping driver process...killing.')
                        self._driver_client.stop_messaging()
                        self._driver_process.poll()
                        if not self._driver_process.returncode:
                            self._driver_process.kill()
                            self._driver_process.wait()
                            log.info('Driver process killed.')
                    except Exception as ex:
                        log.error('Exception killing driver process')
                        log.error(type(ex))
                        log.error(ex)

            else:
                try:
                    log.info('Killing driver process.')
                    self._driver_client.stop_messaging()
                    self._driver_process.poll()
                    if not self._driver_process.returncode:
                        self._driver_process.kill()
                        self._driver_process.wait()
                        log.info('Driver process killed.')
                except Exception as ex:
                    log.error('Exception killing driver process.')
                    log.error(type(ex))
                    log.error(ex)

        # Always clear the handles, even when no process was running.
        self._driver_process = None
        self._driver_client = None
Example #17
0
    def set_object_field_values(self, obj, resource, ext_exclude, **kwargs):
        """
        Iterate through all fields of the given object and set values according
        to the field type and decorator definition in the object type schema.

        @param obj extended object whose fields are filled in place
        @param resource resource object the extended object is based on
        @param ext_exclude iterable of field names to skip, or None
        @param kwargs extra arguments passed through to method/service calls
        """

        # Step 1: Determine needs to fill fields with resource objects.
        field_needs = []  # Fields that need to be set in a subsequent step
        resource_needs = set()  # Resources to read by id based on needs
        assoc_needs = set()  # Compound associations to follow
        final_target_types = {}  # Keeps track of what resource type filter is desired

        for field in obj._schema:

            # Skip any fields that were specifically to be excluded
            if ext_exclude is not None and field in ext_exclude:
                continue

            # Iterate over all of the decorators for the field
            for decorator in obj._schema[field]["decorators"]:
                field_start_time = time.time()

                # Field gets value from method or service call (local to current executing process)
                if decorator == "Method":
                    deco_value = obj.get_decorator_value(field, decorator)
                    method_name = deco_value if deco_value else "get_" + field

                    ret_val = self.execute_method_with_resource(resource._id, method_name, **kwargs)
                    if ret_val is not None:
                        setattr(obj, field, ret_val)

                elif decorator == "ServiceRequest":
                    deco_value = obj.get_decorator_value(field, decorator)
                    if obj._schema[field]["type"] != "ServiceRequest":
                        log.error("The field %s is an incorrect type for a ServiceRequest decorator.", field)
                        continue

                    method_name = deco_value if deco_value else "get_" + field

                    if method_name.find(".") == -1:
                        raise Inconsistent(
                            "The field %s decorated as a ServiceRequest only supports remote operations.", field
                        )

                    service_client, operation = get_remote_info(self, method_name)
                    rmi_call = method_name.split(".")
                    parms = {"resource_id": resource._id}
                    parms.update(get_method_arguments(service_client, operation, **kwargs))
                    ret_val = IonObject(
                        OT.ServiceRequest,
                        service_name=rmi_call[0],
                        service_operation=operation,
                        request_parameters=parms,
                    )
                    setattr(obj, field, ret_val)

                # Fill field based on compound association chains. Results in nested lists of resource objects
                elif self.is_compound_association(decorator):
                    target_type = obj.get_decorator_value(field, decorator)
                    if (
                        target_type and "," in target_type
                    ):  # Can specify multiple type filters, only handles two levels for now
                        target_type, final_target_type = target_type.split(",")
                        final_target_types[field] = final_target_type  # Keep track for later

                    predicates = self.get_compound_association_predicates(decorator)
                    assoc_list = self._find_associated_resources(resource, predicates[0], target_type)
                    field_needs.append((field, "A", (assoc_list, predicates)))
                    for target_id, assoc in assoc_list:
                        assoc_needs.add((target_id, predicates[1]))

                # Fill field based on association with list of resource objects
                elif self.is_association_predicate(decorator):
                    target_type = obj.get_decorator_value(field, decorator)
                    if target_type and "," in target_type:  # Can specify list of target types
                        target_type = target_type.split(",")
                    assoc_list = self._find_associated_resources(resource, decorator, target_type)
                    if obj._schema[field]["type"] == "list":
                        if assoc_list:
                            field_needs.append((field, "L", assoc_list))
                            # Was a list comprehension used for its side
                            # effects; set.update is the idiomatic form.
                            resource_needs.update(target_id for target_id, assoc in assoc_list)
                    elif obj._schema[field]["type"] == "int":
                        setattr(obj, field, len(assoc_list))
                    else:  # Can be nested object or None
                        if assoc_list:
                            first_assoc = assoc_list[0]
                            if len(assoc_list) != 1:
                                # WARNING: Swallow random further objects here!
                                log.warn(
                                    "Extended object field %s uses only 1 of %d associated resources",
                                    field,
                                    len(assoc_list),
                                )
                            field_needs.append((field, "O", first_assoc))
                            resource_needs.add(first_assoc[0])
                        else:
                            setattr(obj, field, None)
                else:
                    log.debug("Unknown decorator %s for field %s of resource %s", decorator, field, resource._id)

                field_stop_time = time.time()

                # log.debug("Time to process field %s(%s) %f secs", field, decorator, field_stop_time - field_start_time)

        # field_needs contains a list of what's needed to load in next step (different cases)
        if not field_needs:
            return

        # Step 2: Read second level of compound associations as needed
        # @TODO Can only do 2 level compounds for now. Make recursive someday
        if assoc_needs:
            assocs = self._rr.find_associations(anyside=list(assoc_needs), id_only=False)
            self._add_associations(assocs)

            # Determine resource ids to read for compound associations
            for field, need_type, needs in field_needs:
                if need_type == "A":
                    assoc_list, predicates = needs
                    for target_id, assoc in assoc_list:
                        res_type = assoc.ot if target_id == assoc.o else assoc.st
                        assoc_list1 = self._find_associated_resources(target_id, predicates[1], None, res_type)
                        for target_id1, assoc1 in assoc_list1:
                            resource_needs.add(target_id1)

        # Step 3: Read resource objects based on needs
        res_list = self._rr.read_mult(list(resource_needs))
        res_objs = dict(zip(resource_needs, res_list))

        # Step 4: Set fields to loaded resource objects based on type
        for field, need_type, needs in field_needs:
            if need_type == "L":  # case list
                obj_list = [res_objs[target_id] for target_id, assoc in needs]
                setattr(obj, field, obj_list)
            elif need_type == "O":  # case nested object
                target_id, assoc = needs
                setattr(obj, field, res_objs[target_id])
            elif need_type == "A":  # case compound
                assoc_list, predicates = needs
                obj_list = []
                for target_id, assoc in assoc_list:
                    res_type = assoc.ot if target_id == assoc.o else assoc.st
                    assoc_list1 = self._find_associated_resources(target_id, predicates[1], None, res_type)
                    obj_list.append([res_objs[target_id1] for target_id1, assoc1 in assoc_list1])

                # Filter the list to remove objects that might match the current resource type
                result_obj_list = []
                for ol_nested in obj_list:
                    if ol_nested:
                        # Only get the object types which don't match the current resource type and may match a final type
                        # (dict.has_key was removed in Python 3; "in" works in both.)
                        if field in final_target_types:
                            result_obj_list.extend(
                                [
                                    target_obj
                                    for target_obj in ol_nested
                                    if (
                                        target_obj.type_ != resource.type_
                                        and final_target_types[field] in target_obj._get_extends()
                                    )
                                ]
                            )
                        else:
                            result_obj_list.extend(
                                [target_obj for target_obj in ol_nested if (target_obj.type_ != resource.type_)]
                            )

                if obj._schema[field]["type"] == "list":
                    if result_obj_list:
                        setattr(obj, field, result_obj_list)
                elif obj._schema[field]["type"] == "int":
                    setattr(obj, field, len(result_obj_list))
                else:
                    if result_obj_list:
                        if len(result_obj_list) != 1:
                            # WARNING: Swallow random further objects here!
                            log.warn(
                                "Extended object field %s uses only 1 of %d compound associated resources",
                                field,
                                len(result_obj_list),
                            )
                        setattr(obj, field, result_obj_list[0])
                    else:
                        setattr(obj, field, None)
        while(start_time + DEFAULT_TIMEOUT > time.time()):
            try:
                file = open(pid_file)
                pid = file.read().strip('\0\n\r')
                if(pid):
                    int(pid)
                    log.info("port agent pid: [%s]" % (pid))
                    return int(pid)
            except ValueError, e:
                log.warn("Failed to convert %s to an int '%s" % (pid, e) )
                break
            except:
                log.warn("Failed to open pid file: %s" % (pid_file))
                gevent.sleep(1);

        log.error("port agent startup failed");

        return None;

    def _read_config(self):
        self._tmp_config.seek(0);
        return "".join(self._tmp_config.readlines());
        

    def stop(self):
        log.info('Stop port agent')
        # When calling stop, IMS grabs a new port agent process via PortAgentProcess.get_process
        # So self._pid is None and needs to be initialized
        pid_file = PID_FILE % (PROCESS_BASE_DIR, self._command_port)
        try:
            with open(pid_file, 'r') as f:
Example #19
0
    def create_extended_resource_container(
        self,
        extended_resource_type,
        resource_id,
        computed_resource_type=None,
        ext_associations=None,
        ext_exclude=None,
        **kwargs
    ):
        """
        Build and return an extended resource container wrapping the resource
        identified by resource_id.

        @param extended_resource_type  container type name; must extend
                                       OT.ResourceContainer
        @param resource_id             id string of the resource to wrap
        @param computed_resource_type  optional computed-attributes type; must
                                       extend OT.BaseComputedAttributes
        @param ext_associations        additional associations to resolve
        @param ext_exclude             field names to skip when filling
        @retval the populated extended resource container
        @throws Inconsistent  bad resource_id argument, uninitialized instance,
                              or mismatched OriginResourceType decorator
        @throws BadRequest    requested types do not extend the required bases
        @throws NotFound      no resource exists for resource_id
        """
        t_begin = time.time()
        # Drop any context left over from a previous call on this instance.
        self.ctx = None

        # --- argument and instance-state validation ---------------------
        if not isinstance(resource_id, types.StringType):
            raise Inconsistent("The parameter resource_id is not a single resource id string")

        if not self.service_provider or not self._rr:
            raise Inconsistent("This class is not initialized properly")

        if extended_resource_type not in getextends(OT.ResourceContainer):
            raise BadRequest(
                "The requested resource %s is not extended from %s" % (extended_resource_type, OT.ResourceContainer)
            )

        if computed_resource_type and computed_resource_type not in getextends(OT.BaseComputedAttributes):
            raise BadRequest(
                "The requested resource %s is not extended from %s"
                % (computed_resource_type, OT.BaseComputedAttributes)
            )

        res_obj = self._rr.read(resource_id)
        if not res_obj:
            raise NotFound("The Resource %s does not exist" % resource_id)

        container = IonObject(extended_resource_type)

        # The OriginResourceType decorator on the container class must agree
        # with the actual type of the resource being wrapped.
        origin_type = container.get_class_decorator_value("OriginResourceType")
        if origin_type is None:
            log.error(
                "The requested extended resource %s does not contain an OriginResourceType decorator.",
                extended_resource_type,
            )
        elif origin_type != res_obj.type_ and not issubtype(res_obj.type_, origin_type):
            raise Inconsistent(
                "The OriginResourceType decorator of the requested resource %s(%s) does not match the type of the specified resource id(%s)."
                % (extended_resource_type, origin_type, res_obj.type_)
            )

        container._id = res_obj._id
        container.resource = res_obj

        # Prime the per-call context field and preload resource associations.
        self._prepare_context(res_obj._id)

        # --- populate the container in fixed order ----------------------
        self.set_container_lcstate_info(container)   # lcstate-related fields
        self.set_res_container_info(container)       # currently only type_version
        self.set_container_field_values(container, ext_exclude, **kwargs)
        self.set_computed_attributes(container, computed_resource_type, ext_exclude, **kwargs)
        self.set_extended_associations(container, ext_associations, ext_exclude)

        container.ts_created = get_ion_ts()

        t_end = time.time()
        log.debug(
            "Time to process extended resource container %s %f secs",
            extended_resource_type,
            t_end - t_begin,
        )

        return container