Example #1
    def _transform(self, obj):
        # Note: This check to detect an IonObject is a bit risky (only type_)
        if isinstance(obj, dict) and "type_" in obj:
            objc  = obj
            otype = objc['type_'].encode('ascii')   # Correct?

            # don't supply a dict - we want the object to initialize with all its defaults intact,
            # which preserves things like IonEnumObject and invokes the setattr behavior we want there.
            ion_obj = self._obj_registry.new(otype)

            # get outdated attributes in data that are not defined in the current schema
            extra_attributes = objc.viewkeys() - ion_obj._schema.viewkeys() - BUILT_IN_ATTRS
            for extra in extra_attributes:
                objc.pop(extra)
                log.info('discard %s not in current schema' % extra)

            for k, v in objc.iteritems():

                # unicode translate to utf8
                if isinstance(v, unicode):
                    v = str(v.encode('utf8'))

                # CouchDB adds _attachments and puts metadata in it
                # in pyon metadata is in the document
                # so we discard _attachments while transforming between the two
                if k not in ("type_", "_attachments", "_conflicts"):
                    setattr(ion_obj, k, v)
                if k == "_conflicts":
                    log.warn("CouchDB conflict detected for ID=%S (ignored): %s", obj.get('_id', None), v)

            return ion_obj

        return obj
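
A minimal, framework-free sketch of the same rehydration pattern shown above: detect a typed dict, build a fresh instance so the defaults stay intact, drop keys the current class no longer defines, and setattr the rest. All names here are illustrative; this is not the pyon registry API.

    class Registry(object):
        def __init__(self):
            self._classes = {}

        def register(self, cls):
            self._classes[cls.__name__] = cls
            return cls

        def new(self, type_name):
            return self._classes[type_name]()

    registry = Registry()

    @registry.register
    class Sample(object):
        def __init__(self):
            self.value = 0       # defaults preserved, as in _transform above
            self.comment = ''

    def rehydrate(doc):
        if isinstance(doc, dict) and 'type_' in doc:
            obj = registry.new(doc['type_'])
            known = vars(obj).keys()
            for key, value in doc.items():
                if key != 'type_' and key in known:
                    setattr(obj, key, value)
            return obj
        return doc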
Example #2
 def assertNoParticleRegression(self, filepath, data_handler):
     """
     Compares particles with previous run. If no YAML file exists, creates one.
     :param filepath:  fully qualified name of the input file (that was parsed)
     :param data_handler:  ParticleDataHandler returned from parse()
     :return:
     """
     yaml_file = os.path.splitext(filepath)[0] + '.yml'
     particles = data_handler._samples
     if os.path.isfile(yaml_file):
         with open(yaml_file, 'r') as stream:
             prev_particles = yaml.load(stream)
             # particle key names should match
             self.assertListEqual(sorted(prev_particles.keys()),
                                  sorted(particles.keys()))
             # compare number of samples across one of the particle keys
             for p in prev_particles.keys():
                 log.debug('%s: %d %d', p, len(prev_particles[p]),
                           len(particles[p]))
                 self.assertEqual(len(prev_particles[p]), len(particles[p]))
     else:
         with open(yaml_file, 'w') as stream:
             log.warn(
                 'creating yaml output file for regression testing - commit %s',
                 yaml_file)
             yaml.dump(particles, stream, default_flow_style=False)
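
A standalone sketch of the create-or-compare baseline pattern used above, assuming only PyYAML and a dict of sample lists; the function name and return convention are illustrative.

    import os
    import yaml

    def check_against_baseline(baseline_path, samples):
        if not os.path.isfile(baseline_path):
            # First run: record the current samples as the baseline.
            with open(baseline_path, 'w') as stream:
                yaml.safe_dump(samples, stream, default_flow_style=False)
            return True
        with open(baseline_path, 'r') as stream:
            baseline = yaml.safe_load(stream)
        # Same keys and the same number of samples per key count as "no regression".
        return (sorted(baseline.keys()) == sorted(samples.keys()) and
                all(len(baseline[k]) == len(samples[k]) for k in baseline))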
Example #3
 def getpid(self):
     """
     Get the pid of the current running process and ensure that it is running.
     @returns the pid of the driver process if it is running, otherwise None
     """
     if self._driver_process:
         if self.poll():
             return self._driver_process.pid
         else:
             log.warn("Driver process found, but poll failed for pid %s" % self._driver_process.pid)
     else:
         return None
Example #4
 def getpid(self):
     """
     Get the pid of the current running process and ensure that it is running.
     @returns the pid of the driver process if it is running, otherwise None
     """
     if self._driver_process:
         if self.poll():
             return self._driver_process.pid
         else:
             log.warn("Driver process found, but poll failed for pid %s" %
                      self._driver_process.pid)
     else:
         return None
Example #5
    def poll(self):
        """
        Check to see if the port agent process is alive.
        @return true if process is running, false otherwise
        """

        if not self._pid:
            return False

        try:
            os.kill(self._pid, 0)
        except OSError, e:
            log.warn("Could not send a signal to the driver, pid: %s" % self._pid)
            return False

        return True
Example #6
    def __str__(self):
        ds = str(self.__dict__)
        try:
            # Remove the type_ from the dict str - cheaper this way than copying the dict
            typeidx = ds.find("'type_': '")
            if typeidx != -1:  # str.find returns -1 when the marker is absent
                endidx = ds.find("'", typeidx + 10)
                if ds[typeidx - 2] == ",":
                    typeidx -= 2
                ds = ds[:typeidx] + ds[endidx + 1:]
        except Exception as ex:
            log.warn("Could not create IonObject __str__ representation")

        # This is a more eye pleasing variant but does not eval
        return "%s(%s)" % (self.__class__.__name__, ds)
Example #7
    def __str__(self):
        ds = str(self.__dict__)
        try:
            # Remove the type_ from the dict str - cheaper this way than copying the dict
            typeidx = ds.find("'type_': '")
            if typeidx != -1:  # str.find returns -1 when the marker is absent
                endidx = ds.find("'", typeidx+10)
                if ds[typeidx-2] == ",":
                    typeidx -= 2
                ds = ds[:typeidx] + ds[endidx+1:]
        except Exception as ex:
            log.warn("Could not create IonObject __str__ representation")

        # This is a more eye pleasing variant but does not eval
        return "%s(%s)" % (self.__class__.__name__, ds)
Example #8
    def poll(self):
        """
        Check to see if the port agent process is alive.
        @return true if process is running, false otherwise
        """

        if not self._pid:
            return False

        try:
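            # Signal 0 delivers nothing; os.kill only checks that the pid exists
            # and is signalable, raising OSError if it does not.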
            os.kill(self._pid, 0)
        except OSError, e:
            log.warn("Could not send a signal to the driver, pid: %s" %
                     self._pid)
            return False

        return True
Example #9
    def poll(self):
        """
        Check to see if the driver process is alive.
        @return true if driver process is running, false otherwise
        """

        # The Popen.poll() doesn't seem to be returning reliable results.  Sending a signal 0 to the process might be
        # more reliable.

        if not self._driver_process:
            return False

        try:
            os.kill(self._driver_process.pid, 0)
        except OSError, e:
            log.warn("Could not send a signal to the driver, pid: %s" % self._driver_process.pid)
            return False

        return True
Example #10
    def poll(self):
        """
        Check to see if the driver process is alive.
        @return true if driver process is running, false otherwise
        """

        # The Popen.poll() doesn't seem to be returning reliable results.  Sending a signal 0 to the process might be
        # more reliable.

        if not self._driver_process:
            return False

        try:
            os.kill(self._driver_process.pid, 0)
        except OSError, e:
            log.warn("Could not send a signal to the driver, pid: %s" %
                     self._driver_process.pid)
            return False

        return True
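
The same signal-0 liveness check, pulled out as a small standalone helper; the name is illustrative.

    import os

    def pid_is_running(pid):
        """Return True if a process with this pid exists and can be signaled."""
        if not pid:
            return False
        try:
            os.kill(pid, 0)  # signal 0: error checking only, nothing is delivered
        except OSError:
            return False
        return True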
Example #11
    def _read_pid(self):
        pid_file = PID_FILE % (PROCESS_BASE_DIR, self._command_port)
        start_time = time.time()
        boo = 0

        log.debug("read pid file: " + pid_file)
        while (start_time + DEFAULT_TIMEOUT > time.time()):
            try:
                file = open(pid_file)
                pid = file.read().strip('\0\n\r')
                if (pid):
                    int(pid)
                    log.info("port agent pid: [%s]" % (pid))
                    return int(pid)
            except ValueError, e:
                log.warn("Failed to convert %s to an int '%s" % (pid, e))
                break
            except:
                log.warn("Failed to open pid file: %s" % (pid_file))
                gevent.sleep(1)
Example #12
    def _read_pid(self):
        pid_file = PID_FILE % (PROCESS_BASE_DIR, self._command_port)
        start_time = time.time()
        boo = 0

        log.debug("read pid file: " + pid_file)
        while(start_time + DEFAULT_TIMEOUT > time.time()):
            try:
                file = open(pid_file)
                pid = file.read().strip('\0\n\r')
                if(pid):
                    int(pid)
                    log.info("port agent pid: [%s]" % (pid))
                    return int(pid)
            except ValueError, e:
                log.warn("Failed to convert %s to an int '%s" % (pid, e) )
                break
            except:
                log.warn("Failed to open pid file: %s" % (pid_file))
                gevent.sleep(1)
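
A compact sketch of the same poll-a-pid-file-until-timeout idea using plain time.sleep instead of gevent; names and defaults are illustrative.

    import time

    def wait_for_pid_file(pid_file, timeout=60.0, interval=1.0):
        """Poll pid_file until it contains an integer pid or the timeout expires."""
        deadline = time.time() + timeout
        while time.time() < deadline:
            try:
                with open(pid_file) as f:
                    text = f.read().strip('\0\n\r')
                if text:
                    return int(text)
            except (IOError, OSError, ValueError):
                pass  # file missing or not yet a valid integer; keep waiting
            time.sleep(interval)
        return None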
Example #13
def get_method_arguments(module, method_name, **kwargs):
    """
    Returns a dict of the allowable method parameters
    @param module:
    @param method_name:
    @param kwargs:
    @return:
    """
    param_dict = {}

    if hasattr(module,method_name):
        try:
            #This will fail running unit tests with mock objects - BOO!  
            method_args = inspect.getargspec(getattr(module,method_name))
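            # getargspec returns (args, varargs, keywords, defaults); [0] is the
            # list of declared argument names, which is what gets filtered below.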
            for arg in method_args[0]:
                if kwargs.has_key(arg):
                    param_dict[arg] = kwargs[arg]

        except Exception, e:
            #Log a warning and simply return an empty dict
            log.warn('Cannot determine the arguments for method: %s in module: %s: %s', module, method_name, e.message)

    return param_dict
Example #14
    def is_service_available(self, service_name, local_rr_only=False):

        try:
            service_resource = None
            #from pyon.core.bootstrap import container_instance
            from mi.core.bootstrap import container_instance
            from interface.objects import ServiceStateEnum
            # Use container direct RR connection if available, otherwise use messaging to the RR service
            if hasattr(container_instance, 'has_capability') and container_instance.has_capability('RESOURCE_REGISTRY'):
                service_resource, _ = container_instance.resource_registry.find_resources(restype='Service', name=service_name)
            elif not local_rr_only:
                from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
                rr_client = ResourceRegistryServiceClient(container_instance.node)
                service_resource, _ = rr_client.find_resources(restype='Service', name=service_name)
            else:
                log.warn("is_service_available(%s) - No RR connection" % service_name)

            # The service is available only if there is a single RR object for it and it is in one of these states:
            if service_resource and len(service_resource) > 1:
                log.warn("is_service_available(%s) - Found multiple service instances: %s", service_name, service_resource)

            # MM 2013-08-17: Added PENDING, because this means service will be there shortly
            if service_resource and service_resource[0].state in (ServiceStateEnum.READY, ServiceStateEnum.STEADY, ServiceStateEnum.PENDING):
                return True
            elif service_resource:
                log.warn("is_service_available(%s) - Service resource in invalid state", service_resource)

            return False

        except Exception as ex:
            return False
Example #15
    def memory_usage(self):
        """
        Get the current memory usage for the current driver process.
        @returns memory usage in KB of the current driver process
        """
        driver_pid = self.getpid()
        if not driver_pid:
            log.warn("no process running")
            return 0

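        # "ps -o rss,pid" prints the resident set size (typically in kilobytes)
        # alongside the pid for the requested process.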
        ps_process = subprocess.Popen(["ps", "-o rss,pid", "-p %s" % self.getpid()], stdout=subprocess.PIPE)
        ps_process.poll()

        usage = 0
        for line in ps_process.stdout:
            if not line.strip().startswith('RSS'):
                try:
                    fields = line.split()
                    pid = int(fields[1])
                    if pid == driver_pid:
                        usage = int(fields[0])
                except:
                    log.warn("Failed to parse output for memory usage: %s" % line)
                    usage = 0

        if usage:
            log.info("process memory usage: %dk" % usage)
        else:
            log.warn("process not running")

        return usage
Example #16
    def memory_usage(self):
        """
        Get the current memory usage for the current driver process.
        @returns memory usage in KB of the current driver process
        """
        driver_pid = self.getpid()
        if not driver_pid:
            log.warn("no process running")
            return 0

        ps_process = subprocess.Popen(
            ["ps", "-o rss,pid", "-p %s" % self.getpid()],
            stdout=subprocess.PIPE)
        retcode = ps_process.poll()

        usage = 0
        for line in ps_process.stdout:
            if not line.strip().startswith('RSS'):
                try:
                    fields = line.split()
                    pid = int(fields[1])
                    if pid == driver_pid:
                        usage = int(fields[0])
                except:
                    log.warn("Failed to parse output for memory usage: %s" %
                             line)
                    usage = 0

        if usage:
            log.info("process memory usage: %dk" % usage)
        else:
            log.warn("process not running")

        return usage
Example #17
 def assertNoParticleRegression(self, filepath, data_handler):
     """
     Compares particles with previous run. If no YAML file exists, creates one.
     :param filepath:  fully qualified name of the input file (that was parsed)
     :param data_handler:  ParticleDataHandler returned from parse()
     :return:
     """
     yaml_file = os.path.splitext(filepath)[0] + '.yml'
     particles = data_handler._samples
     if os.path.isfile(yaml_file):
         with open(yaml_file, 'r') as stream:
             prev_particles = yaml.load(stream)
             # particle key names should match
             self.assertListEqual(sorted(prev_particles.keys()), sorted(particles.keys()))
             # compare number of samples across one of the particle keys
             for p in prev_particles.keys():
                 log.debug('%s: %d %d', p, len(prev_particles[p]), len(particles[p]))
                 self.assertEqual(len(prev_particles[p]), len(particles[p]))
     else:
         with open(yaml_file, 'w') as stream:
             log.warn('creating yaml output file for regression testing - commit %s', yaml_file)
             yaml.dump(particles, stream, default_flow_style=False)
Example #18
def get_method_arguments(module, method_name, **kwargs):
    """
    Returns a dict of the allowable method parameters
    @param module:
    @param method_name:
    @param kwargs:
    @return:
    """
    param_dict = {}

    if hasattr(module, method_name):
        try:
            #This will fail running unit tests with mock objects - BOO!
            method_args = inspect.getargspec(getattr(module, method_name))
            for arg in method_args[0]:
                if kwargs.has_key(arg):
                    param_dict[arg] = kwargs[arg]

        except Exception, e:
            #Log a warning and simply return an empty dict
            log.warn(
                'Cannot determine the arguments for method: %s in module: %s: %s',
                module, method_name, e.message)

    return param_dict
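
On Python 3, inspect.getargspec is deprecated and eventually removed; a sketch of the same filtering with inspect.signature (function name is illustrative):

    import inspect

    def get_method_arguments_py3(module, method_name, **kwargs):
        """Return the subset of kwargs that the named method actually accepts."""
        param_dict = {}
        method = getattr(module, method_name, None)
        if method is None:
            return param_dict
        try:
            parameters = inspect.signature(method).parameters
        except (TypeError, ValueError):
            # Builtins and some mock objects do not expose a signature.
            return param_dict
        for name in parameters:
            if name in kwargs:
                param_dict[name] = kwargs[name]
        return param_dict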
Example #19
    def stop(self):
        log.info('Stop port agent')
        # When calling stop, IMS grabs a new port agent process via PortAgentProcess.get_process
        # So self._pid is None and needs to be initialized
        pid_file = PID_FILE % (PROCESS_BASE_DIR, self._command_port)
        try:
            with open(pid_file, 'r') as f:
                pid = f.read().strip('\0\n\4')
                if pid:
                    try:
                        self._pid = int(pid)
                    except ValueError:
                        pass
        except IOError:
            log.exception('Port agent pid file not found!')

        command_line = [self._binary_path]

        command_line.append("-c")
        command_line.append(self._tmp_config.name)

        command_line.append("-k")

        command_line.append("-p")
        command_line.append("%s" % (self._command_port))

        self.run_command(command_line)
        timeout = Timeout(5)
        timeout.start()
        try:
            while self.poll():
                log.warn('WAITING HERE with pid %s' % self._pid)
                gevent.sleep(1)
        except Timeout, t:
            log.error(
                'Timed out waiting for pagent to die.  Going in for kill.')
            os.kill(self._pid, signal.SIGKILL)
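
The wait-then-SIGKILL tail of stop() can also be written with gevent.Timeout as a context manager, which avoids the explicit start()/except handling; a sketch assuming a poll()-style liveness callable.

    import os
    import signal

    import gevent
    from gevent import Timeout

    def wait_then_kill(pid, is_alive, grace_seconds=5):
        """Give the process grace_seconds to exit on its own, then SIGKILL it."""
        with Timeout(grace_seconds, False):  # False: expire silently, no exception
            while is_alive():
                gevent.sleep(1)
        if is_alive():
            os.kill(pid, signal.SIGKILL)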
Example #20
    def _transform(self, obj):
        # Note: This check to detect an IonObject is a bit risky (only type_)
        if isinstance(obj, dict) and "type_" in obj:
            objc = obj
            otype = objc['type_'].encode('ascii')  # Correct?

            # don't supply a dict - we want the object to initialize with all its defaults intact,
            # which preserves things like IonEnumObject and invokes the setattr behavior we want there.
            ion_obj = self._obj_registry.new(otype)

            # get outdated attributes in data that are not defined in the current schema
            extra_attributes = objc.viewkeys() - ion_obj._schema.viewkeys(
            ) - BUILT_IN_ATTRS
            for extra in extra_attributes:
                objc.pop(extra)
                log.info('discard %s not in current schema' % extra)

            for k, v in objc.iteritems():

                # unicode translate to utf8
                if isinstance(v, unicode):
                    v = str(v.encode('utf8'))

                # CouchDB adds _attachments and puts metadata in it
                # in pyon metadata is in the document
                # so we discard _attachments while transforming between the two
                if k not in ("type_", "_attachments", "_conflicts"):
                    setattr(ion_obj, k, v)
                if k == "_conflicts":
                    log.warn(
                        "CouchDB conflict detected for ID=%S (ignored): %s",
                        obj.get('_id', None), v)

            return ion_obj

        return obj
Example #21
    def stop(self):
        log.info('Stop port agent')
        # When calling stop, IMS grabs a new port agent process via PortAgentProcess.get_process
        # So self._pid is None and needs to be initialized
        pid_file = PID_FILE % (PROCESS_BASE_DIR, self._command_port)
        try:
            with open(pid_file, 'r') as f:
                pid = f.read().strip('\0\n\4')
                if pid:
                    try:
                        self._pid = int(pid)
                    except ValueError:
                        pass
        except IOError:
            log.exception('Port agent pid file not found!')

        command_line = [self._binary_path]

        command_line.append("-c")
        command_line.append(self._tmp_config.name)

        command_line.append("-k")

        command_line.append("-p")
        command_line.append("%s" % (self._command_port))

        self.run_command(command_line)
        timeout = Timeout(5)
        timeout.start()
        try:
            while self.poll():
                log.warn('WAITING HERE with pid %s' % self._pid)
                gevent.sleep(1)
        except Timeout, t:
            log.error('Timed out waiting for pagent to die.  Going in for kill.')
            os.kill(self._pid, signal.SIGKILL)
Example #22
    def is_service_available(self, service_name, local_rr_only=False):

        try:
            service_resource = None
            #from pyon.core.bootstrap import container_instance
            from mi.core.bootstrap import container_instance
            from interface.objects import ServiceStateEnum
            # Use container direct RR connection if available, otherwise use messaging to the RR service
            if hasattr(container_instance,
                       'has_capability') and container_instance.has_capability(
                           'RESOURCE_REGISTRY'):
                service_resource, _ = container_instance.resource_registry.find_resources(
                    restype='Service', name=service_name)
            elif not local_rr_only:
                from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
                rr_client = ResourceRegistryServiceClient(
                    container_instance.node)
                service_resource, _ = rr_client.find_resources(
                    restype='Service', name=service_name)
            else:
                log.warn("is_service_available(%s) - No RR connection" %
                         service_name)

            # The service is available only if there is a single RR object for it and it is in one of these states:
            if service_resource and len(service_resource) > 1:
                log.warn(
                    "is_service_available(%s) - Found multiple service instances: %s",
                    service_name, service_resource)

            # MM 2013-08-17: Added PENDING, because this means service will be there shortly
            if service_resource and service_resource[0].state in (
                    ServiceStateEnum.READY, ServiceStateEnum.STEADY,
                    ServiceStateEnum.PENDING):
                return True
            elif service_resource:
                log.warn(
                    "is_service_available(%s) - Service resource in invalid state",
                    service_resource)

            return False

        except Exception as ex:
            return False
Example #23
    def _validate(self):
        """
        Compare fields to the schema and raise AttributeError if mismatched.
        Named _validate instead of validate because the data may have a field named "validate".
        """
        fields, schema = self.__dict__, self._schema

        # Check for extra fields not defined in the schema
        extra_fields = fields.viewkeys() - schema.viewkeys() - BUILT_IN_ATTRS
        if len(extra_fields) > 0:
            raise AttributeError('Fields found that are not in the schema: %r' % (list(extra_fields)))

        required_decorator = 'Required'
        content_type_decorator = 'ContentType'
        content_count_decorator = 'ContentCount'
        value_range_decorator = 'ValueRange'
        value_pattern_decorator = 'ValuePattern'

        # Check required field criteria met
        for key in schema:
            if 'decorators' in schema[key] and required_decorator in schema[key]['decorators']:
                if not key in fields or fields[key] is None:
                    raise AttributeError('Required value "%s" not set' % key)

        # Check each attribute
        for key in fields.iterkeys():
            if key in BUILT_IN_ATTRS:
                continue

            schema_val = schema[key]

            # Correct any float or long types that got downgraded to int
            if isinstance(fields[key], int):
                if schema_val['type'] == 'float':
                    fields[key] = float(fields[key])
                elif schema_val['type'] == 'long':
                    fields[key] = long(fields[key])

            # argh, annoying work around for OrderedDict vs dict issue
            if type(fields[key]) == dict and schema_val['type'] == 'OrderedDict':
                fields[key] = OrderedDict(fields[key])

            # Basic type checking
            field_val = fields[key]

            if type(field_val).__name__ != schema_val['type']:

                # if the schema doesn't define a type, we can't very well validate it
                if schema_val['type'] == 'NoneType':
                    continue

                # Allow unicode instead of str. This may be too lenient.
                if schema_val['type'] == 'str' and type(field_val).__name__ == 'unicode':
                    continue

                # Already checked for required above.  Assume optional and continue
                if field_val is None:
                    continue

                # Allow unicode instead of str. This may be too lenient.
                if schema_val['type'] == 'str' and type(field_val).__name__ == 'unicode':
                    continue

                # IonObjects are ok for dict fields too!
                if isinstance(field_val, IonObjectBase) and schema_val['type'] == 'OrderedDict':
                    continue

                if not key in fields or fields[key] is None:
                    raise AttributeError('Required value "%s" not set' % key)

                # Check for inheritance
                if self.check_inheritance_chain(type(field_val), schema_val['type']):
                    continue

                # Check enum types
                from pyon.core.registry import enum_classes
                if isinstance(field_val, int) and schema_val['type'] in enum_classes:
                    if field_val not in enum_classes(schema_val['type'])._str_map:
                        raise AttributeError('Invalid enum value "%d" for field "%s.%s", should be between 1 and %d' %
                                     (fields[key], type(self).__name__, key, len(enum_classes(schema_val['type'])._str_map)))
                    else:
                        continue

                if type(field_val) == tuple and schema_val['type'] == 'list':
                    continue

                if isinstance(field_val, IonObjectBase) and schema_val['type'] == 'dict':
                    log.warn('TODO: Please convert generic dict attribute type to abstract type for field "%s.%s"' % (type(self).__name__, key))
                    continue

                # Special case check for ION object being passed where default type is dict or str
                if 'decorators' in schema_val:
                    if content_type_decorator in schema_val['decorators']:
                        if isinstance(field_val, IonObjectBase) and schema_val['type'] == 'dict' or schema_val['type'] == 'str':
                            self.check_content(key, field_val, schema_val['decorators'][content_type_decorator])
                            continue

                raise AttributeError('Invalid type "%s" for field "%s.%s", should be "%s"' %
                                     (type(fields[key]), type(self).__name__, key, schema_val['type']))

            if type(field_val).__name__ == 'str':
                if value_pattern_decorator in schema_val['decorators']:
                    self.check_string_pattern_match(key, field_val, schema_val['decorators'][value_pattern_decorator])

            if type(field_val).__name__ in ['int', 'float', 'long']:
                if value_range_decorator in schema_val['decorators']:
                    self.check_numeric_value_range(key, field_val, schema_val['decorators'][value_range_decorator])

            if 'decorators' in schema_val:
                if content_type_decorator in schema_val['decorators']:
                    if schema_val['type'] == 'list':
                        self.check_collection_content(key, field_val, schema_val['decorators'][content_type_decorator])
                    elif schema_val['type'] == 'dict' or schema_val['type'] == 'OrderedDict':
                        self.check_collection_content(key, field_val.values(), schema_val['decorators'][content_type_decorator])
                    else:
                        self.check_content(key, field_val, schema_val['decorators'][content_type_decorator])
                if content_count_decorator in schema_val['decorators']:
                    if schema_val['type'] == 'list':
                        self.check_collection_length(key, field_val, schema_val['decorators'][content_count_decorator])
                    if schema_val['type'] == 'dict' or schema_val['type'] == 'OrderedDict':
                        self.check_collection_length(key, field_val.values(), schema_val['decorators'][content_count_decorator])

            if isinstance(field_val, IonObjectBase):
                field_val._validate()

            # Next validate only IonObjects found in child collections.
            # Note that this is non-recursive; only for first-level collections.
            elif isinstance(field_val, Mapping):
                for subkey in field_val:
                    subval = field_val[subkey]
                    if isinstance(subval, IonObjectBase):
                        subval._validate()
            elif isinstance(field_val, Iterable):
                for subval in field_val:
                    if isinstance(subval, IonObjectBase):
                        subval._validate()
Example #24
        boo = 0

        log.debug("read pid file: " + pid_file)
        while (start_time + DEFAULT_TIMEOUT > time.time()):
            try:
                file = open(pid_file)
                pid = file.read().strip('\0\n\r')
                if (pid):
                    int(pid)
                    log.info("port agent pid: [%s]" % (pid))
                    return int(pid)
            except ValueError, e:
                log.warn("Failed to convert %s to an int '%s" % (pid, e))
                break
            except:
                log.warn("Failed to open pid file: %s" % (pid_file))
                gevent.sleep(1)

        log.error("port agent startup failed")

        return None

    def _read_config(self):
        self._tmp_config.seek(0)
        return "".join(self._tmp_config.readlines())

    def stop(self):
        log.info('Stop port agent')
        # When calling stop, IMS grabs a new port agent process via PortAgentProcess.get_process
        # So self._pid is None and needs to be initialized
        pid_file = PID_FILE % (PROCESS_BASE_DIR, self._command_port)
Example #25
    def _validate(self):
        """
        Compare fields to the schema and raise AttributeError if mismatched.
        Named _validate instead of validate because the data may have a field named "validate".
        """
        fields, schema = self.__dict__, self._schema

        # Check for extra fields not defined in the schema
        extra_fields = fields.viewkeys() - schema.viewkeys() - BUILT_IN_ATTRS
        if len(extra_fields) > 0:
            raise AttributeError(
                'Fields found that are not in the schema: %r' %
                (list(extra_fields)))

        required_decorator = 'Required'
        content_type_decorator = 'ContentType'
        content_count_decorator = 'ContentCount'
        value_range_decorator = 'ValueRange'
        value_pattern_decorator = 'ValuePattern'

        # Check required field criteria met
        for key in schema:
            if 'decorators' in schema[key] and required_decorator in schema[
                    key]['decorators']:
                if not key in fields or fields[key] is None:
                    raise AttributeError('Required value "%s" not set' % key)

        # Check each attribute
        for key in fields.iterkeys():
            if key in BUILT_IN_ATTRS:
                continue

            schema_val = schema[key]

            # Correct any float or long types that got downgraded to int
            if isinstance(fields[key], int):
                if schema_val['type'] == 'float':
                    fields[key] = float(fields[key])
                elif schema_val['type'] == 'long':
                    fields[key] = long(fields[key])

            # argh, annoying work around for OrderedDict vs dict issue
            if type(fields[key]
                    ) == dict and schema_val['type'] == 'OrderedDict':
                fields[key] = OrderedDict(fields[key])

            # Basic type checking
            field_val = fields[key]

            if type(field_val).__name__ != schema_val['type']:

                # if the schema doesn't define a type, we can't very well validate it
                if schema_val['type'] == 'NoneType':
                    continue

                # Allow unicode instead of str. This may be too lenient.
                if schema_val['type'] == 'str' and type(
                        field_val).__name__ == 'unicode':
                    continue

                # Already checked for required above.  Assume optional and continue
                if field_val is None:
                    continue

                # Allow unicode instead of str. This may be too lenient.
                if schema_val['type'] == 'str' and type(
                        field_val).__name__ == 'unicode':
                    continue

                # IonObjects are ok for dict fields too!
                if isinstance(
                        field_val,
                        IonObjectBase) and schema_val['type'] == 'OrderedDict':
                    continue

                if not key in fields or fields[key] is None:
                    raise AttributeError('Required value "%s" not set' % key)

                # Check for inheritance
                if self.check_inheritance_chain(type(field_val),
                                                schema_val['type']):
                    continue

                # Check enum types
                from pyon.core.registry import enum_classes
                if isinstance(field_val,
                              int) and schema_val['type'] in enum_classes:
                    if field_val not in enum_classes(
                            schema_val['type'])._str_map:
                        raise AttributeError(
                            'Invalid enum value "%d" for field "%s.%s", should be between 1 and %d'
                            % (fields[key], type(self).__name__, key,
                               len(enum_classes(schema_val['type'])._str_map)))
                    else:
                        continue

                if type(field_val) == tuple and schema_val['type'] == 'list':
                    continue

                if isinstance(field_val,
                              IonObjectBase) and schema_val['type'] == 'dict':
                    log.warn(
                        'TODO: Please convert generic dict attribute type to abstract type for field "%s.%s"'
                        % (type(self).__name__, key))
                    continue

                # Special case check for ION object being passed where default type is dict or str
                if 'decorators' in schema_val:
                    if content_type_decorator in schema_val['decorators']:
                        if isinstance(
                                field_val, IonObjectBase
                        ) and schema_val['type'] == 'dict' or schema_val[
                                'type'] == 'str':
                            self.check_content(
                                key, field_val, schema_val['decorators']
                                [content_type_decorator])
                            continue

                raise AttributeError(
                    'Invalid type "%s" for field "%s.%s", should be "%s"' %
                    (type(fields[key]), type(self).__name__, key,
                     schema_val['type']))

            if type(field_val).__name__ == 'str':
                if value_pattern_decorator in schema_val['decorators']:
                    self.check_string_pattern_match(
                        key, field_val,
                        schema_val['decorators'][value_pattern_decorator])

            if type(field_val).__name__ in ['int', 'float', 'long']:
                if value_range_decorator in schema_val['decorators']:
                    self.check_numeric_value_range(
                        key, field_val,
                        schema_val['decorators'][value_range_decorator])

            if 'decorators' in schema_val:
                if content_type_decorator in schema_val['decorators']:
                    if schema_val['type'] == 'list':
                        self.check_collection_content(
                            key, field_val,
                            schema_val['decorators'][content_type_decorator])
                    elif schema_val['type'] == 'dict' or schema_val[
                            'type'] == 'OrderedDict':
                        self.check_collection_content(
                            key, field_val.values(),
                            schema_val['decorators'][content_type_decorator])
                    else:
                        self.check_content(
                            key, field_val,
                            schema_val['decorators'][content_type_decorator])
                if content_count_decorator in schema_val['decorators']:
                    if schema_val['type'] == 'list':
                        self.check_collection_length(
                            key, field_val,
                            schema_val['decorators'][content_count_decorator])
                    if schema_val['type'] == 'dict' or schema_val[
                            'type'] == 'OrderedDict':
                        self.check_collection_length(
                            key, field_val.values(),
                            schema_val['decorators'][content_count_decorator])

            if isinstance(field_val, IonObjectBase):
                field_val._validate()

            # Next validate only IonObjects found in child collections.
            # Note that this is non-recursive; only for first-level collections.
            elif isinstance(field_val, Mapping):
                for subkey in field_val:
                    subval = field_val[subkey]
                    if isinstance(subval, IonObjectBase):
                        subval._validate()
            elif isinstance(field_val, Iterable):
                for subval in field_val:
                    if isinstance(subval, IonObjectBase):
                        subval._validate()
Example #26
    def _find_associated_resources(self, resource, association_predicate, target_type=None, res_type=None):
        """
        Returns a list of tuples (target_id, Association) based on associations for the given
        resource (object), predicate and optional target object type.
        This method figures out appropriate association lookup based on the predicate definitions
        @param resource Either a resource object or a resource id (then res_type is needed)
        """
        assoc_list = []
        res_type = res_type or resource.type_
        resource_id = resource if type(resource) is str else resource._id
        if target_type and type(target_type) not in (list, tuple):  # None and empty str left alone
            target_type = [target_type]

        assoc_direction = ""
        if association_predicate and (association_predicate.endswith(">") or association_predicate.endswith("<")):
            assoc_direction = association_predicate[-1]
            association_predicate = association_predicate[:-1]

        # First validate the association predicate
        pred = Predicates[association_predicate]
        if not pred:
            return []  # Unknown association type so return empty list

        # Need to check through all of these in this order to account for specific vs base class inclusions
        # @ TODO: This algorithm and the order is fragile
        if self.is_predicate_association(pred, "domain", res_type) and self._allow_direction(assoc_direction, ">"):
            # Case 1: Association in schema with current exact resource type as SUBJECT

            assoc_list.extend(self._find_associations(resource_id, association_predicate, target_type, backward=False))

            # If no objects were found, try finding as subjects just in case (unless direction requested)
            if not assoc_list and not assoc_direction:
                assoc_list.extend(
                    self._find_associations(resource_id, association_predicate, target_type, backward=True)
                )

        elif self.is_predicate_association(pred, "range", res_type) and self._allow_direction(assoc_direction, "<"):
            # Case 2: Association in schema with current exact resource type as OBJECT

            assoc_list.extend(self._find_associations(resource_id, association_predicate, target_type, backward=True))

        elif self.is_predicate_association_extension(pred, "domain", res_type) and self._allow_direction(
            assoc_direction, ">"
        ):
            # Case 3: Association in schema with base type of current resource type as SUBJECT
            assoc_list.extend(self._find_associations(resource_id, association_predicate, target_type, backward=False))

            # If no objects were found, try finding as subjects just in case.
            if not assoc_list and not assoc_direction:
                assoc_list.extend(
                    self._find_associations(resource_id, association_predicate, target_type, backward=True)
                )

        elif self.is_predicate_association_extension(pred, "range", res_type) and self._allow_direction(
            assoc_direction, "<"
        ):
            # Case 4: Association in schema with base type of current resource type as OBJECT
            assoc_list.extend(self._find_associations(resource_id, association_predicate, target_type, backward=True))

        else:
            log.warn("Cannot handle association predicate %s for resource type %s", association_predicate, res_type)

        return assoc_list
Example #27
        boo = 0

        log.debug("read pid file: " + pid_file)
        while(start_time + DEFAULT_TIMEOUT > time.time()):
            try:
                file = open(pid_file)
                pid = file.read().strip('\0\n\r')
                if(pid):
                    int(pid)
                    log.info("port agent pid: [%s]" % (pid))
                    return int(pid)
            except ValueError, e:
                log.warn("Failed to convert %s to an int '%s" % (pid, e) )
                break
            except:
                log.warn("Failed to open pid file: %s" % (pid_file))
                gevent.sleep(1)

        log.error("port agent startup failed")

        return None

    def _read_config(self):
        self._tmp_config.seek(0)
        return "".join(self._tmp_config.readlines())

    def stop(self):
        log.info('Stop port agent')
        # When calling stop, IMS grabs a new port agent process via PortAgentProcess.get_process
        # So self._pid is None and needs to be initialized
Example #28
    def set_object_field_values(self, obj, resource, ext_exclude, **kwargs):
        """
        Iterate through all fields of the given object and set values according
        to the field type and decorator definition in the object type schema.
        """

        # Step 1: Determine which fields need to be filled with resource objects.
        field_needs = []  # Fields that need to be set in a subsequent step
        resource_needs = set()  # Resources to read by id based on needs
        assoc_needs = set()  # Compound associations to follow
        final_target_types = {}  # Keeps track of what resource type filter is desired

        for field in obj._schema:

            # Skip any fields that were specifically to be excluded
            if ext_exclude is not None and field in ext_exclude:
                continue

            # Iterate over all of the decorators for the field
            for decorator in obj._schema[field]["decorators"]:
                field_start_time = time.time()

                # Field gets value from method or service call (local to current executing process)
                if decorator == "Method":
                    deco_value = obj.get_decorator_value(field, decorator)
                    method_name = deco_value if deco_value else "get_" + field

                    ret_val = self.execute_method_with_resource(resource._id, method_name, **kwargs)
                    if ret_val is not None:
                        setattr(obj, field, ret_val)

                elif decorator == "ServiceRequest":
                    deco_value = obj.get_decorator_value(field, decorator)
                    if obj._schema[field]["type"] != "ServiceRequest":
                        log.error("The field %s is an incorrect type for a ServiceRequest decorator.", field)
                        continue

                    method_name = deco_value if deco_value else "get_" + field

                    if method_name.find(".") == -1:
                        raise Inconsistent(
                            "The field %s decorated as a ServiceRequest only supports remote operations.", field
                        )

                    service_client, operation = get_remote_info(self, method_name)
                    rmi_call = method_name.split(".")
                    parms = {"resource_id": resource._id}
                    parms.update(get_method_arguments(service_client, operation, **kwargs))
                    ret_val = IonObject(
                        OT.ServiceRequest,
                        service_name=rmi_call[0],
                        service_operation=operation,
                        request_parameters=parms,
                    )
                    setattr(obj, field, ret_val)

                # Fill field based on compound association chains. Results in nested lists of resource objects
                elif self.is_compound_association(decorator):
                    target_type = obj.get_decorator_value(field, decorator)
                    if (
                        target_type and "," in target_type
                    ):  # Can specify multiple type filters, only handles two levels for now
                        target_type, final_target_type = target_type.split(",")
                        final_target_types[field] = final_target_type  # Keep track for later

                    predicates = self.get_compound_association_predicates(decorator)
                    assoc_list = self._find_associated_resources(resource, predicates[0], target_type)
                    field_needs.append((field, "A", (assoc_list, predicates)))
                    for target_id, assoc in assoc_list:
                        assoc_needs.add((target_id, predicates[1]))

                # Fill field based on association with list of resource objects
                elif self.is_association_predicate(decorator):
                    target_type = obj.get_decorator_value(field, decorator)
                    if target_type and "," in target_type:  # Can specify list of target types
                        target_type = target_type.split(",")
                    assoc_list = self._find_associated_resources(resource, decorator, target_type)
                    if obj._schema[field]["type"] == "list":
                        if assoc_list:
                            field_needs.append((field, "L", assoc_list))
                            [resource_needs.add(target_id) for target_id, assoc in assoc_list]
                    elif obj._schema[field]["type"] == "int":
                        setattr(obj, field, len(assoc_list))
                    else:  # Can be nested object or None
                        if assoc_list:
                            first_assoc = assoc_list[0]
                            if len(assoc_list) != 1:
                                # WARNING: Swallow random further objects here!
                                log.warn(
                                    "Extended object field %s uses only 1 of %d associated resources",
                                    field,
                                    len(assoc_list),
                                )
                            field_needs.append((field, "O", first_assoc))
                            resource_needs.add(first_assoc[0])
                        else:
                            setattr(obj, field, None)
                else:
                    log.debug("Unknown decorator %s for field %s of resource %s", decorator, field, resource._id)

                field_stop_time = time.time()

                # log.debug("Time to process field %s(%s) %f secs", field, decorator, field_stop_time - field_start_time)

        # field_needs contains a list of what's needed to load in next step (different cases)
        if not field_needs:
            return

        # Step 2: Read second level of compound associations as needed
        # @TODO Can only do 2 level compounds for now. Make recursive someday
        if assoc_needs:
            assocs = self._rr.find_associations(anyside=list(assoc_needs), id_only=False)
            self._add_associations(assocs)

            # Determine resource ids to read for compound associations
            for field, need_type, needs in field_needs:
                if need_type == "A":
                    assoc_list, predicates = needs
                    for target_id, assoc in assoc_list:
                        res_type = assoc.ot if target_id == assoc.o else assoc.st
                        assoc_list1 = self._find_associated_resources(target_id, predicates[1], None, res_type)
                        for target_id1, assoc1 in assoc_list1:
                            resource_needs.add(target_id1)

        # Step 3: Read resource objects based on needs
        res_list = self._rr.read_mult(list(resource_needs))
        res_objs = dict(zip(resource_needs, res_list))

        # Step 4: Set fields to loaded resource objects based on type
        for field, need_type, needs in field_needs:
            if need_type == "L":  # case list
                obj_list = [res_objs[target_id] for target_id, assoc in needs]
                setattr(obj, field, obj_list)
            elif need_type == "O":  # case nested object
                target_id, assoc = needs
                setattr(obj, field, res_objs[target_id])
            elif need_type == "A":  # case compound
                assoc_list, predicates = needs
                obj_list = []
                for target_id, assoc in assoc_list:
                    res_type = assoc.ot if target_id == assoc.o else assoc.st
                    assoc_list1 = self._find_associated_resources(target_id, predicates[1], None, res_type)
                    obj_list.append([res_objs[target_id1] for target_id1, assoc1 in assoc_list1])

                # Filter the list to remove objects that might match the current resource type
                result_obj_list = []
                for ol_nested in obj_list:
                    if ol_nested:
                        # Only get the object types which don't match the current resource type and may match a final type
                        if final_target_types.has_key(field):
                            result_obj_list.extend(
                                [
                                    target_obj
                                    for target_obj in ol_nested
                                    if (
                                        target_obj.type_ != resource.type_
                                        and final_target_types[field] in target_obj._get_extends()
                                    )
                                ]
                            )
                        else:
                            result_obj_list.extend(
                                [target_obj for target_obj in ol_nested if (target_obj.type_ != resource.type_)]
                            )

                if obj._schema[field]["type"] == "list":
                    if result_obj_list:
                        setattr(obj, field, result_obj_list)
                elif obj._schema[field]["type"] == "int":
                    setattr(obj, field, len(result_obj_list))
                else:
                    if result_obj_list:
                        if len(result_obj_list) != 1:
                            # WARNING: Swallow random further objects here!
                            log.warn(
                                "Extended object field %s uses only 1 of %d compound associated resources",
                                field,
                                len(result_obj_list),
                            )
                        setattr(obj, field, result_obj_list[0])
                    else:
                        setattr(obj, field, None)