Code example #1
File: manage.py Project: bopopescu/devstack
    def rpc_client(self):
        if self._client is None:
            if not rpc.initialized():
                rpc.init(CONF)
                target = messaging.Target(topic=CONF.volume_topic)
                self._client = rpc.get_client(target)
        return self._client
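Example #1 creates the volume RPC client lazily on first use; examples #4 through #8 below repeat the same pattern. As written, the client construction sits inside the "if not rpc.initialized():" block, so nothing is built when RPC has already been initialized elsewhere in the process. That is harmless for a one-shot cinder-manage invocation, but worth noting when reusing the pattern. A minimal sketch of the same lazy pattern with the construction hoisted out of that check, assuming the cinder.rpc, cinder.objects and oslo.messaging APIs shown in these examples (the holder class name is hypothetical):

# Hedged sketch, not the upstream code: build the client whenever it is
# missing, and only call rpc.init() when RPC has not been set up yet.
import oslo_messaging as messaging
from oslo_config import cfg

from cinder.objects import base as objects_base
from cinder import rpc

CONF = cfg.CONF


class VolumeCommands(object):
    """Hypothetical holder for a lazily created volume RPC client."""

    def __init__(self):
        self._client = None

    def rpc_client(self):
        if self._client is None:
            if not rpc.initialized():
                rpc.init(CONF)
            target = messaging.Target(topic=CONF.volume_topic)
            serializer = objects_base.CinderObjectSerializer()
            self._client = rpc.get_client(target, serializer=serializer)
        return self._client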
Code example #2
File: service.py Project: HybridF5/cinder
    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, *args, **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
        self.rpcserver = None
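Example #2 shows the start of Cinder's Service.__init__: RPC is initialized once per process, the manager class is imported from its dotted path, and it is wrapped with the osprofiler "rpc" tracer only when profiling is enabled (example #3 below applies the tracer unconditionally). A small sketch of just the import-and-trace step, assuming the oslo.utils and osprofiler APIs used above and that the [profiler] option group has been registered, as Cinder does at startup:

# Hedged sketch of the conditional tracing step from Service.__init__ above.
from oslo_config import cfg
from oslo_utils import importutils
from osprofiler import profiler

CONF = cfg.CONF


def load_manager_class(manager_class_name):
    """Import a manager class by dotted path, optionally tracing its methods."""
    manager_class = importutils.import_class(manager_class_name)
    if CONF.profiler.enabled:
        # trace_cls("rpc") returns a class decorator that records every
        # public method call under the "rpc" trace name.
        manager_class = profiler.trace_cls("rpc")(manager_class)
    return manager_class

The returned class is then instantiated with host and service_name, exactly as in the examples.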
Code example #3
    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, *args, **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        manager_class = importutils.import_class(self.manager_class_name)
        manager_class = profiler.trace_cls("rpc")(manager_class)

        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
        self.rpcserver = None
Code example #4
File: manage.py Project: NeCTAR-RC/cinder
    def rpc_client(self):
        if self._client is None:
            if not rpc.initialized():
                rpc.init(CONF)
                target = messaging.Target(topic=CONF.volume_topic)
                self._client = rpc.get_client(target)
        return self._client
Code example #5
File: manage.py Project: rahul4-jain/cinder
    def rpc_client(self):
        if self._client is None:
            if not rpc.initialized():
                rpc.init(CONF)
                target = messaging.Target(topic=CONF.volume_topic)
                serializer = objects.base.CinderObjectSerializer()
                self._client = rpc.get_client(target, serializer=serializer)

        return self._client
Code example #6
    def _rpc_client(self):
        if self._client is None:
            if not rpc.initialized():
                rpc.init(CONF)
                target = messaging.Target(topic=constants.VOLUME_TOPIC)
                serializer = objects.base.CinderObjectSerializer()
                self._client = rpc.get_client(target, serializer=serializer)

        return self._client
Code example #7
File: manage.py Project: bopopescu/stack
    def _rpc_client(self):
        if self._client is None:
            if not rpc.initialized():
                rpc.init(CONF)
                target = messaging.Target(topic=CONF.volume_topic)
                serializer = objects.base.CinderObjectSerializer()
                self._client = rpc.get_client(target, serializer=serializer)

        return self._client
Code example #8
File: manage.py Project: Nexenta/cinder
    def _rpc_client(self):
        if self._client is None:
            if not rpc.initialized():
                rpc.init(CONF)
                target = messaging.Target(topic=constants.VOLUME_TOPIC)
                serializer = objects.base.CinderObjectSerializer()
                self._client = rpc.get_client(target, serializer=serializer)

        return self._client
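Examples #5 through #8 differ from #1 and #4 only in passing a CinderObjectSerializer to rpc.get_client(). The serializer is what lets Cinder's versioned objects (Volume, Snapshot, Service, and so on) be sent as RPC arguments and rebuilt on the receiving side. A hedged usage sketch of such a client follows; the RPC method name is illustrative, not a documented API, and the topic constant follows examples #6 and #8:

# Hedged sketch: building and using the serializer-aware client from
# examples #5-#8.  'example_method' is an illustrative method name only.
import oslo_messaging as messaging
from oslo_config import cfg

from cinder.common import constants
from cinder import context
from cinder.objects import base as objects_base
from cinder import rpc

CONF = cfg.CONF

if not rpc.initialized():
    rpc.init(CONF)

target = messaging.Target(topic=constants.VOLUME_TOPIC)
serializer = objects_base.CinderObjectSerializer()
client = rpc.get_client(target, serializer=serializer)

ctxt = context.get_admin_context()
# prepare() pins the request to one volume service; cast() is fire-and-forget.
cctxt = client.prepare(server='hostname@backend')
cctxt.cast(ctxt, 'example_method')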
Code example #9
    def __init__(self,
                 host,
                 binary,
                 topic,
                 manager,
                 report_interval=None,
                 periodic_interval=None,
                 periodic_fuzzy_delay=None,
                 service_name=None,
                 coordination=False,
                 *args,
                 **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        self.coordination = coordination
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        # NOTE(geguileo): We need to create the Service DB entry before we
        # create the manager, otherwise capped versions for serializer and rpc
        # client would use existing DB entries not including us, which could
        # result in us using None (if it's the first time the service is run)
        # or an old version (if this is a normal upgrade of a single service).
        ctxt = context.get_admin_context()
        try:
            service_ref = objects.Service.get_by_args(ctxt, host, binary)
            service_ref.rpc_current_version = manager_class.RPC_API_VERSION
            obj_version = objects_base.OBJ_VERSIONS.get_current()
            service_ref.object_current_version = obj_version
            service_ref.save()
            self.service_id = service_ref.id
        except exception.NotFound:
            self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)

        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args,
                                     **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
        self.rpcserver = None
Code example #10
File: service.py Project: Hopebaytech/cinder
    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, coordination=False, *args, **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        self.coordination = coordination
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        # NOTE(geguileo): We need to create the Service DB entry before we
        # create the manager, otherwise capped versions for serializer and rpc
        # client would use existing DB entries not including us, which could
        # result in us using None (if it's the first time the service is run)
        # or an old version (if this is a normal upgrade of a single service).
        ctxt = context.get_admin_context()
        try:
            service_ref = objects.Service.get_by_args(ctxt, host, binary)
            service_ref.rpc_current_version = manager_class.RPC_API_VERSION
            obj_version = objects_base.OBJ_VERSIONS.get_current()
            service_ref.object_current_version = obj_version
            service_ref.save()
            self.service_id = service_ref.id
        except exception.NotFound:
            self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)

        self.manager = manager_class(host=self.host,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
        self.rpcserver = None
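Examples #9 and #10 extend the constructor from #2: before the manager is created, the Service row in the database is updated (or created) with the manager's RPC_API_VERSION and the current versioned-object version, so that version pinning for the serializer and RPC client takes this service into account. A condensed, hedged sketch of that update-or-create step, assuming the cinder.objects APIs used above (the function name is hypothetical):

# Hedged, condensed sketch of the "register our versions first" step from
# examples #9 and #10; register_service_versions is a hypothetical name.
from cinder import context
from cinder import exception
from cinder import objects
from cinder.objects import base as objects_base


def register_service_versions(host, binary, rpc_api_version):
    ctxt = context.get_admin_context()
    try:
        service_ref = objects.Service.get_by_args(ctxt, host, binary)
        service_ref.rpc_current_version = rpc_api_version
        service_ref.object_current_version = (
            objects_base.OBJ_VERSIONS.get_current())
        service_ref.save()
        return service_ref.id
    except exception.NotFound:
        # First start on this host/binary: the real constructor falls back to
        # _create_service_ref() to create the row instead.
        return None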
Code example #11
File: service.py Project: lubidl0/cinder-1
    def __init__(self,
                 host,
                 binary,
                 topic,
                 manager,
                 report_interval=None,
                 periodic_interval=None,
                 periodic_fuzzy_delay=None,
                 service_name=None,
                 coordination=False,
                 cluster=None,
                 *args,
                 **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.cluster = cluster
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        self.coordination = coordination
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        self.service = None
        self.manager = manager_class(host=self.host,
                                     cluster=self.cluster,
                                     service_name=service_name,
                                     *args,
                                     **kwargs)
        self.availability_zone = self.manager.availability_zone

        # NOTE(geguileo): We need to create the Service DB entry before we
        # create the manager, otherwise capped versions for serializer and rpc
        # client would use existing DB entries not including us, which could
        # result in us using None (if it's the first time the service is run)
        # or an old version (if this is a normal upgrade of a single service).
        ctxt = context.get_admin_context()
        try:
            service_ref = objects.Service.get_by_args(ctxt, host, binary)
            service_ref.rpc_current_version = manager_class.RPC_API_VERSION
            obj_version = objects_base.OBJ_VERSIONS.get_current()
            service_ref.object_current_version = obj_version

            # added_to_cluster attribute marks when we consider that we have
            # just added a host to a cluster so we can include resources into
            # that cluster.  We consider that we have added the host when we
            # didn't have data in the cluster DB field and our current
            # configuration has a cluster value.  We don't want to do anything
            # automatic if the cluster is changed, in those cases we'll want
            # to use cinder manage command and do it manually.
            self.added_to_cluster = (not service_ref.cluster_name and cluster)

            if service_ref.cluster_name != cluster:
                LOG.info(
                    'This service has been moved from cluster '
                    '%(cluster_svc)s to %(cluster_cfg)s. Resources '
                    'will %(opt_no)sbe moved to the new cluster', {
                        'cluster_svc': service_ref.cluster_name,
                        'cluster_cfg': cluster,
                        'opt_no': '' if self.added_to_cluster else 'NO '
                    })

            if self.added_to_cluster:
                # We copy the service's disabled status into the cluster if
                # we have to create it.
                self._ensure_cluster_exists(ctxt, service_ref)
                service_ref.cluster_name = cluster
            service_ref.save()
            Service.service_id = service_ref.id
            self.origin_service_id = service_ref.id
        except exception.NotFound:
            self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)
            # Service entry didn't exist because it was manually removed
            # or it's the first time running, to be on the safe side we say we
            # were added if we are clustered.
            self.added_to_cluster = bool(cluster)

        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs

        setup_profiler(binary, host)
        self.rpcserver = None
        self.backend_rpcserver = None
        self.cluster_rpcserver = None
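Example #11 (and #12 through #14 below) adds cluster awareness on top of the version registration: added_to_cluster is set only when the Service row has no cluster recorded yet but the configuration now supplies one, so resources are pulled into the cluster exactly once; any later change of cluster is left to cinder-manage. A minimal sketch of that decision (the rolling-upgrade guard from examples #12 and #13 is omitted here):

# Hedged sketch of the added_to_cluster decision used in examples #11-#14.
def added_to_cluster(db_cluster_name, configured_cluster):
    # True only on the transition "no cluster recorded yet -> cluster now
    # configured"; moving between clusters is never handled automatically.
    return bool(configured_cluster) and not db_cluster_name


# added_to_cluster(None, 'mycluster')        -> True: pull resources in
# added_to_cluster('old', 'mycluster')       -> False: handled via cinder-manage
# added_to_cluster('mycluster', 'mycluster') -> False: nothing to do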
Code example #12
    def __init__(self,
                 host,
                 binary,
                 topic,
                 manager,
                 report_interval=None,
                 periodic_interval=None,
                 periodic_fuzzy_delay=None,
                 service_name=None,
                 coordination=False,
                 cluster=None,
                 *args,
                 **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.cluster = cluster
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        self.coordination = coordination
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        # NOTE(geguileo): We need to create the Service DB entry before we
        # create the manager, otherwise capped versions for serializer and rpc
        # client would use existing DB entries not including us, which could
        # result in us using None (if it's the first time the service is run)
        # or an old version (if this is a normal upgrade of a single service).
        ctxt = context.get_admin_context()
        self.is_upgrading_to_n = self.is_svc_upgrading_to_n(binary)
        try:
            service_ref = objects.Service.get_by_args(ctxt, host, binary)
            service_ref.rpc_current_version = manager_class.RPC_API_VERSION
            obj_version = objects_base.OBJ_VERSIONS.get_current()
            service_ref.object_current_version = obj_version
            # TODO(geguileo): In O we can remove the service upgrading part on
            # the next equation, because by then all our services will be
            # properly setting the cluster during volume migrations since
            # they'll have the new Volume ORM model.  But until then we can
            # only set the cluster in the DB and pass added_to_cluster to
            # init_host when we have completed the rolling upgrade from M to N.

            # added_to_cluster attribute marks when we consider that we have
            # just added a host to a cluster so we can include resources into
            # that cluster.  We consider that we have added the host when we
            # didn't have data in the cluster DB field and our current
            # configuration has a cluster value.  We don't want to do anything
            # automatic if the cluster is changed, in those cases we'll want
            # to use cinder manage command and do it manually.
            self.added_to_cluster = (not service_ref.cluster_name and cluster
                                     and not self.is_upgrading_to_n)

            # TODO(geguileo): In O - Remove self.is_upgrading_to_n part
            if (service_ref.cluster_name != cluster
                    and not self.is_upgrading_to_n):
                LOG.info(
                    _LI('This service has been moved from cluster '
                        '%(cluster_svc)s to %(cluster_cfg)s. Resources '
                        'will %(opt_no)sbe moved to the new cluster'), {
                            'cluster_svc': service_ref.cluster_name,
                            'cluster_cfg': cluster,
                            'opt_no': '' if self.added_to_cluster else 'NO '
                        })

            if self.added_to_cluster:
                # We copy the service's disabled status into the cluster if
                # we have to create it.
                self._ensure_cluster_exists(ctxt, service_ref.disabled)
                service_ref.cluster_name = cluster
            service_ref.save()
            Service.service_id = service_ref.id
        except exception.NotFound:
            # We don't want to include cluster information on the service or
            # create the cluster entry if we are upgrading.
            self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)
            # TODO(geguileo): In O set added_to_cluster to True
            # We don't want to include resources in the cluster during the
            # start while we are still doing the rolling upgrade.
            self.added_to_cluster = not self.is_upgrading_to_n

        self.manager = manager_class(host=self.host,
                                     cluster=self.cluster,
                                     service_name=service_name,
                                     *args,
                                     **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
        self.rpcserver = None
        self.cluster_rpcserver = None
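Example #12 (and #13 below) is the M-to-N rolling-upgrade variant: is_svc_upgrading_to_n(binary) gates both added_to_cluster and the cluster move, so a host is not pulled into a cluster while older services may still be running. A hedged sketch of that guard layered on the decision sketched above (is_upgrading stands in for the result of is_svc_upgrading_to_n):

# Hedged sketch: the rolling-upgrade guard from examples #12/#13 combined
# with the added_to_cluster decision sketched earlier.
def added_to_cluster_during_upgrade(db_cluster_name, configured_cluster,
                                    is_upgrading):
    if is_upgrading:
        # While rolling from M to N, only record the cluster in the DB; do
        # not move resources into it yet.
        return False
    return bool(configured_cluster) and not db_cluster_name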
Code example #13
File: service.py Project: NetApp/cinder
    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, coordination=False, cluster=None, *args,
                 **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.cluster = cluster
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        self.coordination = coordination
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        # NOTE(geguileo): We need to create the Service DB entry before we
        # create the manager, otherwise capped versions for serializer and rpc
        # client would use existing DB entries not including us, which could
        # result in us using None (if it's the first time the service is run)
        # or an old version (if this is a normal upgrade of a single service).
        ctxt = context.get_admin_context()
        self.is_upgrading_to_n = self.is_svc_upgrading_to_n(binary)
        try:
            service_ref = objects.Service.get_by_args(ctxt, host, binary)
            service_ref.rpc_current_version = manager_class.RPC_API_VERSION
            obj_version = objects_base.OBJ_VERSIONS.get_current()
            service_ref.object_current_version = obj_version
            # TODO(geguileo): In O we can remove the service upgrading part on
            # the next equation, because by then all our services will be
            # properly setting the cluster during volume migrations since
            # they'll have the new Volume ORM model.  But until then we can
            # only set the cluster in the DB and pass added_to_cluster to
            # init_host when we have completed the rolling upgrade from M to N.

            # added_to_cluster attribute marks when we consider that we have
            # just added a host to a cluster so we can include resources into
            # that cluster.  We consider that we have added the host when we
            # didn't have data in the cluster DB field and our current
            # configuration has a cluster value.  We don't want to do anything
            # automatic if the cluster is changed, in those cases we'll want
            # to use cinder manage command and do it manually.
            self.added_to_cluster = (not service_ref.cluster_name and cluster
                                     and not self.is_upgrading_to_n)

            # TODO(geguileo): In O - Remove self.is_upgrading_to_n part
            if (service_ref.cluster_name != cluster and
                    not self.is_upgrading_to_n):
                LOG.info(_LI('This service has been moved from cluster '
                             '%(cluster_svc)s to %(cluster_cfg)s. Resources '
                             'will %(opt_no)sbe moved to the new cluster'),
                         {'cluster_svc': service_ref.cluster_name,
                          'cluster_cfg': cluster,
                          'opt_no': '' if self.added_to_cluster else 'NO '})

            if self.added_to_cluster:
                # We copy the service's disabled status into the cluster if
                # we have to create it.
                self._ensure_cluster_exists(ctxt, service_ref.disabled)
                service_ref.cluster_name = cluster
            service_ref.save()
            Service.service_id = service_ref.id
        except exception.NotFound:
            # We don't want to include cluster information on the service or
            # create the cluster entry if we are upgrading.
            self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)
            # TODO(geguileo): In O set added_to_cluster to True
            # We don't want to include resources in the cluster during the
            # start while we are still doing the rolling upgrade.
            self.added_to_cluster = not self.is_upgrading_to_n

        self.manager = manager_class(host=self.host,
                                     cluster=self.cluster,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs
        self.timers = []

        setup_profiler(binary, host)
        self.rpcserver = None
        self.cluster_rpcserver = None
Code example #14
File: service.py Project: mahak/cinder
    def __init__(self, host, binary, topic, manager, report_interval=None,
                 periodic_interval=None, periodic_fuzzy_delay=None,
                 service_name=None, coordination=False, cluster=None, *args,
                 **kwargs):
        super(Service, self).__init__()

        if not rpc.initialized():
            rpc.init(CONF)

        self.cluster = cluster
        self.host = host
        self.binary = binary
        self.topic = topic
        self.manager_class_name = manager
        self.coordination = coordination
        manager_class = importutils.import_class(self.manager_class_name)
        if CONF.profiler.enabled:
            manager_class = profiler.trace_cls("rpc")(manager_class)

        self.service = None
        self.manager = manager_class(host=self.host,
                                     cluster=self.cluster,
                                     service_name=service_name,
                                     *args, **kwargs)
        self.availability_zone = self.manager.availability_zone

        # NOTE(geguileo): We need to create the Service DB entry before we
        # create the manager, otherwise capped versions for serializer and rpc
        # client would use existing DB entries not including us, which could
        # result in us using None (if it's the first time the service is run)
        # or an old version (if this is a normal upgrade of a single service).
        ctxt = context.get_admin_context()
        try:
            service_ref = objects.Service.get_by_args(ctxt, host, binary)
            service_ref.rpc_current_version = manager_class.RPC_API_VERSION
            obj_version = objects_base.OBJ_VERSIONS.get_current()
            service_ref.object_current_version = obj_version

            # added_to_cluster attribute marks when we consider that we have
            # just added a host to a cluster so we can include resources into
            # that cluster.  We consider that we have added the host when we
            # didn't have data in the cluster DB field and our current
            # configuration has a cluster value.  We don't want to do anything
            # automatic if the cluster is changed, in those cases we'll want
            # to use cinder manage command and do it manually.
            self.added_to_cluster = (not service_ref.cluster_name and cluster)

            if service_ref.cluster_name != cluster:
                LOG.info('This service has been moved from cluster '
                         '%(cluster_svc)s to %(cluster_cfg)s. Resources '
                         'will %(opt_no)sbe moved to the new cluster',
                         {'cluster_svc': service_ref.cluster_name,
                          'cluster_cfg': cluster,
                          'opt_no': '' if self.added_to_cluster else 'NO '})

            if self.added_to_cluster:
                # We copy the service's disabled status into the cluster if
                # we have to create it.
                self._ensure_cluster_exists(ctxt, service_ref)
                service_ref.cluster_name = cluster
            service_ref.save()
            Service.service_id = service_ref.id
            self.origin_service_id = service_ref.id
        except exception.NotFound:
            self._create_service_ref(ctxt, manager_class.RPC_API_VERSION)
            # Service entry didn't exist because it was manually removed
            # or it's the first time running, to be on the safe side we say we
            # were added if we are clustered.
            self.added_to_cluster = bool(cluster)

        self.report_interval = report_interval
        self.periodic_interval = periodic_interval
        self.periodic_fuzzy_delay = periodic_fuzzy_delay
        self.basic_config_check()
        self.saved_args, self.saved_kwargs = args, kwargs

        setup_profiler(binary, host)
        self.rpcserver = None
        self.backend_rpcserver = None
        self.cluster_rpcserver = None
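For context, these constructors are normally reached through the Service.create() classmethod from each Cinder command's main(). A hedged sketch of how a volume service might be built and launched; the exact arguments and launcher wiring vary between releases:

# Hedged sketch only: argument names follow the constructors above, but the
# launcher wiring differs between Cinder releases.
from cinder import service

server = service.Service.create(binary='cinder-volume',
                                host='hostname@backend',
                                service_name='backend',
                                coordination=True)
# A process launcher then calls server.start(), which builds self.rpcserver
# and begins consuming RPC requests on the volume topic.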