Code example #1
File: decorators.py Project: yongshengma/framework
    def build_new_kwargs(original_function, request, instance, version, raw_version, passed_kwargs):
        # type: (callable, Union[WSGIRequest, Request], DataObject, int, str, dict) -> Tuple[dict, dict]
        """
        Convert all positional arguments to keyword arguments
        :param original_function: The originally decorated function
        :type original_function: callable
        :param request: API request object
        :type request: Union[WSGIRequest, Request]
        :param instance: The data object instance to inject
        :type instance: DataObject
        :param version: Parsed API version
        :type version: int
        :param raw_version: Unparsed API version
        :type raw_version: str
        :param passed_kwargs: Kwargs passed to the original function
        :type passed_kwargs: dict
        :return: The kwargs for the original function and the kwargs for the validator
        :rtype: Tuple[dict, dict]
        """
        function_metadata = original_function.ovs_metadata
        kwargs = {}
        validator_kwargs = {}
        empty = object()
        # Special reserved keywords
        reserved = {'version': version,
                    'raw_version': raw_version,
                    'request': request,
                    'local_storagerouter': StorageRouterList.get_by_machine_id(settings.UNIQUE_ID)}
        if instance is not None:
            reserved[object_type.__name__.lower()] = instance

        for mandatory_vars, optional_vars, new_kwargs in [(function_metadata['load']['mandatory'][:], function_metadata['load']['optional'][:], kwargs),
                                                          (validation_mandatory_vars[:], validation_optional_vars[:], validator_kwargs)]:
            for keyword, value in reserved.iteritems():
                if keyword in mandatory_vars:
                    new_kwargs[keyword] = value
                    mandatory_vars.remove(keyword)

            # The rest of the parameters
            post_data = request.DATA if hasattr(request, 'DATA') else request.POST
            query_params = request.QUERY_PARAMS if hasattr(request, 'QUERY_PARAMS') else request.GET
            # Containers searched in priority order; the `empty` sentinel detects absence, since None is a valid passed value
            data_containers = [passed_kwargs, post_data, query_params]
            for parameters, mandatory in ((mandatory_vars, True), (optional_vars, False)):
                for name in parameters:
                    val = empty
                    for container in data_containers:
                        val = container.get(name, empty)
                        if val is not empty:
                            break
                    if val is not empty:
                        # Embrace our design flaw. The query shouldn't be json dumped separately.
                        if name == 'query':
                            val = _try_parse(val)
                        new_kwargs[name] = _try_convert_bool(val)
                    elif mandatory:
                        raise HttpNotAcceptableException(error_description='Invalid data passed: {0} is missing'.format(name),
                                                         error='invalid_data')
        return kwargs, validator_kwargs
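
Note: the snippet below is a small standalone illustration (not part of the framework) of the sentinel pattern build_new_kwargs relies on. An `object()` marker distinguishes "key absent" from "key present with value None" while several containers are searched in priority order; the function and variable names are made up for the example.

_MISSING = object()  # sentinel: identity comparison cannot collide with any real value

def first_present(name, *containers):
    # Return the first value found for `name`, searching the containers in order
    for container in containers:
        value = container.get(name, _MISSING)
        if value is not _MISSING:
            return value
    raise KeyError('{0} is missing'.format(name))

print(first_present('size', {'size': None}, {'size': 10}))  # prints None, not 10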
Code example #2
    def get_by_machine_id(machine_id):
        """
        Fetch a dal machine by id

        :param machine_id: id of the machine
        :return:
        """

        return StorageRouterList.get_by_machine_id(machine_id)
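
A hypothetical call site for this lookup (the machine id below is made up); it mirrors how the decorators above resolve the local StorageRouter and relies on the lookup returning None when nothing matches:

from ovs.dal.lists.storagerouterlist import StorageRouterList

storagerouter = StorageRouterList.get_by_machine_id('0000aaaabbbbcccc')  # hypothetical machine id
if storagerouter is None:
    raise RuntimeError('No StorageRouter known for this machine id')
print(storagerouter.name)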
Code example #3
 def get_my_storagerouter():
     """
     Returns the StorageRouter on which this code is executed
     :return: StorageRouter this code is executed on
     :rtype: StorageRouter
     """
     from ovs.dal.lists.storagerouterlist import StorageRouterList
     storagerouter = StorageRouterList.get_by_machine_id(System.get_my_machine_id())
     if storagerouter is None:
         raise RuntimeError('Could not find the local StorageRouter')
     return storagerouter
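
A minimal usage sketch for this helper, assuming it is exposed on the System utility class (the module path below is an assumption based on the other imports in these examples):

from ovs.extensions.generic.system import System  # assumed module path

local_sr = System.get_my_storagerouter()
print(local_sr.name, local_sr.ip, local_sr.node_type)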
Code example #4
File: decorators.py Project: dawnpower/framework
 def new_function(*args, **kwargs):
     """
     Wrapped function
     """
     request = _find_request(args)
     new_kwargs = {}
     # Find out the arguments of the decorated function
     function_info = inspect.getargspec(f)
     if function_info.defaults is None:
         mandatory_vars = function_info.args[1:]
         optional_vars = []
     else:
         mandatory_vars = function_info.args[1:-len(function_info.defaults)]
         optional_vars = function_info.args[len(mandatory_vars) + 1:]
     # Check versioning
     version = regex.match(request.META['HTTP_ACCEPT']).groupdict()['version']
     versions = (max(min_version, settings.VERSION[0]), min(max_version, settings.VERSION[-1]))
     if version == '*':  # If accepting all versions, it defaults to the highest one
         version = versions[1]
     version = int(version)
     if version < versions[0] or version > versions[1]:
         raise NotAcceptable('API version requirements: {0} <= <version> <= {1}. Got {2}'.format(versions[0], versions[1], version))
     if 'version' in mandatory_vars:
         new_kwargs['version'] = version
         mandatory_vars.remove('version')
     # Fill request parameter, if available
     if 'request' in mandatory_vars:
         new_kwargs['request'] = request
         mandatory_vars.remove('request')
     # Fill main object, if required
     if 'pk' in kwargs and object_type is not None:
         typename = object_type.__name__.lower()
         try:
             instance = object_type(kwargs['pk'])
             if typename in mandatory_vars:
                 new_kwargs[typename] = instance
                 mandatory_vars.remove(typename)
         except ObjectNotFoundException:
             raise Http404()
     # Fill local storagerouter, if requested
     if 'local_storagerouter' in mandatory_vars:
         storagerouter = StorageRouterList.get_by_machine_id(settings.UNIQUE_ID)
         new_kwargs['local_storagerouter'] = storagerouter
         mandatory_vars.remove('local_storagerouter')
     # Fill mandatory parameters
     post_data = request.DATA if hasattr(request, 'DATA') else request.POST
     get_data = request.QUERY_PARAMS if hasattr(request, 'QUERY_PARAMS') else request.GET
     for name in mandatory_vars:
         if name in kwargs:
             new_kwargs[name] = kwargs[name]
         else:
             if name not in post_data:
                 if name not in get_data:
                     raise NotAcceptable('Invalid data passed: {0} is missing'.format(name))
                 new_kwargs[name] = _try_parse(get_data[name])
             else:
                 new_kwargs[name] = _try_parse(post_data[name])
     # Try to fill optional parameters
     for name in optional_vars:
         if name in kwargs:
             new_kwargs[name] = kwargs[name]
         else:
             if name in post_data:
                 new_kwargs[name] = _try_parse(post_data[name])
             elif name in get_data:
                 new_kwargs[name] = _try_parse(get_data[name])
     # Call the function
     return f(args[0], **new_kwargs)
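
The version negotiation at the top of this wrapper can be read in isolation. Below is a self-contained sketch of the same clamping logic with made-up values standing in for settings.VERSION: the requested version must fall inside the intersection of the range the decorator accepts and the range the deployment supports, and '*' means "highest available".

def negotiate_version(requested, min_version, max_version, supported=(1, 7)):
    # supported plays the role of settings.VERSION: the API versions this deployment offers
    lowest, highest = max(min_version, supported[0]), min(max_version, supported[-1])
    if requested == '*':  # accept-all defaults to the highest supported version
        requested = highest
    requested = int(requested)
    if requested < lowest or requested > highest:
        raise ValueError('API version requirements: {0} <= <version> <= {1}. Got {2}'.format(lowest, highest, requested))
    return requested

print(negotiate_version('*', 3, 9))  # 7
print(negotiate_version('5', 3, 9))  # 5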
Code example #5
File: decorators.py Project: winglq/framework
 def new_function(*args, **kwargs):
     """
     Wrapped function
     """
     request = _find_request(args)
     new_kwargs = {}
     validation_new_kwargs = {}
     # Find out the arguments of the decorated function
     if validator is not None:
         f_info = inspect.getargspec(validator)
         if f_info.defaults is None:
             validation_mandatory_vars = f_info.args[1:]
             validation_optional_vars = []
         else:
             validation_mandatory_vars = f_info.args[1:-len(f_info.defaults)]
             validation_optional_vars = f_info.args[len(validation_mandatory_vars) + 1:]
     else:
         validation_mandatory_vars = []
         validation_optional_vars = []
     # Check versioning
     version_match = regex.match(request.META['HTTP_ACCEPT'])
     if version_match is not None:
         version = version_match.groupdict()['version']
     else:
         version = settings.VERSION[-1]
     raw_version = version
     versions = (max(min_version, settings.VERSION[0]),
                 min(max_version, settings.VERSION[-1]))
     if version == '*':  # If accepting all versions, it defaults to the highest one
         version = versions[1]
     version = int(version)
     if version < versions[0] or version > versions[1]:
         raise HttpNotAcceptableException(error_description='API version requirements: {0} <= <version> <= {1}. Got {2}'.format(versions[0], versions[1], version),
                                          error='invalid_version')
     # Load some information
     instance = None
     if 'pk' in kwargs and object_type is not None:
         try:
             instance = object_type(kwargs['pk'])
         except ObjectNotFoundException:
             raise HttpNotFoundException(error_description='The requested object could not be found',
                                         error='object_not_found')
     # Build new kwargs
     for _mandatory_vars, _optional_vars, _new_kwargs in [(f.ovs_metadata['load']['mandatory'][:], f.ovs_metadata['load']['optional'][:], new_kwargs),
                                                          (validation_mandatory_vars, validation_optional_vars, validation_new_kwargs)]:
         if 'version' in _mandatory_vars:
             _new_kwargs['version'] = version
             _mandatory_vars.remove('version')
         if 'raw_version' in _mandatory_vars:
             _new_kwargs['raw_version'] = raw_version
             _mandatory_vars.remove('raw_version')
         if 'request' in _mandatory_vars:
             _new_kwargs['request'] = request
             _mandatory_vars.remove('request')
         if instance is not None:
             typename = object_type.__name__.lower()
             if typename in _mandatory_vars:
                 _new_kwargs[typename] = instance
                 _mandatory_vars.remove(typename)
         if 'local_storagerouter' in _mandatory_vars:
             storagerouter = StorageRouterList.get_by_machine_id(settings.UNIQUE_ID)
             _new_kwargs['local_storagerouter'] = storagerouter
             _mandatory_vars.remove('local_storagerouter')
         # The rest of the mandatory parameters
         post_data = request.DATA if hasattr(request, 'DATA') else request.POST
         get_data = request.QUERY_PARAMS if hasattr(request, 'QUERY_PARAMS') else request.GET
         for name in _mandatory_vars:
             if name in kwargs:
                 _new_kwargs[name] = kwargs[name]
             else:
                 if name not in post_data:
                     if name not in get_data:
                         raise HttpNotAcceptableException(error_description='Invalid data passed: {0} is missing'.format(name),
                                                          error='invalid_data')
                     _new_kwargs[name] = _try_parse(get_data[name])
                 else:
                     _new_kwargs[name] = _try_parse(post_data[name])
         # Try to fill optional parameters
         for name in _optional_vars:
             if name in kwargs:
                 _new_kwargs[name] = kwargs[name]
             else:
                 if name in post_data:
                     _new_kwargs[name] = _try_parse(post_data[name])
                 elif name in get_data:
                     _new_kwargs[name] = _try_parse(get_data[name])
     # Execute validator
     if validator is not None:
         validator(args[0], **validation_new_kwargs)
     # Call the function
     return f(args[0], **new_kwargs)
Code example #6
File: decorators.py Project: grimpy/openvstorage
 def new_function(*args, **kwargs):
     """
     Wrapped function
     """
     request = _find_request(args)
     new_kwargs = {}
     validation_new_kwargs = {}
     # Find out the arguments of the decorated function
     if validator is not None:
         f_info = inspect.getargspec(validator)
         if f_info.defaults is None:
             validation_mandatory_vars = f_info.args[1:]
             validation_optional_vars = []
         else:
             validation_mandatory_vars = f_info.args[1:-len(f_info.defaults)]
             validation_optional_vars = f_info.args[len(validation_mandatory_vars) + 1:]
     else:
         validation_mandatory_vars = []
         validation_optional_vars = []
     # Check versioning
     version_match = regex.match(request.META['HTTP_ACCEPT'])
     if version_match is not None:
         version = version_match.groupdict()['version']
     else:
         version = settings.VERSION[-1]
     raw_version = version
     versions = (max(min_version, settings.VERSION[0]), min(max_version, settings.VERSION[-1]))
     if version == '*':  # If accepting all versions, it defaults to the highest one
         version = versions[1]
     version = int(version)
     if version < versions[0] or version > versions[1]:
         raise HttpNotAcceptableException(error_description='API version requirements: {0} <= <version> <= {1}. Got {2}'.format(versions[0], versions[1], version),
                                          error='invalid_version')
     # Load some information
     instance = None
     if 'pk' in kwargs and object_type is not None:
         try:
             instance = object_type(kwargs['pk'])
         except ObjectNotFoundException:
             raise HttpNotFoundException(error_description='The requested object could not be found',
                                         error='object_not_found')
     # Build new kwargs
     for _mandatory_vars, _optional_vars, _new_kwargs in [(f.ovs_metadata['load']['mandatory'][:], f.ovs_metadata['load']['optional'][:], new_kwargs),
                                                          (validation_mandatory_vars, validation_optional_vars, validation_new_kwargs)]:
         if 'version' in _mandatory_vars:
             _new_kwargs['version'] = version
             _mandatory_vars.remove('version')
         if 'raw_version' in _mandatory_vars:
             _new_kwargs['raw_version'] = raw_version
             _mandatory_vars.remove('raw_version')
         if 'request' in _mandatory_vars:
             _new_kwargs['request'] = request
             _mandatory_vars.remove('request')
         if instance is not None:
             typename = object_type.__name__.lower()
             if typename in _mandatory_vars:
                 _new_kwargs[typename] = instance
                 _mandatory_vars.remove(typename)
         if 'local_storagerouter' in _mandatory_vars:
             storagerouter = StorageRouterList.get_by_machine_id(settings.UNIQUE_ID)
             _new_kwargs['local_storagerouter'] = storagerouter
             _mandatory_vars.remove('local_storagerouter')
         # The rest of the mandatory parameters
         post_data = request.DATA if hasattr(request, 'DATA') else request.POST
         get_data = request.QUERY_PARAMS if hasattr(request, 'QUERY_PARAMS') else request.GET
         for name in _mandatory_vars:
             if name in kwargs:
                 _new_kwargs[name] = kwargs[name]
             else:
                 if name not in post_data:
                     if name not in get_data:
                         raise HttpNotAcceptableException(error_description='Invalid data passed: {0} is missing'.format(name),
                                                          error='invalid_data')
                     _new_kwargs[name] = _try_parse(get_data[name])
                 else:
                     _new_kwargs[name] = _try_parse(post_data[name])
         # Try to fill optional parameters
         for name in _optional_vars:
             if name in kwargs:
                 _new_kwargs[name] = kwargs[name]
             else:
                 if name in post_data:
                     _new_kwargs[name] = _try_parse(post_data[name])
                 elif name in get_data:
                     _new_kwargs[name] = _try_parse(get_data[name])
     # Execute validator
     if validator is not None:
         validator(args[0], **validation_new_kwargs)
     # Call the function
     return f(args[0], **new_kwargs)
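
For context, wrappers like the one above are produced by a decorator (named `load` in the decorators.py files excerpted here) applied to API viewset methods. The sketch below is hypothetical: the import paths, the viewset and the response shape are assumptions, but it shows how the wrapper injects `request`, `version` and the object resolved from the `pk` kwarg into the decorated method.

from rest_framework import viewsets
from rest_framework.response import Response
from ovs.dal.hybrids.storagerouter import StorageRouter  # assumed DAL hybrid path
from backend.decorators import load                      # assumed import of the decorator shown above

class StorageRouterViewSet(viewsets.ViewSet):
    @load(StorageRouter)
    def retrieve(self, request, storagerouter, version):
        # storagerouter was looked up from kwargs['pk'], version was parsed from the Accept header
        return Response({'name': storagerouter.name, 'ip': storagerouter.ip, 'api_version': version})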
Code example #7
File: nodetype.py Project: sun363587351/framework-1
    def demote_node(cluster_ip,
                    master_ip,
                    ip_client_map,
                    unique_id,
                    unconfigure_memcached,
                    unconfigure_rabbitmq,
                    offline_nodes=None):
        """
        Demotes a given node
        """
        from ovs.dal.lists.storagerouterlist import StorageRouterList

        Toolbox.log(logger=NodeTypeController._logger,
                    messages='Demoting node',
                    title=True)
        service_manager = ServiceFactory.get_manager()
        if offline_nodes is None:
            offline_nodes = []

        if unconfigure_memcached is True and len(offline_nodes) == 0:
            if NodeTypeController._validate_local_memcache_servers(
                    ip_client_map) is False:
                raise RuntimeError(
                    'Not all memcache nodes can be reached which is required for demoting a node.'
                )

        # Find other (arakoon) master nodes
        arakoon_cluster_name = str(
            Configuration.get('/ovs/framework/arakoon_clusters|ovsdb'))
        arakoon_metadata = ArakoonInstaller.get_arakoon_metadata_by_cluster_name(
            cluster_name=arakoon_cluster_name)
        config = ArakoonClusterConfig(cluster_id=arakoon_cluster_name)
        master_node_ips = [node.ip for node in config.nodes]
        shrink = False
        if cluster_ip in master_node_ips:
            shrink = True
            master_node_ips.remove(cluster_ip)
        if len(master_node_ips) == 0:
            raise RuntimeError(
                'There should be at least one other master node')

        storagerouter = StorageRouterList.get_by_machine_id(unique_id)
        storagerouter.node_type = 'EXTRA'
        storagerouter.save()

        offline_node_ips = [node.ip for node in offline_nodes]
        if arakoon_metadata['internal'] is True and shrink is True:
            Toolbox.log(logger=NodeTypeController._logger,
                        messages='Leaving Arakoon {0} cluster'.format(
                            arakoon_cluster_name))
            arakoon_installer = ArakoonInstaller(
                cluster_name=arakoon_cluster_name)
            arakoon_installer.load()
            arakoon_installer.shrink_cluster(removal_ip=cluster_ip,
                                             offline_nodes=offline_node_ips)
            arakoon_installer.restart_cluster_after_shrinking()
        try:
            external_config = Configuration.get(
                '/ovs/framework/external_config')
            if external_config is None and shrink is True:
                Toolbox.log(logger=NodeTypeController._logger,
                            messages='Leaving Arakoon config cluster')
                arakoon_installer = ArakoonInstaller(cluster_name='config')
                arakoon_installer.load(ip=master_node_ips[0])
                arakoon_installer.shrink_cluster(
                    removal_ip=cluster_ip, offline_nodes=offline_node_ips)
                arakoon_installer.restart_cluster_after_shrinking()
        except Exception as ex:
            Toolbox.log(
                logger=NodeTypeController._logger,
                messages=['\nFailed to leave configuration cluster', ex],
                loglevel='exception')

        Toolbox.log(logger=NodeTypeController._logger,
                    messages='Update configurations')
        try:
            if unconfigure_memcached is True:
                endpoints = Configuration.get(
                    '/ovs/framework/memcache|endpoints')
                endpoint = '{0}:{1}'.format(cluster_ip, 11211)
                if endpoint in endpoints:
                    endpoints.remove(endpoint)
                Configuration.set('/ovs/framework/memcache|endpoints',
                                  endpoints)
            if unconfigure_rabbitmq is True:
                endpoints = Configuration.get(
                    '/ovs/framework/messagequeue|endpoints')
                endpoint = '{0}:{1}'.format(cluster_ip, 5672)
                if endpoint in endpoints:
                    endpoints.remove(endpoint)
                Configuration.set('/ovs/framework/messagequeue|endpoints',
                                  endpoints)
        except Exception as ex:
            Toolbox.log(logger=NodeTypeController._logger,
                        messages=['\nFailed to update configurations', ex],
                        loglevel='exception')

        if arakoon_metadata['internal'] is True:
            Toolbox.log(logger=NodeTypeController._logger,
                        messages='Restarting master node services')
            remaining_nodes = ip_client_map.keys()[:]
            if cluster_ip in remaining_nodes:
                remaining_nodes.remove(cluster_ip)

            PersistentFactory.store = None
            VolatileFactory.store = None

            for service in storagerouter.services:
                if service.name == 'arakoon-ovsdb':
                    service.delete()

        target_client = None
        if storagerouter in offline_nodes:
            if unconfigure_rabbitmq is True:
                Toolbox.log(
                    logger=NodeTypeController._logger,
                    messages='Removing/unconfiguring offline RabbitMQ node')
                client = ip_client_map[master_ip]
                try:
                    client.run([
                        'rabbitmqctl', 'forget_cluster_node',
                        'rabbit@{0}'.format(storagerouter.name)
                    ])
                except Exception as ex:
                    Toolbox.log(logger=NodeTypeController._logger,
                                messages=[
                                    '\nFailed to forget RabbitMQ cluster node',
                                    ex
                                ],
                                loglevel='exception')
        else:
            target_client = ip_client_map[cluster_ip]
            if unconfigure_rabbitmq is True:
                Toolbox.log(logger=NodeTypeController._logger,
                            messages='Removing/unconfiguring RabbitMQ')
                try:
                    if service_manager.has_service('rabbitmq-server',
                                                   client=target_client):
                        ServiceFactory.change_service_state(
                            target_client, 'rabbitmq-server', 'stop',
                            NodeTypeController._logger)
                        target_client.run(['rabbitmq-server', '-detached'])
                        time.sleep(5)
                        target_client.run(['rabbitmqctl', 'stop_app'])
                        time.sleep(5)
                        target_client.run(['rabbitmqctl', 'reset'])
                        time.sleep(5)
                        target_client.run(['rabbitmqctl', 'stop'])
                        time.sleep(5)
                        target_client.file_unlink(
                            "/var/lib/rabbitmq/.erlang.cookie")
                        ServiceFactory.change_service_state(
                            target_client, 'rabbitmq-server', 'stop',
                            NodeTypeController._logger)  # To be sure
                except Exception as ex:
                    Toolbox.log(logger=NodeTypeController._logger,
                                messages=[
                                    '\nFailed to remove/unconfigure RabbitMQ',
                                    ex
                                ],
                                loglevel='exception')

            Toolbox.log(logger=NodeTypeController._logger,
                        messages='Stopping services')
            services = ['memcached', 'rabbitmq-server']
            if unconfigure_rabbitmq is False:
                services.remove('rabbitmq-server')
            if unconfigure_memcached is False:
                services.remove('memcached')
            for service in services:
                if service_manager.has_service(service, client=target_client):
                    Toolbox.log(
                        logger=NodeTypeController._logger,
                        messages='Stopping service {0}'.format(service))
                    try:
                        ServiceFactory.change_service_state(
                            target_client, service, 'stop',
                            NodeTypeController._logger)
                    except Exception as ex:
                        Toolbox.log(
                            logger=NodeTypeController._logger,
                            messages=[
                                '\nFailed to stop service {0}'.format(service), ex
                            ],
                            loglevel='exception')

            Toolbox.log(logger=NodeTypeController._logger,
                        messages='Removing services')
            services = [
                'scheduled-tasks', 'webapp-api', 'volumerouter-consumer'
            ]
            for service in services:
                if service_manager.has_service(service, client=target_client):
                    Toolbox.log(
                        logger=NodeTypeController._logger,
                        messages='Removing service {0}'.format(service))
                    try:
                        ServiceFactory.change_service_state(
                            target_client, service, 'stop',
                            NodeTypeController._logger)
                        service_manager.remove_service(service,
                                                       client=target_client)
                    except Exception as ex:
                        Toolbox.log(
                            logger=NodeTypeController._logger,
                            messages=[
                                '\nFailed to remove service {0}'.format(service),
                                ex
                            ],
                            loglevel='exception')

            if service_manager.has_service('workers', client=target_client):
                service_manager.add_service(
                    name='workers',
                    client=target_client,
                    params={'WORKER_QUEUE': '{0}'.format(unique_id)})
        try:
            NodeTypeController._configure_amqp_to_volumedriver()
        except Exception as ex:
            Toolbox.log(
                logger=NodeTypeController._logger,
                messages=['\nFailed to configure AMQP to Storage Driver', ex],
                loglevel='exception')

        Toolbox.log(logger=NodeTypeController._logger,
                    messages='Restarting services')
        NodeTypeController.restart_framework_and_memcache_services(
            clients=ip_client_map,
            logger=NodeTypeController._logger,
            offline_node_ips=offline_node_ips)

        if Toolbox.run_hooks(component='nodetype',
                             sub_component='demote',
                             logger=NodeTypeController._logger,
                             cluster_ip=cluster_ip,
                             master_ip=master_ip,
                             offline_node_ips=offline_node_ips):
            Toolbox.log(logger=NodeTypeController._logger,
                        messages='Restarting services')
            NodeTypeController.restart_framework_and_memcache_services(
                clients=ip_client_map,
                logger=NodeTypeController._logger,
                offline_node_ips=offline_node_ips)

        if storagerouter not in offline_nodes:
            target_client = ip_client_map[cluster_ip]
            node_name, _ = target_client.get_hostname()
            if NodeTypeController.avahi_installed(
                    client=target_client,
                    logger=NodeTypeController._logger) is True:
                NodeTypeController.configure_avahi(
                    client=target_client,
                    node_name=node_name,
                    node_type='extra',
                    logger=NodeTypeController._logger)
        Configuration.set(
            '/ovs/framework/hosts/{0}/type'.format(storagerouter.machine_id),
            'EXTRA')

        if target_client is not None and target_client.file_exists(
                '/tmp/ovs_rollback'):
            target_client.file_write('/tmp/ovs_rollback', 'rollback')

        Toolbox.log(logger=NodeTypeController._logger,
                    messages='Demote complete',
                    title=True)
Code example #8
File: nodetype.py Project: sun363587351/framework-1
    def promote_node(cluster_ip, master_ip, ip_client_map, unique_id,
                     configure_memcached, configure_rabbitmq):
        """
        Promotes a given node
        """
        from ovs.dal.lists.storagerouterlist import StorageRouterList
        from ovs.dal.lists.servicetypelist import ServiceTypeList
        from ovs.dal.lists.servicelist import ServiceList
        from ovs.dal.hybrids.service import Service

        Toolbox.log(logger=NodeTypeController._logger,
                    messages='Promoting node',
                    title=True)
        service_manager = ServiceFactory.get_manager()
        if configure_memcached is True:
            if NodeTypeController._validate_local_memcache_servers(
                    ip_client_map) is False:
                raise RuntimeError(
                    'Not all memcache nodes can be reached which is required for promoting a node.'
                )

        target_client = ip_client_map[cluster_ip]
        machine_id = System.get_my_machine_id(target_client)
        node_name, _ = target_client.get_hostname()
        master_client = ip_client_map[master_ip]

        storagerouter = StorageRouterList.get_by_machine_id(unique_id)
        storagerouter.node_type = 'MASTER'
        storagerouter.save()

        external_config = Configuration.get('/ovs/framework/external_config')
        if external_config is None:
            Toolbox.log(logger=NodeTypeController._logger,
                        messages='Joining Arakoon configuration cluster')
            arakoon_installer = ArakoonInstaller(cluster_name='config')
            arakoon_installer.load(ip=master_ip)
            arakoon_installer.extend_cluster(
                new_ip=cluster_ip,
                base_dir=Configuration.get('/ovs/framework/paths|ovsdb'))
            arakoon_installer.restart_cluster_after_extending(
                new_ip=cluster_ip)
            service_manager.register_service(
                node_name=machine_id,
                service_metadata=arakoon_installer.service_metadata[cluster_ip]
            )

        # Find other (arakoon) master nodes
        arakoon_cluster_name = str(
            Configuration.get('/ovs/framework/arakoon_clusters|ovsdb'))
        arakoon_metadata = ArakoonInstaller.get_arakoon_metadata_by_cluster_name(
            cluster_name=arakoon_cluster_name)
        config = ArakoonClusterConfig(cluster_id=arakoon_cluster_name)
        master_node_ips = [node.ip for node in config.nodes]
        if cluster_ip in master_node_ips:
            master_node_ips.remove(cluster_ip)
        if len(master_node_ips) == 0:
            raise RuntimeError(
                'There should be at least one other master node')

        arakoon_ports = []
        if arakoon_metadata['internal'] is True:
            Toolbox.log(logger=NodeTypeController._logger,
                        messages='Joining Arakoon OVS DB cluster')
            arakoon_installer = ArakoonInstaller(
                cluster_name=arakoon_cluster_name)
            arakoon_installer.load()
            arakoon_installer.extend_cluster(
                new_ip=cluster_ip,
                base_dir=Configuration.get('/ovs/framework/paths|ovsdb'))
            arakoon_installer.restart_cluster_after_extending(
                new_ip=cluster_ip)
            arakoon_ports = arakoon_installer.ports[cluster_ip]

        if configure_memcached is True:
            NodeTypeController.configure_memcached(
                client=target_client, logger=NodeTypeController._logger)
        NodeTypeController.add_services(client=target_client,
                                        node_type='master',
                                        logger=NodeTypeController._logger)

        Toolbox.log(logger=NodeTypeController._logger,
                    messages='Update configurations')
        if configure_memcached is True:
            endpoints = Configuration.get('/ovs/framework/memcache|endpoints')
            endpoint = '{0}:11211'.format(cluster_ip)
            if endpoint not in endpoints:
                endpoints.append(endpoint)
                Configuration.set('/ovs/framework/memcache|endpoints',
                                  endpoints)
        if configure_rabbitmq is True:
            endpoints = Configuration.get(
                '/ovs/framework/messagequeue|endpoints')
            endpoint = '{0}:5672'.format(cluster_ip)
            if endpoint not in endpoints:
                endpoints.append(endpoint)
                Configuration.set('/ovs/framework/messagequeue|endpoints',
                                  endpoints)

        if arakoon_metadata['internal'] is True:
            Toolbox.log(logger=NodeTypeController._logger,
                        messages='Restarting master node services')
            PersistentFactory.store = None
            VolatileFactory.store = None

            if 'arakoon-ovsdb' not in [
                    s.name for s in ServiceList.get_services() if
                    s.is_internal is False or s.storagerouter.ip == cluster_ip
            ]:
                service = Service()
                service.name = 'arakoon-ovsdb'
                service.type = ServiceTypeList.get_by_name(
                    ServiceType.SERVICE_TYPES.ARAKOON)
                service.ports = arakoon_ports
                service.storagerouter = storagerouter
                service.save()

        if configure_rabbitmq is True:
            NodeTypeController.configure_rabbitmq(
                client=target_client, logger=NodeTypeController._logger)
            # Copy rabbitmq cookie
            rabbitmq_cookie_file = '/var/lib/rabbitmq/.erlang.cookie'

            Toolbox.log(logger=NodeTypeController._logger,
                        messages='Copying RabbitMQ cookie')
            contents = master_client.file_read(rabbitmq_cookie_file)
            master_hostname, _ = master_client.get_hostname()
            target_client.dir_create(os.path.dirname(rabbitmq_cookie_file))
            target_client.file_write(rabbitmq_cookie_file, contents)
            target_client.file_chmod(rabbitmq_cookie_file, mode=0400)
            target_client.run(['rabbitmq-server', '-detached'])
            time.sleep(5)
            target_client.run(['rabbitmqctl', 'stop_app'])
            time.sleep(5)
            target_client.run([
                'rabbitmqctl', 'join_cluster',
                'rabbit@{0}'.format(master_hostname)
            ])
            time.sleep(5)
            target_client.run(['rabbitmqctl', 'stop'])
            time.sleep(5)

            # Enable HA for the rabbitMQ queues
            ServiceFactory.change_service_state(target_client,
                                                'rabbitmq-server', 'start',
                                                NodeTypeController._logger)
            NodeTypeController.check_rabbitmq_and_enable_ha_mode(
                client=target_client, logger=NodeTypeController._logger)

        NodeTypeController._configure_amqp_to_volumedriver()

        Toolbox.log(logger=NodeTypeController._logger,
                    messages='Starting services')
        services = ['memcached', 'arakoon-ovsdb', 'rabbitmq-server']
        if arakoon_metadata['internal'] is True:
            services.remove('arakoon-ovsdb')
        for service in services:
            if service_manager.has_service(service, client=target_client):
                ServiceFactory.change_service_state(target_client, service,
                                                    'start',
                                                    NodeTypeController._logger)

        Toolbox.log(logger=NodeTypeController._logger,
                    messages='Restarting services')
        NodeTypeController.restart_framework_and_memcache_services(
            clients=ip_client_map, logger=NodeTypeController._logger)

        if Toolbox.run_hooks(component='nodetype',
                             sub_component='promote',
                             logger=NodeTypeController._logger,
                             cluster_ip=cluster_ip,
                             master_ip=master_ip):
            Toolbox.log(logger=NodeTypeController._logger,
                        messages='Restarting services')
            NodeTypeController.restart_framework_and_memcache_services(
                clients=ip_client_map, logger=NodeTypeController._logger)

        if NodeTypeController.avahi_installed(
                client=target_client,
                logger=NodeTypeController._logger) is True:
            NodeTypeController.configure_avahi(
                client=target_client,
                node_name=node_name,
                node_type='master',
                logger=NodeTypeController._logger)
        Configuration.set('/ovs/framework/hosts/{0}/type'.format(machine_id),
                          'MASTER')
        target_client.run(
            ['chown', '-R', 'ovs:ovs', '/opt/OpenvStorage/config'])
        Configuration.set(
            '/ovs/framework/hosts/{0}/promotecompleted'.format(machine_id),
            True)

        if target_client.file_exists('/tmp/ovs_rollback'):
            target_client.file_delete('/tmp/ovs_rollback')

        Toolbox.log(logger=NodeTypeController._logger,
                    messages='Promote complete')
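
promote_node finishes by recording the node type and a completion flag under /ovs/framework/hosts/<machine_id>/. A hypothetical read-back of those keys (the module paths below are assumptions) could look like:

from ovs.extensions.generic.configuration import Configuration  # assumed module path
from ovs.extensions.generic.system import System                # assumed module path

machine_id = System.get_my_machine_id()
node_type = Configuration.get('/ovs/framework/hosts/{0}/type'.format(machine_id))
promoted = Configuration.get('/ovs/framework/hosts/{0}/promotecompleted'.format(machine_id))
print('node type: {0}, promote completed: {1}'.format(node_type, promoted))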
Code example #9
 def new_function(*args, **kwargs):
     """
     Wrapped function
     """
     request = _find_request(args)
     new_kwargs = {}
     # Find out the arguments of the decorated function
     function_info = inspect.getargspec(f)
     if function_info.defaults is None:
         mandatory_vars = function_info.args[1:]
         optional_vars = []
     else:
         mandatory_vars = function_info.args[1:-len(function_info.defaults)]
         optional_vars = function_info.args[len(mandatory_vars) + 1:]
     # Check versioning
     version = regex.match(request.META['HTTP_ACCEPT']).groupdict()['version']
     versions = (max(min_version, settings.VERSION[0]),
                 min(max_version, settings.VERSION[-1]))
     if version == '*':  # If accepting all versions, it defaults to the highest one
         version = versions[1]
     version = int(version)
     if version < versions[0] or version > versions[1]:
         raise NotAcceptable('API version requirements: {0} <= <version> <= {1}. Got {2}'.format(versions[0], versions[1], version))
     if 'version' in mandatory_vars:
         new_kwargs['version'] = version
         mandatory_vars.remove('version')
     # Fill request parameter, if available
     if 'request' in mandatory_vars:
         new_kwargs['request'] = request
         mandatory_vars.remove('request')
     # Fill main object, if required
     if 'pk' in kwargs and object_type is not None:
         typename = object_type.__name__.lower()
         try:
             instance = object_type(kwargs['pk'])
             if typename in mandatory_vars:
                 new_kwargs[typename] = instance
                 mandatory_vars.remove(typename)
         except ObjectNotFoundException:
             raise Http404()
     # Fill local storagerouter, if requested
     if 'local_storagerouter' in mandatory_vars:
         storagerouter = StorageRouterList.get_by_machine_id(settings.UNIQUE_ID)
         new_kwargs['local_storagerouter'] = storagerouter
         mandatory_vars.remove('local_storagerouter')
     # Fill mandatory parameters
     post_data = request.DATA if hasattr(request, 'DATA') else request.POST
     get_data = request.QUERY_PARAMS if hasattr(request, 'QUERY_PARAMS') else request.GET
     for name in mandatory_vars:
         if name in kwargs:
             new_kwargs[name] = kwargs[name]
         else:
             if name not in post_data:
                 if name not in get_data:
                     raise NotAcceptable('Invalid data passed: {0} is missing'.format(name))
                 new_kwargs[name] = _try_parse(get_data[name])
             else:
                 new_kwargs[name] = _try_parse(post_data[name])
     # Try to fill optional parameters
     for name in optional_vars:
         if name in kwargs:
             new_kwargs[name] = kwargs[name]
         else:
             if name in post_data:
                 new_kwargs[name] = _try_parse(post_data[name])
             elif name in get_data:
                 new_kwargs[name] = _try_parse(get_data[name])
     # Call the function
     return f(args[0], **new_kwargs)
Code example #10
File: nodetype.py Project: openvstorage/framework
    def demote_node(cluster_ip, master_ip, ip_client_map, unique_id, unconfigure_memcached, unconfigure_rabbitmq, offline_nodes=None):
        """
        Demotes a given node
        """
        from ovs.dal.lists.storagerouterlist import StorageRouterList

        Toolbox.log(logger=NodeTypeController._logger, messages='Demoting node', title=True)
        if offline_nodes is None:
            offline_nodes = []

        if unconfigure_memcached is True and len(offline_nodes) == 0:
            if NodeTypeController._validate_local_memcache_servers(ip_client_map) is False:
                raise RuntimeError('Not all memcache nodes can be reached which is required for demoting a node.')

        # Find other (arakoon) master nodes
        arakoon_cluster_name = str(Configuration.get('/ovs/framework/arakoon_clusters|ovsdb'))
        arakoon_metadata = ArakoonInstaller.get_arakoon_metadata_by_cluster_name(cluster_name=arakoon_cluster_name)
        config = ArakoonClusterConfig(cluster_id=arakoon_cluster_name, filesystem=False)
        config.load_config()
        master_node_ips = [node.ip for node in config.nodes]
        if cluster_ip in master_node_ips:
            master_node_ips.remove(cluster_ip)
        if len(master_node_ips) == 0:
            raise RuntimeError('There should be at least one other master node')

        storagerouter = StorageRouterList.get_by_machine_id(unique_id)
        storagerouter.node_type = 'EXTRA'
        storagerouter.save()

        offline_node_ips = [node.ip for node in offline_nodes]
        if arakoon_metadata['internal'] is True:
            Toolbox.log(logger=NodeTypeController._logger, messages='Leaving Arakoon {0} cluster'.format(arakoon_cluster_name))
            ArakoonInstaller.shrink_cluster(deleted_node_ip=cluster_ip,
                                            remaining_node_ips=master_node_ips,
                                            cluster_name=arakoon_cluster_name,
                                            offline_nodes=offline_node_ips)

        try:
            external_config = Configuration.get('/ovs/framework/external_config')
            if external_config is None:
                config_store = Configuration.get_store()
                if config_store == 'arakoon':
                    Toolbox.log(logger=NodeTypeController._logger, messages='Leaving Arakoon config cluster')
                    ArakoonInstaller.shrink_cluster(deleted_node_ip=cluster_ip,
                                                    remaining_node_ips=master_node_ips,
                                                    cluster_name='config',
                                                    offline_nodes=offline_node_ips,
                                                    filesystem=True)
                else:
                    from ovs.extensions.db.etcd.installer import EtcdInstaller
                    Toolbox.log(logger=NodeTypeController._logger, messages='Leaving Etcd cluster')
                    EtcdInstaller.shrink_cluster(master_ip, cluster_ip, 'config', offline_node_ips)
        except Exception as ex:
            Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to leave configuration cluster', ex], loglevel='exception')

        Toolbox.log(logger=NodeTypeController._logger, messages='Update configurations')
        try:
            if unconfigure_memcached is True:
                endpoints = Configuration.get('/ovs/framework/memcache|endpoints')
                endpoint = '{0}:{1}'.format(cluster_ip, 11211)
                if endpoint in endpoints:
                    endpoints.remove(endpoint)
                Configuration.set('/ovs/framework/memcache|endpoints', endpoints)
            if unconfigure_rabbitmq is True:
                endpoints = Configuration.get('/ovs/framework/messagequeue|endpoints')
                endpoint = '{0}:{1}'.format(cluster_ip, 5672)
                if endpoint in endpoints:
                    endpoints.remove(endpoint)
                Configuration.set('/ovs/framework/messagequeue|endpoints', endpoints)
        except Exception as ex:
            Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to update configurations', ex], loglevel='exception')

        if arakoon_metadata['internal'] is True:
            Toolbox.log(logger=NodeTypeController._logger, messages='Restarting master node services')
            remaining_nodes = ip_client_map.keys()[:]
            if cluster_ip in remaining_nodes:
                remaining_nodes.remove(cluster_ip)

            PersistentFactory.store = None
            VolatileFactory.store = None

            for service in storagerouter.services:
                if service.name == 'arakoon-ovsdb':
                    service.delete()

        target_client = None
        if storagerouter in offline_nodes:
            if unconfigure_rabbitmq is True:
                Toolbox.log(logger=NodeTypeController._logger, messages='Removing/unconfiguring offline RabbitMQ node')
                client = ip_client_map[master_ip]
                try:
                    client.run(['rabbitmqctl', 'forget_cluster_node', 'rabbit@{0}'.format(storagerouter.name)])
                except Exception as ex:
                    Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to forget RabbitMQ cluster node', ex], loglevel='exception')
        else:
            target_client = ip_client_map[cluster_ip]
            if unconfigure_rabbitmq is True:
                Toolbox.log(logger=NodeTypeController._logger, messages='Removing/unconfiguring RabbitMQ')
                try:
                    if ServiceManager.has_service('rabbitmq-server', client=target_client):
                        Toolbox.change_service_state(target_client, 'rabbitmq-server', 'stop', NodeTypeController._logger)
                        target_client.run(['rabbitmq-server', '-detached'])
                        time.sleep(5)
                        target_client.run(['rabbitmqctl', 'stop_app'])
                        time.sleep(5)
                        target_client.run(['rabbitmqctl', 'reset'])
                        time.sleep(5)
                        target_client.run(['rabbitmqctl', 'stop'])
                        time.sleep(5)
                        target_client.file_unlink("/var/lib/rabbitmq/.erlang.cookie")
                        Toolbox.change_service_state(target_client, 'rabbitmq-server', 'stop', NodeTypeController._logger)  # To be sure
                except Exception as ex:
                    Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to remove/unconfigure RabbitMQ', ex], loglevel='exception')

            Toolbox.log(logger=NodeTypeController._logger, messages='Stopping services')
            services = ['memcached', 'rabbitmq-server']
            if unconfigure_rabbitmq is False:
                services.remove('rabbitmq-server')
            if unconfigure_memcached is False:
                services.remove('memcached')
            for service in services:
                if ServiceManager.has_service(service, client=target_client):
                    Toolbox.log(logger=NodeTypeController._logger, messages='Stopping service {0}'.format(service))
                    try:
                        Toolbox.change_service_state(target_client, service, 'stop', NodeTypeController._logger)
                    except Exception as ex:
                        Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to stop service {0}'.format(service), ex], loglevel='exception')

            Toolbox.log(logger=NodeTypeController._logger, messages='Removing services')
            services = ['scheduled-tasks', 'webapp-api', 'volumerouter-consumer']
            for service in services:
                if ServiceManager.has_service(service, client=target_client):
                    Toolbox.log(logger=NodeTypeController._logger, messages='Removing service {0}'.format(service))
                    try:
                        Toolbox.change_service_state(target_client, service, 'stop', NodeTypeController._logger)
                        ServiceManager.remove_service(service, client=target_client)
                    except Exception as ex:
                        Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to remove service {0}'.format(service), ex], loglevel='exception')

            if ServiceManager.has_service('workers', client=target_client):
                ServiceManager.add_service(name='workers',
                                           client=target_client,
                                           params={'WORKER_QUEUE': '{0}'.format(unique_id)})
        try:
            NodeTypeController._configure_amqp_to_volumedriver()
        except Exception as ex:
            Toolbox.log(logger=NodeTypeController._logger, messages=['\nFailed to configure AMQP to Storage Driver', ex], loglevel='exception')

        Toolbox.log(logger=NodeTypeController._logger, messages='Restarting services')
        NodeTypeController.restart_framework_and_memcache_services(clients=ip_client_map, logger=NodeTypeController._logger, offline_node_ips=offline_node_ips)

        if Toolbox.run_hooks(component='nodetype',
                             sub_component='demote',
                             logger=NodeTypeController._logger,
                             cluster_ip=cluster_ip,
                             master_ip=master_ip,
                             offline_node_ips=offline_node_ips):
            Toolbox.log(logger=NodeTypeController._logger, messages='Restarting services')
            NodeTypeController.restart_framework_and_memcache_services(clients=ip_client_map, logger=NodeTypeController._logger, offline_node_ips=offline_node_ips)

        if storagerouter not in offline_nodes:
            target_client = ip_client_map[cluster_ip]
            node_name, _ = target_client.get_hostname()
            if NodeTypeController.avahi_installed(client=target_client, logger=NodeTypeController._logger) is True:
                NodeTypeController.configure_avahi(client=target_client, node_name=node_name, node_type='extra', logger=NodeTypeController._logger)
        Configuration.set('/ovs/framework/hosts/{0}/type'.format(storagerouter.machine_id), 'EXTRA')

        if target_client is not None and target_client.file_exists('/tmp/ovs_rollback'):
            target_client.file_write('/tmp/ovs_rollback', 'rollback')

        Toolbox.log(logger=NodeTypeController._logger, messages='Demote complete', title=True)
Code example #11
File: nodetype.py Project: openvstorage/framework
    def promote_node(cluster_ip, master_ip, ip_client_map, unique_id, configure_memcached, configure_rabbitmq):
        """
        Promotes a given node
        """
        from ovs.dal.lists.storagerouterlist import StorageRouterList
        from ovs.dal.lists.servicetypelist import ServiceTypeList
        from ovs.dal.lists.servicelist import ServiceList
        from ovs.dal.hybrids.service import Service

        Toolbox.log(logger=NodeTypeController._logger, messages='Promoting node', title=True)
        if configure_memcached is True:
            if NodeTypeController._validate_local_memcache_servers(ip_client_map) is False:
                raise RuntimeError('Not all memcache nodes can be reached which is required for promoting a node.')

        target_client = ip_client_map[cluster_ip]
        machine_id = System.get_my_machine_id(target_client)
        node_name, _ = target_client.get_hostname()
        master_client = ip_client_map[master_ip]

        storagerouter = StorageRouterList.get_by_machine_id(unique_id)
        storagerouter.node_type = 'MASTER'
        storagerouter.save()

        external_config = Configuration.get('/ovs/framework/external_config')
        if external_config is None:
            config_store = Configuration.get_store()
            if config_store == 'arakoon':
                Toolbox.log(logger=NodeTypeController._logger, messages='Joining Arakoon configuration cluster')
                metadata = ArakoonInstaller.extend_cluster(master_ip=master_ip,
                                                           new_ip=cluster_ip,
                                                           cluster_name='config',
                                                           base_dir=Configuration.get('/ovs/framework/paths|ovsdb'),
                                                           ports=[26400, 26401],
                                                           filesystem=True)
                ArakoonInstaller.restart_cluster_add(cluster_name='config',
                                                     current_ips=metadata['ips'],
                                                     new_ip=cluster_ip,
                                                     filesystem=True)
                ServiceManager.register_service(node_name=machine_id,
                                                service_metadata=metadata['service_metadata'])
            else:
                from ovs.extensions.db.etcd.installer import EtcdInstaller
                Toolbox.log(logger=NodeTypeController._logger, messages='Joining Etcd cluster')
                EtcdInstaller.extend_cluster(master_ip, cluster_ip, 'config')

        # Find other (arakoon) master nodes
        arakoon_cluster_name = str(Configuration.get('/ovs/framework/arakoon_clusters|ovsdb'))
        arakoon_metadata = ArakoonInstaller.get_arakoon_metadata_by_cluster_name(cluster_name=arakoon_cluster_name)
        config = ArakoonClusterConfig(cluster_id=arakoon_cluster_name, filesystem=False)
        config.load_config()
        master_node_ips = [node.ip for node in config.nodes]
        if cluster_ip in master_node_ips:
            master_node_ips.remove(cluster_ip)
        if len(master_node_ips) == 0:
            raise RuntimeError('There should be at least one other master node')

        arakoon_ports = []
        if arakoon_metadata['internal'] is True:
            Toolbox.log(logger=NodeTypeController._logger, messages='Joining Arakoon OVS DB cluster')
            result = ArakoonInstaller.extend_cluster(master_ip=master_ip,
                                                     new_ip=cluster_ip,
                                                     cluster_name=arakoon_cluster_name,
                                                     base_dir=Configuration.get('/ovs/framework/paths|ovsdb'))
            ArakoonInstaller.restart_cluster_add(cluster_name=arakoon_cluster_name,
                                                 current_ips=result['ips'],
                                                 new_ip=cluster_ip, filesystem=False)
            arakoon_ports = [result['client_port'], result['messaging_port']]

        if configure_memcached is True:
            NodeTypeController.configure_memcached(client=target_client, logger=NodeTypeController._logger)
        NodeTypeController.add_services(client=target_client, node_type='master', logger=NodeTypeController._logger)

        Toolbox.log(logger=NodeTypeController._logger, messages='Update configurations')
        if configure_memcached is True:
            endpoints = Configuration.get('/ovs/framework/memcache|endpoints')
            endpoint = '{0}:11211'.format(cluster_ip)
            if endpoint not in endpoints:
                endpoints.append(endpoint)
                Configuration.set('/ovs/framework/memcache|endpoints', endpoints)
        if configure_rabbitmq is True:
            endpoints = Configuration.get('/ovs/framework/messagequeue|endpoints')
            endpoint = '{0}:5672'.format(cluster_ip)
            if endpoint not in endpoints:
                endpoints.append(endpoint)
                Configuration.set('/ovs/framework/messagequeue|endpoints', endpoints)

        if arakoon_metadata['internal'] is True:
            Toolbox.log(logger=NodeTypeController._logger, messages='Restarting master node services')
            ArakoonInstaller.restart_cluster_add(cluster_name=arakoon_cluster_name,
                                                 current_ips=master_node_ips,
                                                 new_ip=cluster_ip,
                                                 filesystem=False)
            PersistentFactory.store = None
            VolatileFactory.store = None

            if 'arakoon-ovsdb' not in [s.name for s in ServiceList.get_services() if s.is_internal is False or s.storagerouter.ip == cluster_ip]:
                service = Service()
                service.name = 'arakoon-ovsdb'
                service.type = ServiceTypeList.get_by_name(ServiceType.SERVICE_TYPES.ARAKOON)
                service.ports = arakoon_ports
                service.storagerouter = storagerouter
                service.save()

        if configure_rabbitmq is True:
            NodeTypeController.configure_rabbitmq(client=target_client, logger=NodeTypeController._logger)
            # Copy rabbitmq cookie
            rabbitmq_cookie_file = '/var/lib/rabbitmq/.erlang.cookie'

            Toolbox.log(logger=NodeTypeController._logger, messages='Copying RabbitMQ cookie')
            contents = master_client.file_read(rabbitmq_cookie_file)
            master_hostname, _ = master_client.get_hostname()
            target_client.dir_create(os.path.dirname(rabbitmq_cookie_file))
            target_client.file_write(rabbitmq_cookie_file, contents)
            target_client.file_chmod(rabbitmq_cookie_file, mode=400)
            target_client.run(['rabbitmq-server', '-detached'])
            time.sleep(5)
            target_client.run(['rabbitmqctl', 'stop_app'])
            time.sleep(5)
            target_client.run(['rabbitmqctl', 'join_cluster', 'rabbit@{0}'.format(master_hostname)])
            time.sleep(5)
            target_client.run(['rabbitmqctl', 'stop'])
            time.sleep(5)

            # Enable HA for the rabbitMQ queues
            Toolbox.change_service_state(target_client, 'rabbitmq-server', 'start', NodeTypeController._logger)
            NodeTypeController.check_rabbitmq_and_enable_ha_mode(client=target_client, logger=NodeTypeController._logger)

        NodeTypeController._configure_amqp_to_volumedriver()

        Toolbox.log(logger=NodeTypeController._logger, messages='Starting services')
        services = ['memcached', 'arakoon-ovsdb', 'rabbitmq-server', 'etcd-config']
        if arakoon_metadata['internal'] is True:
            services.remove('arakoon-ovsdb')
        for service in services:
            if ServiceManager.has_service(service, client=target_client):
                Toolbox.change_service_state(target_client, service, 'start', NodeTypeController._logger)

        Toolbox.log(logger=NodeTypeController._logger, messages='Restarting services')
        NodeTypeController.restart_framework_and_memcache_services(clients=ip_client_map, logger=NodeTypeController._logger)

        if Toolbox.run_hooks(component='nodetype',
                             sub_component='promote',
                             logger=NodeTypeController._logger,
                             cluster_ip=cluster_ip,
                             master_ip=master_ip):
            Toolbox.log(logger=NodeTypeController._logger, messages='Restarting services')
            NodeTypeController.restart_framework_and_memcache_services(clients=ip_client_map, logger=NodeTypeController._logger)

        if NodeTypeController.avahi_installed(client=target_client, logger=NodeTypeController._logger) is True:
            NodeTypeController.configure_avahi(client=target_client, node_name=node_name, node_type='master', logger=NodeTypeController._logger)
        Configuration.set('/ovs/framework/hosts/{0}/type'.format(machine_id), 'MASTER')
        target_client.run(['chown', '-R', 'ovs:ovs', '/opt/OpenvStorage/config'])
        Configuration.set('/ovs/framework/hosts/{0}/promotecompleted'.format(machine_id), True)

        if target_client.file_exists('/tmp/ovs_rollback'):
            target_client.file_delete('/tmp/ovs_rollback')

        Toolbox.log(logger=NodeTypeController._logger, messages='Promote complete')