Example #1
File: task.py Project: anbangr/fuel-web
    def message(cls, task, nodes_to_provisioning):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        task = objects.Task.get_by_uid(
            task.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        objects.NodeCollection.lock_nodes(nodes_to_provisioning)
        serialized_cluster = provisioning_serializers.serialize(
            task.cluster, nodes_to_provisioning)

        for node in nodes_to_provisioning:
            if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
                continue
            logs_utils.prepare_syslog_dir(node)

        rpc_message = make_astute_message(
            task,
            cls._get_provision_method(task.cluster),
            'provision_resp',
            {
                'provisioning_info': serialized_cluster
            }
        )
        db().commit()
        return rpc_message
Example #2
    def message(cls, task, nodes_to_provisioning):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        task = objects.Task.get_by_uid(
            task.id,
            fail_if_not_found=True,
            lock_for_update=True
        )
        objects.NodeCollection.lock_nodes(nodes_to_provisioning)
        serialized_cluster = provisioning_serializers.serialize(
            task.cluster, nodes_to_provisioning)

        for node in nodes_to_provisioning:
            if settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP:
                continue

            admin_net_id = objects.Node.get_network_manager(
                node
            ).get_admin_network_group_id(node.id)

            TaskHelper.prepare_syslog_dir(node, admin_net_id)

        rpc_message = make_astute_message(
            task,
            'provision',
            'provision_resp',
            {
                'provisioning_info': serialized_cluster
            }
        )
        db().commit()
        return rpc_message
Example #3
    def message(cls, task, nodes):
        logger.debug("%s.message(task=%s)", cls.__class__.__name__, task.uuid)

        for n in nodes:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []
            n.status = 'provisioned'
            n.progress = 0

        # here we replace deployment data if the user redefined it
        serialized_cluster = deployment_serializers.serialize(
            task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False

        rpc_message = make_astute_message(
            task,
            'deploy',
            'deploy_resp',
            {
                'deployment_info': serialized_cluster
            }
        )
        db().commit()
        return rpc_message
Example #4
    def _link_tasks(previous, current):
        """Link the previous and current task in chain.

        :param previous: the previous task instance
        :param current: the current task instance
        """

        # if the uids are the same, the task will run on the same nodes
        if previous.get('uids') == current.get('uids'):
            logger.debug(
                "connect task '%s' with previous in chain '%s'",
                current['id'], previous['id']
            )
            current.setdefault('requires', []).append(previous['id'])
        else:
            # the list of nodes is different, make cross-depends
            logger.debug(
                "cross node dependencies: task '%s', previous task '%s', "
                "nodes: %s",
                current['id'], previous['id'],
                ', '.join(previous.get('uids', ()))
            )
            requires_ex = current.setdefault('requires_ex', [])
            for node_id in previous.get('uids', ()):
                requires_ex.append(
                    {'name': previous['id'], 'node_id': node_id}
                )
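
A minimal sketch of what _link_tasks does to the task dicts (the ids and uids below are hypothetical, but the key names match the code above):

    previous = {'id': 'netconfig', 'uids': ['1', '2']}
    current = {'id': 'deploy_start', 'uids': ['3']}
    # node lists differ, so cross-node dependencies are created
    _link_tasks(previous, current)
    # current['requires_ex'] == [{'name': 'netconfig', 'node_id': '1'},
    #                            {'name': 'netconfig', 'node_id': '2'}]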
Example #5
File: task.py Project: damjanek/fuelweb
    def execute(cls, task):
        logger.debug("DeploymentTask.execute(task=%s)" % task.uuid)
        message = cls.message(task)
        task.cache = message
        orm().add(task)
        orm().commit()
        rpc.cast('naily', message)
Example #6
File: cluster.py Project: tsipa/fuel-web
    def PUT(self, cluster_id):
        """:returns: JSONized Cluster attributes.
        :http: * 200 (OK)
               * 400 (wrong attributes data specified)
               * 404 (cluster not found in db)
               * 500 (cluster has no attributes)
        """
        cluster = self.get_object_or_404(
            Cluster,
            cluster_id,
            log_404=(
                "warning",
                "Error: there is no cluster "
                "with id '{0}' in DB.".format(cluster_id)
            )
        )

        if not cluster.attributes:
            logger.error('ClusterAttributesDefaultsHandler: no attributes'
                         ' found for cluster_id %s' % cluster_id)
            raise web.internalerror("No attributes found!")

        cluster.attributes.editable = cluster.release.attributes_metadata.get(
            "editable"
        )
        db().commit()
        cluster.add_pending_changes("attributes")

        logger.debug('ClusterAttributesDefaultsHandler:'
                     ' editable attributes for cluster_id %s were reset'
                     ' to default' % cluster_id)
        return {"editable": cluster.attributes.editable}
Example #7
def cast(name, message, service=False):
    logger.debug(
        "RPC cast to orchestrator:\n{0}".format(
            jsonutils.dumps(message, indent=4)
        )
    )
    # for testing only
    file_object = open('/opt/queuemsg.txt', 'w')
    file_object.write(jsonutils.dumps(message, indent=4))
    file_object.close()

    use_queue = naily_queue if not service else naily_service_queue
    use_exchange = naily_exchange if not service else naily_service_exchange
    with Connection(conn_str) as conn:
        with conn.Producer(serializer='json') as producer:
            publish = functools.partial(producer.publish, message,
                exchange=use_exchange, routing_key=name, declare=[use_queue])
            try:
                publish()
            except amqp_exceptions.PreconditionFailed as e:
                logger.warning(six.text_type(e))
                # (dshulyak) we should drop both exchanges/queues in order
                # for astute to be able to recover temporary queues
                utils.delete_entities(
                    conn, naily_service_exchange, naily_service_queue,
                    naily_exchange, naily_queue)
                publish()
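
A hedged usage sketch: elsewhere in these examples the function is called with the 'naily' routing key and a JSON-serializable message dict, e.g.:

    cast('naily', {'method': 'provision',
                   'respond_to': 'provision_resp',
                   'args': {'task_uuid': task.uuid}})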
Example #8
File: task.py Project: vefimova/fuel-web
    def message(cls, task, nodes):
        logger.debug("%s.message(task=%s)", cls.__class__.__name__, task.uuid)

        for n in nodes:
            if n.pending_roles:
                n.roles += n.pending_roles
                n.pending_roles = []
            n.status = 'provisioned'
            n.progress = 0

        orchestrator_graph = deployment_graph.AstuteGraph(task.cluster)

        serialized_cluster = deployment_serializers.serialize(
            orchestrator_graph, task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False

        rpc_message = make_astute_message(
            task,
            'deploy',
            'deploy_resp',
            {
                'deployment_info': serialized_cluster
            }
        )
        db().flush()
        return rpc_message
Example #9
    def PUT(self, cluster_id):
        """:returns: JSONized Cluster attributes.

        :http: * 200 (OK)
               * 400 (wrong attributes data specified)
               * 404 (cluster not found in db)
               * 500 (cluster has no attributes)
        """
        cluster = self.get_object_or_404(
            objects.Cluster,
            cluster_id,
            log_404=(
                "error",
                "There is no cluster "
                "with id '{0}' in DB.".format(cluster_id)
            )
        )

        if not cluster.attributes:
            logger.error('ClusterAttributesDefaultsHandler: no attributes'
                         ' found for cluster_id %s' % cluster_id)
            raise self.http(500, "No attributes found!")

        cluster.attributes.editable = (
            objects.Cluster.get_default_editable_attributes(cluster))
        objects.Cluster.add_pending_changes(cluster, "attributes")

        logger.debug('ClusterAttributesDefaultsHandler:'
                     ' editable attributes for cluster_id %s were reset'
                     ' to default' % cluster_id)
        return {"editable": cluster.attributes.editable}
Example #10
    def update_interfaces_info(cls, node, update_by_agent=False):
        """Update interfaces in case of correct interfaces
        in meta field in node's model
        """
        try:
            cls.check_interfaces_correctness(node)
        except errors.InvalidInterfacesInfo as e:
            logger.debug("Cannot update interfaces: %s", e.message)
            return
        pxe_iface_name = cls._get_pxe_iface_name(node)
        for interface in node.meta["interfaces"]:
            # set 'pxe' property for appropriate iface
            if interface['name'] == pxe_iface_name:
                interface['pxe'] = True
            # try to get interface by mac address
            interface_db = next((
                n for n in node.nic_interfaces
                if utils.is_same_mac(n.mac, interface['mac'])),
                None)

            # try to get interface instance by interface name. this protects
            # us from losing nodes when some NICs were replaced with new ones
            interface_db = interface_db or next((
                n for n in node.nic_interfaces if n.name == interface['name']),
                None)

            if interface_db:
                cls.__update_existing_interface(interface_db.id, interface,
                                                update_by_agent)
            else:
                cls.__add_new_interface(node, interface)

        cls.__delete_not_found_interfaces(node, node.meta["interfaces"])
Example #11
    def create_mbr(self, boot=False):
        if self.free_space >= self.vm.field_generator("calc_mbr_size"):
            if boot:
                self.volumes.append({"type": "mbr"})
            logger.debug("Allocating MBR")
            self.free_space = self.free_space - \
                self.vm.field_generator("calc_mbr_size")
Example #12
    def stats_user_resp(cls, **kwargs):
        logger.info("RPC method stats_user_resp received: %s",
                    jsonutils.dumps(kwargs))

        task_uuid = kwargs.get('task_uuid')
        nodes = kwargs.get('nodes', [])
        status = kwargs.get('status')
        error = kwargs.get('error')
        message = kwargs.get('msg')

        task = objects.Task.get_by_uuid(
            task_uuid, fail_if_not_found=True, lock_for_update=True)

        if status not in (consts.TASK_STATUSES.ready,
                          consts.TASK_STATUSES.error):
            logger.debug("Task %s, id: %s in status: %s",
                         task.name, task.id, task.status)
            return

        data = {'status': status, 'progress': 100, 'message': message}
        if status == consts.TASK_STATUSES.error:
            logger.error("Task %s, id: %s failed: %s",
                         task.name, task.id, error)
            data['message'] = error

        objects.Task.update(task, data)
        cls._update_action_log_entry(status, task.name, task_uuid, nodes)
        logger.info("RPC method stats_user_resp processed")
Example #13
    def checkout(self, instance):
        fetch_file = os.path.join(
            const.REPOS_DIR,
            instance.repo_name,
            '.git/FETCH_HEAD'
        )
        if os.path.exists(fetch_file):
            current_ts = time.time()
            cluster = Cluster.get_by_uid(instance.env_id)
            last_fetch = os.stat(fetch_file).st_mtime
            if cluster.status != CLUSTER_STATUSES.deployment and \
                current_ts - last_fetch < const.REPO_TTL:
                return

        logger.debug("Repo TTL exceeded. Fetching code...")
        ssh_cmd = self._get_ssh_cmd(instance.repo_name)

        if not os.path.exists(self._get_key_path(instance.repo_name)):
            logger.debug('Key file does not exist. Creating...')
            self._create_key_file(instance.repo_name)

        with instance.repo.git.custom_environment(GIT_SSH=ssh_cmd):
            commit = instance.repo.remotes.origin.fetch(refspec=instance.ref)
            commit = commit[0].commit
            instance.repo.head.reference = commit
            instance.repo.head.reset(index=True, working_tree=True)
Example #14
    def _make_zabbix_request(cls, url, method, params, auth=None):
        header = {'Content-type': 'application/json'}
        data = {'jsonrpc': '2.0',
                'id': '1',
                'method': method,
                'params': params}
        if auth:
            data['auth'] = auth

        logger.debug("Zabbix request: %s", data)
        request = urllib2.Request(url, json.dumps(data), header)

        try:
            response = urllib2.urlopen(request)
        except urllib2.URLError as e:
            raise errors.CannotMakeZabbixRequest(
                "Can't make a request to Zabbix: {0}".format(e)
            )

        result = json.loads(response.read())
        logger.debug("Zabbix response: %s", result)

        if 'error' in result:
            code = result['error']['code']
            msg = result['error']['message']
            data = result['error'].get('data', '')
            raise errors.ZabbixRequestError(
                "Zabbix returned error code {0}, {1}: {2}".format(
                    code, msg, data
                )
            )

        return result['result']
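
A sketch of a typical call from inside the class, assuming a standard Zabbix JSON-RPC endpoint (the URL and credentials are placeholders):

    auth_token = cls._make_zabbix_request(
        'http://zabbix.example.com/zabbix/api_jsonrpc.php',
        'user.login',
        {'user': 'admin', 'password': 'zabbix'}
    )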
Example #15
File: helpers.py Project: loles/fuelweb
    def update_slave_nodes_fqdn(cls, nodes):
        for n in nodes:
            fqdn = cls.make_slave_fqdn(n.id)
            if n.fqdn != fqdn:
                n.fqdn = fqdn
                logger.debug("Updating node fqdn: %s %s", n.id, n.fqdn)
                db().commit()
Example #16
File: task.py Project: damjanek/fuelweb
    def execute(self, task, data):
        task_uuid = task.uuid
        nodes = []
        for n in task.cluster.nodes:
            node_json = {'uid': n.id, 'networks': []}
            for nic in n.interfaces:
                vlans = []
                for ng in nic.assigned_networks:
                    # Handle FuelWeb admin network first.
                    if not ng.cluster_id:
                        vlans.append(0)
                        continue
                    data_ng = filter(
                        lambda i: i['name'] == ng.name,
                        data
                    )[0]
                    vlans.extend(data_ng['vlans'])
                if not vlans:
                    continue
                node_json['networks'].append(
                    {'iface': nic.name, 'vlans': vlans}
                )
            nodes.append(node_json)

        message = {'method': 'verify_networks',
                   'respond_to': 'verify_networks_resp',
                   'args': {'task_uuid': task.uuid,
                            'nodes': nodes}}
        logger.debug("Network verification is called with: %s", message)

        task.cache = message
        orm().add(task)
        orm().commit()
        rpc.cast('naily', message)
Example #17
    def execute(self, nodes_to_deployment):
        TaskHelper.update_slave_nodes_fqdn(nodes_to_deployment)
        logger.debug('Nodes to deploy: {0}'.format(
            ' '.join([n.fqdn for n in nodes_to_deployment])))
        task_deployment = Task(name='deployment', cluster=self.cluster)
        db().add(task_deployment)
        db().commit()

        deployment_message = self._call_silently(
            task_deployment,
            tasks.DeploymentTask,
            nodes_to_deployment,
            method_name='message')

        db().refresh(task_deployment)

        task_deployment.cache = deployment_message

        for node in nodes_to_deployment:
            node.status = 'deploying'
            node.progress = 0

        db().commit()
        rpc.cast('naily', deployment_message)

        return task_deployment
Example #18
    def _traverse(self, cdict):
        new_dict = {}
        if isinstance(cdict, dict):
            for i, val in cdict.iteritems():
                if type(val) in (str, unicode, int, float):
                    new_dict[i] = val
                elif isinstance(val, dict):
                    if "generator" in val:
                        logger.debug("Generating value: generator: %s "
                                     "generator_args: %s", val["generator"],
                                     val.get("generator_args", []))
                        genval = self.field_generator(
                            val["generator"],
                            *(val.get("generator_args", []))
                        )
                        logger.debug("Generated value: %s", str(genval))
                        new_dict[i] = genval
                    else:
                        new_dict[i] = self._traverse(val)
                elif isinstance(val, list):
                    new_dict[i] = []
                    for d in val:
                        new_dict[i].append(self._traverse(d))
        elif isinstance(cdict, list):
            new_dict = []
            for d in cdict:
                new_dict.append(self._traverse(d))
        return new_dict
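
A sketch of the structure _traverse expands, assuming a field_generator that maps 'calc_swap_size' to a number (the volume layout is illustrative):

    volume = {'type': 'swap',
              'size': {'generator': 'calc_swap_size'}}
    self._traverse(volume)
    # -> {'type': 'swap', 'size': 4096}  (whatever the generator returned)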
Example #19
    def execute(self, nodes_to_provision):
        """Run provisioning task on specified nodes
        """
        # locking nodes
        nodes_ids = [node.id for node in nodes_to_provision]
        nodes = objects.NodeCollection.filter_by_list(None, "id", nodes_ids, order_by="id")
        objects.NodeCollection.lock_for_update(nodes).all()

        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
        logger.debug("Nodes to provision: {0}".format(" ".join([n.fqdn for n in nodes_to_provision])))

        task_provision = Task(name="provision")
        task_provision.node_ids = nodes_ids
        # the node_ids parameter is not used when installation succeeds,
        # but it is needed when installation fails
        db().add(task_provision)
        db().commit()

        provision_message = self._call_silently(
            task_provision, tasks.InstallosTask, nodes_to_provision, method_name="message"
        )

        task_provision = objects.Task.get_by_uid(task_provision.id, fail_if_not_found=True, lock_for_update=True)
        task_provision.cache = provision_message
        objects.NodeCollection.lock_for_update(nodes).all()

        for node in nodes_to_provision:
            node.pending_addition = False
            node.status = NODE_STATUSES.provisioning
            node.progress = 0

        db().commit()

        rpc.cast("naily", provision_message)
        logger.info(u"消息发送完毕")
        return task_provision
Example #20
File: node.py Project: gdyuldin/fuel-web
    def update_primary_roles(cls, instance, new_primary_roles):
        """Update primary_roles for Node instance.

        Logs a warning if the node doesn't belong to a Cluster

        :param instance: Node instance
        :param new_primary_roles: list of new primary role names
        :returns: None
        """
        if not instance.cluster_id:
            logger.warning(
                u"Attempting to assign primary roles to node "
                u"'{0}' which isn't added to cluster".format(
                    instance.full_name))
            return

        assigned_roles = set(instance.roles + instance.pending_roles)
        for role in new_primary_roles:
            if role not in assigned_roles:
                logger.warning(
                    u"Could not mark node {0} as primary for {1} role, "
                    u"because there's no assigned {1} role.".format(
                        instance.full_name, role)
                )
                return

        logger.debug(
            u"Updating primary roles for node {0}: {1}".format(
                instance.full_name,
                new_primary_roles))

        instance.primary_roles = new_primary_roles
        db().flush()
Example #21
    def execute(cls, task):
        logger.debug("ProvisionTask.execute(task=%s)" % task.uuid)
        message = cls.message(task)
        task.cache = message
        db().add(task)
        db().commit()
        rpc.cast('naily', message)
Example #22
    def message(cls, task):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        task.cluster.prepare_for_deployment()
        nodes = TaskHelper.nodes_to_deploy(task.cluster)
        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(
                cluster=task.cluster).order_by(Node.id):
            # However, we must not pass nodes which are set to be deleted.
            if n.pending_deletion:
                continue

            if n.id in nodes_ids:  # It's node which we need to redeploy
                n.pending_addition = False
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []
                if n.status in ('deploying',):
                    n.status = 'provisioned'
                n.progress = 0
                db().add(n)
                db().commit()

        # here we replace deployment data if the user redefined it
        serialized_cluster = task.cluster.replaced_deployment_info or \
            deployment_serializers.serialize(task.cluster)

        return {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'deployment_info': serialized_cluster}}
Example #23
    def update_verify_networks(cls, uuid, status,
                               progress, msg, result):
        #TODO(dshulyak) move network tests into ostf
        task = db().query(Task).filter_by(uuid=uuid).first()
        if not task:
            logger.error("Can't set status='%s', message='%s': No task \
                    with UUID %s found!", status, msg, uuid)
            return

        previous_status = task.status

        statuses = [sub.status for sub in task.subtasks]
        messages = [sub.message for sub in task.subtasks]
        messages.append(msg)
        statuses.append(status)
        if any(st == 'error' for st in statuses):
            task.status = 'error'
        else:
            task.status = status or task.status
        task.progress = progress or task.progress
        task.result = result or task.result
        # join messages if not None or ""
        task.message = '\n'.join([m for m in messages if m])
        db().commit()
        if previous_status != task.status and task.cluster_id:
            logger.debug("Updating cluster status: "
                         "cluster_id: %s status: %s",
                         task.cluster_id, status)
            cls.update_cluster_status(uuid)
Example #24
File: node.py Project: gdyuldin/fuel-web
    def update_pending_roles(cls, instance, new_pending_roles):
        """Update pending_roles for Node instance.

        Logs a warning if the node doesn't belong to a Cluster

        :param instance: Node instance
        :param new_pending_roles: list of new pending role names
        :returns: None
        """
        if not instance.cluster_id:
            logger.warning(
                u"Attempting to assign pending roles to node "
                u"'{0}' which isn't added to cluster".format(
                    instance.full_name))
            return

        logger.debug(
            u"Updating pending roles for node {0}: {1}".format(
                instance.full_name,
                new_pending_roles))

        if new_pending_roles == []:
            # TODO(enchantner): research why the hell we need this
            Cluster.clear_pending_changes(
                instance.cluster,
                node_id=instance.id
            )

        instance.pending_roles = new_pending_roles
        db().flush()
Example #25
    def make_bootable(self):
        logger.debug("Allocating /boot partition")
        self.create_partition(
            "/boot",
            self.vm.field_generator("calc_boot_size")
        )
        self.create_mbr(True)
Example #26
    def execute(self, nodes_to_provision, **kwargs):
        """Run provisioning task on specified nodes."""

        logger.debug('Nodes to provision: {0}'.format(
            ' '.join([objects.Node.get_node_fqdn(n)
                      for n in nodes_to_provision])))

        self.check_running_task()

        task_provision = Task(name=consts.TASK_NAMES.provision,
                              status=consts.TASK_STATUSES.pending,
                              cluster=self.cluster)
        db().add(task_provision)
        # update cluster status
        self.cluster.status = consts.CLUSTER_STATUSES.deployment

        db().commit()
        nodes_ids_to_provision = [node.id for node in nodes_to_provision]

        # perform async call of _execute_async
        mule.call_task_manager_async(
            self.__class__,
            '_execute_async',
            self.cluster.id,
            task_provision.id,
            nodes_ids_to_provision=nodes_ids_to_provision,
            **kwargs
        )

        return task_provision
Example #27
File: node.py Project: koder-ua/fuel-web
    def update_by_agent(cls, instance, data):
        """Update Node instance with some specific cases for agent.

        * don't update provisioning or error state back to discover
        * don't update volume information if the disks array is empty

        :param data: dictionary of key-value pairs as object fields
        :returns: Node instance
        """
        # don't update provisioning and error back to discover
        if instance.status in ('provisioning', 'error'):
            if data.get('status', 'discover') == 'discover':
                logger.debug(
                    u"Node {0} has provisioning or error status - "
                    u"status not updated by agent".format(
                        instance.human_readable_name
                    )
                )

                data['status'] = instance.status

        # don't update volume information, if agent has sent an empty array
        meta = data.get('meta', {})
        if meta and len(meta.get('disks', [])) == 0 \
                and instance.meta.get('disks'):

            logger.warning(
                u'Node {0} has received an empty disks array - '
                u'volume information will not be updated'.format(
                    instance.human_readable_name
                )
            )
            meta['disks'] = instance.meta['disks']

        return cls.update(instance, data)
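
A hedged illustration of the two guarded cases (the agent payload is hypothetical):

    # node is in 'provisioning'; a late agent report says 'discover'
    data = {'status': 'discover', 'meta': {'disks': []}}
    cls.update_by_agent(instance, data)
    # the status is forced back to 'provisioning' before cls.update() runs,
    # and the previously known disks array is kept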
Example #28
    def execute(self):
        logger.debug("Creating release download task")
        task = Task(name="download_release")
        db().add(task)
        db().commit()
        self._call_silently(task, tasks.DownloadReleaseTask, self.release_data)
        return task
Example #29
    def execute(self, nodes_to_provision):
        """Run provisioning task on specified nodes
        """
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)

        logger.debug('Nodes to provision: {0}'.format(
            ' '.join([n.fqdn for n in nodes_to_provision])))

        task_provision = Task(name='provision', cluster=self.cluster)
        db().add(task_provision)
        db().commit()

        provision_message = self._call_silently(
            task_provision,
            tasks.ProvisionTask,
            nodes_to_provision,
            method_name='message'
        )
        db().refresh(task_provision)

        task_provision.cache = provision_message

        for node in nodes_to_provision:
            node.pending_addition = False
            node.status = 'provisioning'
            node.progress = 0

        db().commit()

        rpc.cast('naily', provision_message)

        return task_provision
Example #30
File: manager.py Project: teran/fuel-web
    def assign_admin_ips(cls, node_id, num=1):
        """Method for assigning admin IP addresses to nodes.

        :param node_id: Node database ID.
        :type  node_id: int
        :param num: Number of IP addresses for node.
        :type  num: int
        :returns: None
        """
        admin_net_id = cls.get_admin_network_group_id()
        node_admin_ips = db().query(IPAddr).filter_by(
            node=node_id,
            network=admin_net_id
        ).all()

        if not node_admin_ips or len(node_admin_ips) < num:
            admin_net = db().query(NetworkGroup).get(admin_net_id)
            logger.debug(
                u"Trying to assign admin ips: node=%s count=%s",
                node_id,
                num - len(node_admin_ips)
            )
            free_ips = cls.get_free_ips(
                admin_net.id,
                num=num - len(node_admin_ips)
            )
            logger.info(len(free_ips))
            for ip in free_ips:
                ip_db = IPAddr(
                    node=node_id,
                    ip_addr=ip,
                    network=admin_net_id
                )
                db().add(ip_db)
            db().commit()
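
A usage sketch under the assumptions above (the node id is hypothetical and must exist in the DB):

    # make sure node 42 has two IP addresses in the admin network
    cls.assign_admin_ips(42, num=2)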
Example #31
File: node.py Project: dp2014/fuel-web
    def prepare_for_deployment(cls, instances):
        """Prepare environment for deployment. Assign IPs for all networks."""
        if not instances:
            logger.debug("prepare_for_deployment was called with no instances")
            return

        cluster = instances[0].cluster
        netmanager = Cluster.get_network_manager(cluster)

        if cluster.network_config.configuration_template:
            return netmanager.assign_ips_for_nodes_w_template(
                cluster, instances)

        nodes_by_id = dict((n.id, n) for n in instances)

        query = (db().query(
            models.Node.id, models.NetworkGroup.id, models.NetworkGroup.name,
            models.NetworkGroup.meta).join(models.NodeGroup.nodes).join(
                models.NodeGroup.networks).filter(
                    models.NodeGroup.cluster_id == cluster.id,
                    models.NetworkGroup.name !=
                    consts.NETWORKS.fuelweb_admin).order_by(
                        models.NetworkGroup.id))

        # Group by NetworkGroup.id
        for key, items in itertools.groupby(query, operator.itemgetter(1)):
            items = list(items)
            network_name = items[0][2]
            network_metadata = items[0][3]

            if not network_metadata.get('notation'):
                continue

            nodes = [
                nodes_by_id[item[0]] for item in items
                if item[0] in nodes_by_id
            ]
            netmanager.assign_ips(nodes, network_name)

        netmanager.assign_admin_ips(instances)
Example #32
    def create_networks(self, nw_group):
        '''Create networks for a network group.

        :param nw_group: NetworkGroup object.
        :type  nw_group: NetworkGroup
        :returns: None
        '''
        fixnet = IPNetwork(nw_group.cidr)
        subnet_bits = int(math.ceil(math.log(nw_group.network_size, 2)))
        logger.debug("Specified network size requires %s bits", subnet_bits)
        subnets = list(fixnet.subnet(32 - subnet_bits, count=nw_group.amount))
        logger.debug("Base CIDR sliced on subnets: %s", subnets)

        for net in nw_group.networks:
            logger.debug("Deleting old network with id=%s, cidr=%s", net.id,
                         net.cidr)
            ips = db().query(IPAddr).filter(IPAddr.network == net.id).all()
            map(db().delete, ips)
            db().delete(net)
            db().commit()
        # Dmitry's hack for clearing VLANs without networks
        self.clear_vlans()
        db().commit()
        nw_group.networks = []

        for n in xrange(nw_group.amount):
            vlan_id = None
            if nw_group.vlan_start is not None:
                vlan_db = db().query(Vlan).get(nw_group.vlan_start + n)
                if vlan_db:
                    logger.warning("Intersection with existing vlan_id: %s",
                                   vlan_db.id)
                else:
                    vlan_db = Vlan(id=nw_group.vlan_start + n)
                    db().add(vlan_db)
                vlan_id = vlan_db.id
                logger.debug("Created VLAN object, vlan_id=%s", vlan_id)
            gateway = None
            if nw_group.gateway:
                gateway = nw_group.gateway
            net_db = Network(release=nw_group.release,
                             name=nw_group.name,
                             access=nw_group.access,
                             cidr=str(subnets[n]),
                             vlan_id=vlan_id,
                             gateway=gateway,
                             network_group_id=nw_group.id)
            db().add(net_db)
        db().commit()
Example #33
    def __init__(self):
        settings_files = []
        logger.debug("Looking for settings.yaml package config "
                     "using old style __file__")
        project_path = os.path.dirname(__file__)
        project_settings_file = os.path.join(project_path, 'settings.yaml')
        settings_files.append(project_settings_file)

        settings_files.append('/etc/nailgun/settings.yaml')
        settings_files.append('/etc/nailgun/version.yaml')
        self.config = {}

        for sf in settings_files:
            try:
                logger.debug("Trying to read config file %s" % sf)
                self.update_from_file(sf)
            except Exception as e:
                logger.debug("Error while reading config file %s: %s" %
                             (sf, str(e)))

        if int(self.config.get("DEVELOPMENT")):
            logger.info("DEVELOPMENT MODE ON:")
            here = os.path.abspath(
                os.path.join(os.path.dirname(__file__), '..'))
            self.config.update({
                'STATIC_DIR': os.path.join(here, 'static'),
                'TEMPLATE_DIR': os.path.join(here, 'static')
            })
            logger.info("Static dir is %s" % self.config.get("STATIC_DIR"))
            logger.info("Template dir is %s" % self.config.get("TEMPLATE_DIR"))
Example #34
    def execute(self, nodes_to_deployment, deployment_tasks=None):
        deployment_tasks = deployment_tasks or []

        # locking nodes for update
        objects.NodeCollection.lock_nodes(nodes_to_deployment)
        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_deployment)

        logger.debug('Nodes to deploy: {0}'.format(' '.join(
            [n.fqdn for n in nodes_to_deployment])))
        task_deployment = Task(name=consts.TASK_NAMES.deployment,
                               cluster=self.cluster)
        db().add(task_deployment)

        deployment_message = self._call_silently(
            task_deployment,
            tasks.DeploymentTask,
            nodes_to_deployment,
            deployment_tasks=deployment_tasks,
            method_name='message')

        db().refresh(task_deployment)

        # locking task
        task_deployment = objects.Task.get_by_uid(task_deployment.id,
                                                  fail_if_not_found=True,
                                                  lock_for_update=True)
        # locking nodes
        objects.NodeCollection.lock_nodes(nodes_to_deployment)

        task_deployment.cache = deployment_message

        for node in nodes_to_deployment:
            node.status = 'deploying'
            node.progress = 0

        db().commit()

        rpc.cast('naily', deployment_message)

        return task_deployment
Example #35
    def check_before_deployment(self, supertask):
        # checking admin intersection with untagged
        if self.cluster.net_provider == 'nova_network':
            net_serializer = NovaNetworkConfigurationSerializer
        elif self.cluster.net_provider == 'neutron':
            net_serializer = NeutronNetworkConfigurationSerializer

        network_info = net_serializer.serialize_for_cluster(self.cluster)
        network_info["networks"] = [
            n for n in network_info["networks"] if n["name"] != "fuelweb_admin"
        ]

        check_networks = supertask.create_subtask('check_networks')
        self._call_silently(check_networks,
                            tasks.CheckNetworksTask,
                            data=network_info,
                            check_admin_untagged=True)
        db().refresh(check_networks)
        if check_networks.status == 'error':
            logger.warning("Checking networks failed: %s",
                           check_networks.message)
            raise errors.CheckBeforeDeploymentError(check_networks.message)
        db().delete(check_networks)
        db().commit()

        # checking prerequisites
        check_before = supertask.create_subtask('check_before_deployment')
        logger.debug("Checking prerequisites task: %s", check_before.uuid)
        self._call_silently(check_before, tasks.CheckBeforeDeploymentTask)
        db().refresh(check_before)
        # if failed to check prerequisites
        # then task is already set to error
        if check_before.status == 'error':
            logger.warning("Checking prerequisites failed: %s",
                           check_before.message)
            raise errors.CheckBeforeDeploymentError(check_before.message)
        logger.debug(
            "Checking prerequisites is successful, starting deployment...")
        db().delete(check_before)
        db().commit()
Example #36
    def update_task_status(cls, uuid, status, progress, msg="", result=None):
        logger.debug("Updating task: %s", uuid)
        task = db().query(Task).filter_by(uuid=uuid).first()
        if not task:
            logger.error(
                "Can't set status='%s', message='%s': no task "
                "with UUID %s found!", status, msg, uuid)
            return
        previous_status = task.status
        data = {
            'status': status,
            'progress': progress,
            'message': msg,
            'result': result
        }

        for key, value in data.iteritems():
            if value is not None:
                setattr(task, key, value)
                logger.info(u"Task {0} ({1}) {2} is set to {3}".format(
                    task.uuid, task.name, key, value))
        db().add(task)
        db().commit()

        if previous_status != status and task.cluster_id:
            logger.debug(
                "Updating cluster status: "
                "cluster_id: %s status: %s", task.cluster_id, status)
            cls.update_cluster_status(uuid)
        if task.parent:
            logger.debug("Updating parent task: %s.", task.parent.uuid)
            cls.update_parent_task(task.parent.uuid)
Example #37
    def create_pv(self, volume_info, size=None):
        """Allocates all available space if size is None.

        Size in parameter should include size of lvm meta
        """
        name = volume_info['id']
        logger.debug('Creating PV: disk=%s vg=%s, size=%s', self.id, name,
                     str(size))

        if size is None:
            logger.debug(
                'Size is not defined. Will use all free space on this disk.')
            size = self.free_space

        self.free_space -= size
        # Don't allocate lvm meta if size equals 0
        lvm_meta_size = self.get_lvm_meta_from_pool() if size else 0

        logger.debug('Appending PV to volumes.')
        self.volumes.append({
            'type': 'pv',
            'vg': name,
            'size': size + lvm_meta_size,
            'lvm_meta_size': lvm_meta_size
        })
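
A short usage sketch (the receiver object, volume group names, and sizes here are illustrative):

    disk.create_pv({'id': 'os'}, size=20480)  # fixed-size PV for the 'os' VG
    disk.create_pv({'id': 'vm'})              # take all remaining free space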
Example #38
    def execute(self, nodes_to_provision):
        """Run provisioning task on specified nodes
        """
        # locking nodes
        nodes_ids = [node.id for node in nodes_to_provision]
        nodes = objects.NodeCollection.filter_by_list(None,
                                                      'id',
                                                      nodes_ids,
                                                      order_by='id')
        objects.NodeCollection.lock_for_update(nodes).all()

        objects.NodeCollection.update_slave_nodes_fqdn(nodes_to_provision)
        logger.debug('Nodes to provision: {0}'.format(' '.join(
            [n.fqdn for n in nodes_to_provision])))

        task_provision = Task(name='provision', cluster=self.cluster)
        db().add(task_provision)
        db().commit()

        provision_message = self._call_silently(task_provision,
                                                tasks.ProvisionTask,
                                                nodes_to_provision,
                                                method_name='message')

        task_provision = objects.Task.get_by_uid(task_provision.id,
                                                 fail_if_not_found=True,
                                                 lock_for_update=True)
        task_provision.cache = provision_message
        objects.NodeCollection.lock_for_update(nodes).all()

        for node in nodes_to_provision:
            node.pending_addition = False
            node.status = NODE_STATUSES.provisioning
            node.progress = 0

        db().commit()

        rpc.cast('naily', provision_message)

        return task_provision
Example #39
File: node.py Project: blkart/fuel-web
    def update_pending_roles(cls, instance, new_pending_roles):
        """Update pending_roles for Node instance.
        Logs a warning if the node doesn't belong to a Cluster

        :param instance: Node instance
        :param new_pending_roles: list of new pending role names
        :returns: None
        """
        if not instance.cluster_id:
            logger.warning(
                u"Attempting to assign pending roles to node "
                u"'{0}' which isn't added to cluster".format(
                    instance.name or instance.id
                )
            )
            return

        logger.debug(
            u"Updating pending roles for node {0}: {1}".format(
                instance.id,
                new_pending_roles
            )
        )

        if new_pending_roles == []:
            instance.pending_role_list = []
            #TODO(enchantner): research why the hell we need this
            Cluster.clear_pending_changes(
                instance.cluster,
                node_id=instance.id
            )
        else:
            instance.pending_role_list = db().query(models.Role).filter_by(
                release_id=instance.cluster.release_id,
            ).filter(
                models.Role.name.in_(new_pending_roles)
            ).all()

        db().flush()
        db().refresh(instance)
Example #40
    def task_in_orchestrator(cls, **kwargs):
        logger.info("RPC method task_in_orchestrator received: %s",
                    jsonutils.dumps(kwargs))

        task_uuid = kwargs.get('task_uuid')

        try:
            task = objects.Task.get_by_uuid(task_uuid,
                                            fail_if_not_found=True,
                                            lock_for_update=True)
            if task.status == consts.TASK_STATUSES.pending:
                objects.Task.update(task,
                                    {'status': consts.TASK_STATUSES.running})
                logger.debug("Task '%s' is acknowledged as running", task_uuid)
            else:
                logger.debug(
                    "Task '%s' in status '%s' can not "
                    "be acknowledged as running", task_uuid, task.status)
        except nailgun_errors.ObjectNotFound:
            logger.warning(
                "Task '%s' acknowledgement as running failed "
                "due to task doesn't exist in DB", task_uuid)
Example #41
    def update_verify_networks(cls, instance, status,
                               progress, msg, result):
        #TODO(dshulyak) move network tests into ostf
        previous_status = instance.status

        statuses = [sub.status for sub in instance.subtasks]
        messages = [sub.message for sub in instance.subtasks]
        messages.append(msg)
        statuses.append(status)
        if any(st == 'error' for st in statuses):
            instance.status = 'error'
        else:
            instance.status = status or instance.status
        instance.progress = progress or instance.progress
        instance.result = result or instance.result
        # join messages if not None or ""
        instance.message = '\n'.join([m for m in messages if m])
        if previous_status != instance.status and instance.cluster_id:
            logger.debug("Updating cluster status: "
                         "cluster_id: %s status: %s",
                         instance.cluster_id, status)
            cls._update_cluster_data(instance)
Example #42
    def execute(self):
        # locking required tasks
        locked_tasks = objects.TaskCollection.lock_cluster_tasks(
            self.cluster.id)
        # locking cluster
        objects.Cluster.get_by_uid(self.cluster.id,
                                   fail_if_not_found=True,
                                   lock_for_update=True)
        # locking nodes
        nodes = objects.NodeCollection.filter_by(None,
                                                 cluster_id=self.cluster.id)
        nodes = objects.NodeCollection.order_by(nodes, 'id')
        objects.NodeCollection.lock_for_update(nodes).all()

        current_cluster_tasks = objects.TaskCollection.filter_by_list(
            locked_tasks, 'name', (consts.TASK_NAMES.cluster_deletion, ))

        deploy_running = objects.TaskCollection.filter_by(
            None,
            cluster_id=self.cluster.id,
            name=consts.TASK_NAMES.deploy,
            status=consts.TASK_STATUSES.running)
        deploy_running = objects.TaskCollection.order_by(deploy_running,
                                                         'id').first()
        if deploy_running:
            logger.error(u"Deleting cluster '{0}' "
                         "while deployment is still running".format(
                             self.cluster.name))
            # Updating action logs for deploy task
            TaskHelper.set_ready_if_not_finished(deploy_running)

        logger.debug("Removing cluster tasks")
        for task in current_cluster_tasks:
            if task.status == consts.TASK_STATUSES.running:
                db().rollback()
                raise errors.DeletionAlreadyStarted()
            elif task.status in (consts.TASK_STATUSES.ready,
                                 consts.TASK_STATUSES.error):
                for subtask in task.subtasks:
                    db().delete(subtask)
                db().delete(task)
        db().flush()

        logger.debug("Labeling cluster nodes to delete")
        for node in self.cluster.nodes:
            node.pending_deletion = True
            db().add(node)
        db().flush()

        self.cluster.status = consts.CLUSTER_STATUSES.remove
        db().add(self.cluster)

        logger.debug("Creating cluster deletion task")
        task = Task(name=consts.TASK_NAMES.cluster_deletion,
                    cluster=self.cluster)
        db().add(task)
        db().commit()
        self._call_silently(task, tasks.ClusterDeletionTask)
        return task
Example #43
    def process_task(self, task, node_ids):
        """Processes one task one nodes of cluster.

        :param task: the task instance
        :param node_ids: the list of nodes, where this tasks should run
        """

        logger.debug("applying task '%s' for nodes: %s", task['id'], node_ids)
        task_serializer = self.factory.create_serializer(task)
        for node_id in node_ids:
            try:
                task = task_serializer.serialize(node_id)
            except Exception:
                logger.exception("Failed to serialize task %s", task['id'])
                raise

            node_tasks = self.tasks_graph.setdefault(node_id, {})
            # de-duplicate tasks on the node: since a task can be added
            # after a group is expanded, we need to overwrite the existing
            # task if it is skipped and the new one is not
            if self.need_update_task(node_tasks, task):
                node_tasks[task['id']] = task
Example #44
    def remove_cluster_resp(cls, **kwargs):
        logger.info("RPC method remove_cluster_resp received: %s" % kwargs)
        task_uuid = kwargs.get('task_uuid')

        cls.remove_nodes_resp(**kwargs)

        task = cls.db.query(Task).filter_by(uuid=task_uuid).first()
        cluster = task.cluster

        if task.status in ('ready', ):
            logger.debug("Removing environment itself")
            cluster_name = cluster.name

            nws = itertools.chain(
                *[n.networks for n in cluster.network_groups])
            ips = cls.db.query(IPAddr).filter(
                IPAddr.network.in_([n.id for n in nws]))
            map(cls.db.delete, ips)
            cls.db.commit()

            cls.db.delete(cluster)
            cls.db.commit()

            # Dmitry's hack for clearing VLANs without networks
            cls.network_manager.clear_vlans()

            notifier.notify(
                "done", u"Environment '%s' and all its nodes are deleted" %
                (cluster_name))

        elif task.status in ('error', ):
            cluster.status = 'error'
            cls.db.add(cluster)
            cls.db.commit()
            if not task.message:
                task.message = "Failed to delete nodes:\n{0}".format(
                    cls._generate_error_message(task,
                                                error_types=('deletion', )))
            notifier.notify("error", task.message, cluster.id)
Example #45
    def clear_pending_changes(cls, instance, node_id=None):
        """Clear pending changes for current Cluster.
        If node_id is specified then only clears changes connected
        to this node.

        :param instance: Cluster instance
        :param node_id: node id for changes
        :returns: None
        """
        logger.debug(
            u"Removing pending changes in environment {0}{1}".format(
                instance.id,
                u" where node_id={0}".format(node_id) if node_id else u""
            )
        )
        chs = db().query(models.ClusterChanges).filter_by(
            cluster_id=instance.id
        )
        if node_id:
            chs = chs.filter_by(node_id=node_id)
        map(db().delete, chs.all())
        db().flush()
Example #46
    def process_deployment(cls, data, cluster, nodes, **kwargs):
        """Find and replace cpu pinning parameters in deployment data,
           this includes changing nova hash, nodes hash on every node and
           contrail plugin parameters.
        """
        nodes_data = [
            node_data for node_data in data if node_data['uid'] != 'master'
        ]
        pinning_nodes = [
            node_data['uid'] for node_data in nodes_data
            if CpuPinOverride.get_by_uid(node_data['uid'])
        ]
        logger.debug(pinning_nodes)

        for node_data in nodes_data:
            pins_data = CpuPinOverride.get_by_uid(node_data['uid'])
            if pins_data:
                # Setting nova cores and kernel params
                node_data['nova']['cpu_pinning'] = pins_data.nova_cores
                kparams = node_data['kernel_params']['kernel']
                newkparams = PinningOverridePipeline._generate_kernel_params(
                    kparams.split(), pins_data)
                node_data['kernel_params']['kernel'] = newkparams
                node_data['release']['attributes_metadata']['editable'][
                    'kernel_params']['kernel']['value'] = newkparams

                # Setting contrail vrouter coremask
                if pins_data.vrouter_cores and 'dpdk' in node_data['roles']:
                    pins_str = ','.join(pins_data.vrouter_cores)
                    # vn637v. Concatenate pins + empty string to convert value from
                    # FixNum to String
                    node_data['contrail']['vrouter_core_mask'] = ' ' + pins_str

            # Overriding network_metadata['nodes'] hash on all nodes
            for nm_val in node_data['network_metadata']['nodes'].values():
                if nm_val['uid'] in pinning_nodes:
                    nm_val['nova_cpu_pinning_enabled'] = True
        logger.debug('Overriding CPU pinning values in deployment data')
        return data
Example #47
    def serialize(self, node_id):
        logger.debug(
            "serialize task %s for node %s",
            self.task_template['id'], node_id
        )
        task = utils.traverse(
            self.task_template,
            utils.text_format_safe,
            self.context.get_formatter_context(node_id),
            {
                'yaql_exp': self.context.get_yaql_interpreter(node_id)
            }
        )
        if not self.should_execute(task, node_id):
            logger.debug(
                "Task %s is skipped by condition.", self.task_template['id']
            )
            return super(DefaultTaskSerializer, self).serialize(node_id)

        task.setdefault('parameters', {}).setdefault('cwd', '/')
        task.setdefault('fail_on_error', True)
        return self.get_required_fields(task)
Example #48
    def _get_allowed_nodes_statuses(self, context):
        """Extract node statuses that allow distributed serialization"""
        common = context.new.get('common', {})
        cluster = common.get('cluster', {})
        logger.debug(
            "Getting allowed nodes statuses to use as serialization "
            "workers for cluster %s", cluster.get('id'))
        check_fields = {
            'ds_use_ready': consts.NODE_STATUSES.ready,
            'ds_use_provisioned': consts.NODE_STATUSES.provisioned,
            'ds_use_discover': consts.NODE_STATUSES.discover,
            'ds_use_error': consts.NODE_STATUSES.error
        }
        statuses = set()
        for field, node_status in check_fields.items():
            if common.get(field):
                statuses.add(node_status)

        logger.debug(
            "Allowed nodes statuses to use as serialization workers "
            "for cluster %s are: %s", cluster.get('id'), statuses)
        return statuses
Example #49
    def add_pending_changes(cls, instance, changes_type, node_id=None):
        """Add pending changes for current Cluster.

        If node_id is specified then links created changes with node.

        :param instance: Cluster instance
        :param changes_type: name of changes to add
        :param node_id: node id for changes
        :returns: None
        """
        logger.debug(
            u"New pending changes in environment {0}: {1}{2}".format(
                instance.id,
                changes_type,
                u" node_id={0}".format(node_id) if node_id else u""
            )
        )

        # TODO(enchantner): check if node belongs to cluster
        ex_chs = db().query(models.ClusterChanges).filter_by(
            cluster=instance,
            name=changes_type
        )
        if not node_id:
            ex_chs = ex_chs.first()
        else:
            ex_chs = ex_chs.filter_by(node_id=node_id).first()
        # do nothing if changes with the same name already pending
        if ex_chs:
            return
        ch = models.ClusterChanges(
            cluster_id=instance.id,
            name=changes_type
        )
        if node_id:
            ch.node_id = node_id

        db().add(ch)
        db().flush()
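
A usage sketch matching the call seen in Example #9 (the node id is hypothetical):

    # record that cluster attributes changed; a repeated identical call
    # is a no-op thanks to the existing-changes check above
    Cluster.add_pending_changes(cluster, "attributes")
    # tie a change to a particular node
    Cluster.add_pending_changes(cluster, "disks", node_id=node.id)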
Example #50
    def detach_from_model(cls, instance, graph_type=None):
        """Detach existing deployment graph to given model if it exists.

        :param instance: model that should have relation to graph
        :type instance: models.Plugin|models.Cluster|models.Release|
        :param graph_type: graph type
        :type graph_type: basestring
        :returns: the detached graph if one existed, otherwise None
        """
        if graph_type is None:
            graph_type = consts.DEFAULT_DEPLOYMENT_GRAPH_TYPE
        existing_graph = cls.get_for_model(instance, graph_type)
        if existing_graph:
            association = cls.get_association_for_model(instance)
            instance.deployment_graphs_assoc.filter(
                association.type == graph_type).delete()
            db().flush()
            logger.debug(
                'Graph with ID={0} was detached from model {1} with ID={2}'.
                format(existing_graph.id, instance, instance.id))
            return existing_graph
Example #51
    def message(cls, task):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        task.cluster.prepare_for_deployment()
        nodes = TaskHelper.nodes_to_deploy(task.cluster)
        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(cluster=task.cluster).order_by(
                Node.id):
            # We must not pass nodes which are scheduled for deletion.
            if n.pending_deletion:
                continue

            if n.id in nodes_ids:
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []
                if n.status == 'deploying':
                    n.status = 'provisioned'
                n.progress = 0
                db().add(n)
                db().commit()

        # here we replace deployment data if user redefined them
        serialized_cluster = task.cluster.replaced_deployment_info or \
            deployment_serializers.serialize(task.cluster)

        # After serialization set pending_addition to False
        for node in db().query(Node).filter(Node.id.in_(nodes_ids)):
            node.pending_addition = False
        db().commit()

        return {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'deployment_info': serialized_cluster
            }
        }
Example #52
    def serialize(self, node_id, formatter_context=None):
        """Serialize task in expected by orchestrator format

        If serialization is performed on the remote worker
        we should pass formatter_context parameter with values
        from the master node settings

        :param formatter_context: formatter context
        :param node_id: the target node_id
        """

        logger.debug("serialize task %s for node %s", self.task_template['id'],
                     node_id)
        formatter_context = formatter_context \
            or self.context.get_formatter_context(node_id)
        task = utils.traverse(
            self.task_template, utils.text_format_safe, formatter_context, {
                'yaql_exp':
                self.context.get_yaql_interpreter(node_id,
                                                  self.task_template['id'])
            })
        return self.normalize(self.finalize(task, node_id))
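
A hedged call sketch; the serializer instance and the formatter_context keys are assumptions about what the master node provides:

    # Serialize for node '1' locally, deriving the context from
    # self.context...
    local_task = serializer.serialize('1')
    # ...or on a remote worker, passing master-node values explicitly.
    remote_task = serializer.serialize(
        '1', formatter_context={'SETTINGS': settings})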
Example #53
File: manager.py Project: tsipa/fuel-web
    def execute(self, nodes_to_provision):
        """Run provisioning task on specified nodes

        Constraints: currently this task cannot deploy RedHat.
                     For redhat here should be added additional
                     tasks e.i. check credentials, check licenses,
                     redhat downloading.
                     Status of this task you can track here:
                     https://blueprints.launchpad.net/fuel/+spec
                           /nailgun-separate-provisioning-for-redhat
        """
        TaskHelper.update_slave_nodes_fqdn(nodes_to_provision)
        logger.debug('Nodes to provision: {0}'.format(
            ' '.join([n.fqdn for n in nodes_to_provision])))

        task_provision = Task(name='provision', cluster=self.cluster)
        db().add(task_provision)
        db().commit()

        provision_message = self._call_silently(
            task_provision,
            tasks.ProvisionTask,
            nodes_to_provision,
            method_name='message'
        )
        db().refresh(task_provision)

        task_provision.cache = provision_message

        for node in nodes_to_provision:
            node.pending_addition = False
            node.status = 'provisioning'
            node.progress = 0

        db().commit()

        rpc.cast('naily', provision_message)

        return task_provision
Example #54
def cast(name, message, service=False):
    """Cast an RPC message to the orchestrator (Astute) over AMQP."""
    logger.debug("RPC cast to orchestrator:\n{0}".format(
        jsonutils.dumps(message, indent=4)))
    use_queue = naily_queue if not service else naily_service_queue
    use_exchange = naily_exchange if not service else naily_service_exchange
    with Connection(conn_str) as conn:
        with conn.Producer(serializer='json') as producer:
            publish = functools.partial(producer.publish,
                                        message,
                                        exchange=use_exchange,
                                        routing_key=name,
                                        declare=[use_queue])
            try:
                publish()
            except amqp_exceptions.PreconditionFailed as e:
                logger.warning(six.text_type(e))
                # (dshulyak) we should drop both exchanges/queues in order
                # for astute to be able to recover temporary queues
                utils.delete_entities(conn, naily_service_exchange,
                                      naily_service_queue, naily_exchange,
                                      naily_queue)
                publish()
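
A usage sketch built from the message shapes shown in these examples; the task_uuid value is illustrative:

    message = {
        'method': 'provision',
        'respond_to': 'provision_resp',
        'args': {'task_uuid': 'abc-123', 'provisioning_info': {}}
    }
    cast('naily', message)                # main exchange/queue
    cast('naily', message, service=True)  # service exchange/queue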
Example #55
    def _execute_sync(self, sub_transaction):
        cluster = sub_transaction.cluster
        graph = objects.Cluster.get_deployment_graph(
            cluster, sub_transaction.graph_type)
        nodes = _get_nodes_to_run(cluster, graph.get('node_filter'),
                                  sub_transaction.cache.get('nodes'))
        logger.debug("execute graph %s on nodes %s",
                     sub_transaction.graph_type, [n.id for n in nodes])
        # we must initialize primary tags for the cluster before
        # the tag resolver is created
        objects.Cluster.set_primary_tags(cluster, nodes)
        resolver = resolvers.TagResolver(nodes)
        _adjust_graph_tasks(graph, cluster, resolver,
                            sub_transaction.cache.get('tasks'))

        context = lcm.TransactionContext(
            _get_expected_state(cluster, nodes),
            _get_current_state(cluster, nodes, graph['tasks'],
                               sub_transaction.cache.get('force')))

        _prepare_nodes(nodes, sub_transaction.dry_run, context.new['nodes'])

        # Attach desired state to the sub transaction, so when we continue
        # our top-level transaction, the new state will be calculated on
        # top of this.
        _dump_expected_state(sub_transaction, context.new, graph['tasks'])

        message = make_astute_message(sub_transaction, context, graph,
                                      resolver)
        objects.Transaction.on_start(sub_transaction)
        helpers.TaskHelper.create_action_log(sub_transaction)

        # Once rpc.cast() is called, the message is sent to Astute. By
        # that moment all transaction instances must exist in the
        # database; otherwise the RPC receiver may not find the entry
        # to update and we may get a wrong result.
        db().commit()
        rpc.cast('naily', [message])
Example #56
    def message(cls, task, nodes):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)

        nodes_ids = [n.id for n in nodes]
        for n in db().query(Node).filter_by(cluster=task.cluster).order_by(
                Node.id):

            if n.id in nodes_ids:
                if n.pending_roles:
                    n.roles += n.pending_roles
                    n.pending_roles = []

                # If the receiver for some reason didn't update the
                # node's status to 'provisioned' when deployment
                # started, we should do it in nailgun
                if n.status == 'deploying':
                    n.status = 'provisioned'
                n.progress = 0
                db().add(n)
                db().commit()

        # here we replace deployment data if user redefined them
        serialized_cluster = task.cluster.replaced_deployment_info or \
            deployment_serializers.serialize(task.cluster, nodes)

        # After serialization set pending_addition to False
        for node in nodes:
            node.pending_addition = False
        db().commit()

        return {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'deployment_info': serialized_cluster
            }
        }
Example #57
    def resolve(self, roles, policy=None):
        result = set()
        if roles == consts.TASK_ROLES.all:
            # optimization
            result = {
                uid
                for nodes in six.itervalues(self.__mapping) for uid in nodes
            }
        else:
            if isinstance(roles, six.string_types):
                roles = [roles]

            if not isinstance(roles, (list, tuple, set)):
                # TODO(bgaifullin) fix wrong format for roles in tasks.yaml
                # After that it will be possible to raise an exception here
                logger.warn(
                    'Wrong roles format, `roles` should be a list or "*": %s',
                    roles)
                return result

            for role in roles:
                if role in self.SPECIAL_ROLES:
                    result.update(self.SPECIAL_ROLES[role])
                else:
                    pattern = NameMatchingPolicy.create(role)
                    for node_role, nodes_ids in six.iteritems(self.__mapping):
                        if pattern.match(node_role):
                            result.update(nodes_ids)

        # In some cases only a single arbitrary node from the pool is
        # needed, e.g. any one controller. Select the first node
        # from the pool to distribute load.
        if result and policy == consts.NODE_RESOLVE_POLICY.any:
            result = {next(iter(result))}

        logger.debug("Role '%s' and policy '%s' was resolved to: %s", roles,
                     policy, result)
        return result
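
A sketch of the resolution semantics, assuming an internal mapping of {'controller': {'1', '2'}, 'compute': {'3'}} and that consts.TASK_ROLES.all is '*':

    resolver.resolve('*')             # all uids: {'1', '2', '3'}
    resolver.resolve(['controller'])  # {'1', '2'}
    resolver.resolve(['contr*'])      # wildcard pattern: {'1', '2'}
    # the 'any' policy keeps a single arbitrary node, e.g. {'1'}
    resolver.resolve(['controller'], policy=consts.NODE_RESOLVE_POLICY.any)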
Example #58
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        nodes = TaskHelper.nodes_to_provision(task.cluster)
        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

        # We need to assign admin IPs, and only after that
        # prepare the syslog directories
        task.cluster.prepare_for_provisioning()

        for node in nodes:
            if USE_FAKE:
                continue

            if node.offline:
                raise errors.NodeOffline(
                    u'Node "%s" is offline.'
                    ' Remove it from environment and try again.' %
                    node.full_name)

            TaskHelper.prepare_syslog_dir(node)

            node.status = 'provisioning'
            db().commit()

        serialized_cluster = task.cluster.replaced_provisioning_info or \
            provisioning_serializers.serialize(task.cluster)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'provisioning_info': serialized_cluster
            }
        }

        return message
Example #59
    def update_by_agent(cls, instance, data):
        """Update Node instance with some specific cases for agent.

        * don't update provisioning or error state back to discover
        * don't update volume information if disks arrays is empty

        :param data: dictionary of key-value pairs as object fields
        :returns: Node instance
        """
        # don't update provisioning and error back to discover
        if instance.status in ('provisioning', 'error'):
            if data.get('status', 'discover') == 'discover':
                logger.debug(u"Node {0} has provisioning or error status - "
                             u"status not updated by agent".format(
                                 instance.human_readable_name))

                data['status'] = instance.status

        # don't update volume information, if agent has sent an empty array
        meta = data.get('meta', {})
        if meta and len(meta.get('disks', [])) == 0 \
                and instance.meta.get('disks'):

            logger.warning(u'Node {0} has received an empty disks array - '
                           u'volume information will not be updated'.format(
                               instance.human_readable_name))
            meta['disks'] = instance.meta['disks']

        # (dshulyak) change this check to NODE_STATUSES.deploying
        # after we start reusing IPs from the DHCP range
        netmanager = Cluster.get_network_manager()
        admin_ng = netmanager.get_admin_network_group(instance.id)
        if data.get('ip') and not netmanager.is_same_network(
                data['ip'], admin_ng.cidr):
            logger.debug('Corrupted network data %s, skipping update',
                         instance.id)
            return instance
        return cls.update(instance, data)
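
An illustrative agent payload, assuming the enclosing class is the Node object; per the special cases above, the empty disks array is ignored and the stored disks are kept:

    data = {
        'status': 'discover',   # ignored while provisioning or in error
        'ip': '10.20.0.5',      # must belong to the admin network CIDR
        'meta': {'disks': []},  # empty -> previous disks are preserved
    }
    node = Node.update_by_agent(node_instance, data)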
Example #60
    def checkout(self, instance):
        fetch_file = os.path.join(const.REPOS_DIR, instance.repo_name,
                                  '.git/FETCH_HEAD')
        if os.path.exists(fetch_file):
            current_ts = time.time()
            cluster = Cluster.get_by_uid(instance.env_id)
            last_fetch = os.stat(fetch_file).st_mtime
            if cluster.status != CLUSTER_STATUSES.deployment and \
                    current_ts - last_fetch < const.REPO_TTL:
                return

        logger.debug("Repo TTL exceeded. Fetching code...")
        ssh_cmd = self._get_ssh_cmd(instance.repo_name)

        if not os.path.exists(self._get_key_path(instance.repo_name)):
            logger.debug('Key file does not exist. Creating...')
            self._create_key_file(instance.repo_name)

        with instance.repo.git.custom_environment(GIT_SSH=ssh_cmd):
            commit = instance.repo.remotes.origin.fetch(refspec=instance.ref)
            commit = commit[0].commit
            instance.repo.head.reference = commit
            instance.repo.head.reset(index=True, working_tree=True)