Example #1
 def update_slave_nodes_fqdn(cls, nodes):
     for n in nodes:
         fqdn = cls.make_slave_fqdn(n.id, n.role)
         if n.fqdn != fqdn:
             n.fqdn = fqdn
             logger.debug("Updating node fqdn: %s %s", n.id, n.fqdn)
             orm().commit()
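Note: every snippet on this page obtains its database session by calling orm() and then uses ordinary session methods (query, add, delete, refresh, commit). A minimal sketch of what such a factory might look like, assuming SQLAlchemy's scoped_session and a purely hypothetical connection URL:

    from sqlalchemy import create_engine
    from sqlalchemy.orm import scoped_session, sessionmaker

    # Hypothetical connection URL; the real project builds it from its settings.
    engine = create_engine("postgresql://nailgun:nailgun@localhost/nailgun")

    # scoped_session gives a thread-local session factory: calling orm() again
    # in the same thread returns the same session object, which is why the
    # examples can freely interleave orm().add(...) and orm().commit().
    orm = scoped_session(sessionmaker(bind=engine))

With something like this in place, the recurring pattern in the examples below is: query or mutate model objects, add() them to the session, and commit().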
Example #2
 def execute(cls, task):
     logger.debug("ProvisionTask.execute(task=%s)" % task.uuid)
     message = cls.message(task)
     task.cache = message
     orm().add(task)
     orm().commit()
     rpc.cast('naily', message)
Example #3
    def execute(self, task, data):
        task_uuid = task.uuid
        nodes = []
        for n in task.cluster.nodes:
            node_json = {'uid': n.id, 'networks': []}
            for nic in n.interfaces:
                vlans = []
                for ng in nic.assigned_networks:
                    # Handle FuelWeb admin network first.
                    if not ng.cluster_id:
                        vlans.append(0)
                        continue
                    data_ng = filter(
                        lambda i: i['name'] == ng.name,
                        data
                    )[0]
                    vlans.extend(data_ng['vlans'])
                if not vlans:
                    continue
                node_json['networks'].append(
                    {'iface': nic.name, 'vlans': vlans}
                )
            nodes.append(node_json)

        message = {'method': 'verify_networks',
                   'respond_to': 'verify_networks_resp',
                   'args': {'task_uuid': task.uuid,
                            'nodes': nodes}}
        logger.debug("Network verification is called with: %s", message)

        task.cache = message
        orm().add(task)
        orm().commit()
        rpc.cast('naily', message)
Example #4
    def execute(self, task, data):
        task_uuid = task.uuid
        nodes = []
        for n in task.cluster.nodes:
            node_json = {'uid': n.id, 'networks': []}
            for nic in n.interfaces:
                vlans = []
                for ng in nic.assigned_networks:
                    # Handle FuelWeb admin network first.
                    if not ng.cluster_id:
                        vlans.append(0)
                        continue
                    data_ng = filter(
                        lambda i: i['name'] == ng.name,
                        data
                    )[0]
                    vlans.extend(data_ng['vlans'])
                if not vlans:
                    continue
                node_json['networks'].append(
                    {'iface': nic.name, 'vlans': vlans}
                )
            nodes.append(node_json)

        message = {'method': 'verify_networks',
                   'respond_to': 'verify_networks_resp',
                   'args': {'task_uuid': task.uuid,
                            'nodes': nodes}}
        logger.debug("Network verification is called with: %s", message)

        task.cache = message
        orm().add(task)
        orm().commit()
        rpc.cast('naily', message)
Example #5
 def execute(self, nets, vlan_ids):
     task = Task(
         name="check_networks",
         cluster=self.cluster
     )
     orm().add(task)
     orm().commit()
     self._call_silently(
         task,
         tasks.CheckNetworksTask,
         nets
     )
     orm().refresh(task)
     if task.status != 'error':
          # this one is connected with UI issues - we need to
          # distinguish whether the error happened inside nailgun or
          # somewhere in the orchestrator, and the UI does it by task name.
         task.name = "verify_networks"
         orm().add(task)
         orm().commit()
         self._call_silently(
             task,
             tasks.VerifyNetworksTask,
             vlan_ids
         )
     return task
Example #6
 def execute(cls, task):
     logger.debug("ProvisionTask.execute(task=%s)" % task.uuid)
     message = cls.message(task)
     task.cache = message
     orm().add(task)
     orm().commit()
     rpc.cast('naily', message)
Example #7
    def execute(self, task, data):
        # If not set in data then fetch from db
        if 'net_manager' in data:
            netmanager = data['net_manager']
        else:
            netmanager = task.cluster.net_manager

        if 'networks' in data:
            networks = data['networks']
        else:
            networks = map(lambda x: x.__dict__, task.cluster.network_groups)

        result = []
        err_msgs = []
        for ng in networks:
            net_errors = []
            ng_db = orm().query(NetworkGroup).get(ng['id'])
            if not ng_db:
                net_errors.append("id")
                err_msgs.append("Invalid network ID: {0}".format(ng['id']))
            else:
                if 'cidr' in ng:
                    fnet = netaddr.IPSet([ng['cidr']])

                    if fnet & netaddr.IPSet(settings.NET_EXCLUDE):
                        net_errors.append("cidr")
                        err_msgs.append(
                            "Intersection with admin "
                            "network(s) '{0}' found".format(
                                settings.NET_EXCLUDE
                            )
                        )
                    if fnet.size < ng['network_size'] * ng['amount']:
                        net_errors.append("cidr")
                        err_msgs.append(
                            "CIDR size for network '{0}' "
                            "is less than required".format(
                                ng.get('name') or ng_db.name or ng_db.id
                            )
                        )
                if ng.get('amount') > 1 and netmanager == 'FlatDHCPManager':
                    net_errors.append("amount")
                    err_msgs.append(
                        "Network amount for '{0}' is more than 1 "
                        "while using FlatDHCP manager.".format(
                            ng.get('name') or ng_db.name or ng_db.id
                        )
                    )
            if net_errors:
                result.append({
                    "id": int(ng["id"]),
                    "errors": net_errors
                })
        if err_msgs:
            task.result = result
            orm().add(task)
            orm().commit()
            full_err_msg = "\n".join(err_msgs)
            raise errors.NetworkCheckError(full_err_msg)
Example #8
 def clear_pending_changes(self, node_id=None):
     chs = orm().query(ClusterChanges).filter_by(
         cluster_id=self.id
     )
     if node_id:
         chs = chs.filter_by(node_id=node_id)
     map(orm().delete, chs.all())
     orm().commit()
Example #9
    def create_subtask(self, name):
        if not name:
            raise ValueError("Subtask name not specified")

        task = Task(name=name, cluster=self.cluster)

        self.subtasks.append(task)
        orm().commit()
        return task
Example #10
    def verify_data_correctness(cls, node):
        db_node = orm().query(Node).filter_by(id=node['id']).first()
        if not db_node:
            raise errors.InvalidData(
                "There is no node with ID '%d' in DB" % node['id'],
                log_message=True
            )
        interfaces = node['interfaces']
        db_interfaces = db_node.interfaces
        if len(interfaces) != len(db_interfaces):
            raise errors.InvalidData(
                "Node '%d' has different amount of interfaces" % node['id'],
                log_message=True
            )
        # FIXME: we should use only the networks appropriate for this
        # node, not all of them.
        db_network_groups = orm().query(NetworkGroup).filter_by(
            cluster_id=db_node.cluster_id
        ).all()
        if not db_network_groups:
            raise errors.InvalidData(
                "There are no networks related to"
                " node '%d' in DB" % node['id'],
                log_message=True
            )
        network_group_ids = set([ng.id for ng in db_network_groups])

        for iface in interfaces:
            db_iface = filter(
                lambda i: i.id == iface['id'],
                db_interfaces
            )
            if not db_iface:
                raise errors.InvalidData(
                    "There is no interface with ID '%d'"
                    " for node '%d' in DB" %
                    (iface['id'], node['id']),
                    log_message=True
                )
            db_iface = db_iface[0]

            for net in iface['assigned_networks']:
                if net['id'] not in network_group_ids:
                    raise errors.InvalidData(
                        "Node '%d' shouldn't be connected to"
                        " network with ID '%d'" %
                        (node['id'], net['id']),
                        log_message=True
                    )
                network_group_ids.remove(net['id'])

        # Check if there are unassigned networks for this node.
        if network_group_ids:
            raise errors.InvalidData(
                "Too few neworks to assign to node '%d'" % node['id'],
                log_message=True
            )
Example #11
    def create_subtask(self, name):
        if not name:
            raise ValueError("Subtask name not specified")

        task = Task(name=name, cluster=self.cluster)

        self.subtasks.append(task)
        orm().commit()
        return task
Example #12
 def validate(cls, data):
     d = cls.validate_json(data)
     if d.get("name"):
         if orm().query(Cluster).filter_by(name=d["name"]).first():
             c = web.webapi.conflict
             c.message = "Environment with this name already exists"
             raise c()
     if d.get("release"):
         release = orm().query(Release).get(d.get("release"))
         if not release:
             raise web.webapi.badrequest(message="Invalid release id")
     return d
Example #13
 def validate(cls, data):
     d = cls.validate_json(data)
     if d.get("name"):
         if orm().query(Cluster).filter_by(
             name=d["name"]
         ).first():
             c = web.webapi.conflict
             c.message = "Environment with this name already exists"
             raise c()
     if d.get("release"):
         release = orm().query(Release).get(d.get("release"))
         if not release:
             raise web.webapi.badrequest(message="Invalid release id")
     return d
Example #14
    def update_ranges_from_cidr(self, network_group, cidr):
        """
        Update network ranges for cidr
        """
        orm().query(IPAddrRange).filter_by(
            network_group_id=network_group.id).delete()

        new_cidr = IPNetwork(cidr)
        ip_range = IPAddrRange(
            network_group_id=network_group.id,
            first=str(new_cidr[2]),
            last=str(new_cidr[-2]))

        self.db.add(ip_range)
        self.db.commit()
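As a side note, the two-index slice above starts the range at the third address of the network ([2], skipping the network address and the first host) and ends just before the broadcast address ([-2]). A quick check, assuming IPNetwork here is netaddr's IPNetwork (other snippets on this page already use netaddr) and using a made-up CIDR:

    from netaddr import IPNetwork

    cidr = IPNetwork("10.20.0.0/24")   # hypothetical network
    print(cidr[2])    # 10.20.0.2   -> 'first' address of the new range
    print(cidr[-2])   # 10.20.0.254 -> 'last' address of the new range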
Example #15
 def add_pending_changes(self, changes_type, node_id=None):
     ex_chs = orm().query(ClusterChanges).filter_by(cluster=self,
                                                    name=changes_type)
     if not node_id:
         ex_chs = ex_chs.first()
     else:
         ex_chs = ex_chs.filter_by(node_id=node_id).first()
     # do nothing if changes with the same name already pending
     if ex_chs:
         return
     ch = ClusterChanges(cluster_id=self.id, name=changes_type)
     if node_id:
         ch.node_id = node_id
     orm().add(ch)
     orm().commit()
Example #16
 def execute(self, nets, vlan_ids):
     task = Task(name="check_networks", cluster=self.cluster)
     orm().add(task)
     orm().commit()
     self._call_silently(task, tasks.CheckNetworksTask, nets)
     orm().refresh(task)
     if task.status != 'error':
          # this one is connected with UI issues - we need to
          # distinguish whether the error happened inside nailgun or
          # somewhere in the orchestrator, and the UI does it by task name.
         task.name = "verify_networks"
         orm().add(task)
         orm().commit()
         self._call_silently(task, tasks.VerifyNetworksTask, vlan_ids)
     return task
Example #17
 def __init__(self, interval=None, timeout=None):
     super(KeepAliveThread, self).__init__()
     self.stoprequest = threading.Event()
     self.interval = interval or settings.KEEPALIVE['interval']
     self.timeout = timeout or settings.KEEPALIVE['timeout']
     self.db = orm()
     self.reset_nodes_timestamp()
Example #18
    def __controller_nodes(cls, cluster_id):
        nodes = orm().query(Node).filter_by(
            cluster_id=cluster_id,
            role='controller',
            pending_deletion=False).order_by(Node.id)

        return map(cls.__format_node_for_naily, nodes)
Example #19
 def update_cluster_status(cls, uuid):
     db = orm()
     task = db.query(Task).filter_by(uuid=uuid).first()
     # FIXME: should be moved to task/manager "finish" method after
     # web.ctx.orm issue is addressed
     cluster = task.cluster
     if task.name == 'deploy':
         logger.debug("Task %s name: deploy", task.uuid)
         if task.status == 'ready':
             # FIXME: we should also calculate deployment "validity"
             # (check if all of the required nodes of required roles are
             # present). If cluster is not "valid", we should also set
             # its status to "error" even if it is deployed successfully.
              # This method would also be affected by the web.ctx.orm issue.
             cluster.status = 'operational'
             cluster.clear_pending_changes()
         elif task.status == 'error':
             logger.debug("Updating cluster status to error: "
                          "cluster_id: %s", task.cluster_id)
             cluster.status = 'error'
     elif task.name == 'provision':
         logger.debug("Task %s name: provision", task.uuid)
         if task.status == 'error':
             logger.debug("Updating cluster status to error: "
                          "cluster_id: %s", task.cluster_id)
             cluster.status = 'error'
     db.add(cluster)
     db.commit()
Example #20
    def __controller_nodes(cls, cluster_id):
        nodes = orm().query(Node).filter_by(
            cluster_id=cluster_id,
            role='controller',
            pending_deletion=False).order_by(Node.id)

        return map(cls.__format_node_for_naily, nodes)
Example #21
    def update(cls, cluster, network_configuration):
        from nailgun.network.manager import NetworkManager
        network_manager = NetworkManager()
        if 'net_manager' in network_configuration:
            setattr(
                cluster,
                'net_manager',
                network_configuration['net_manager'])

        if 'networks' in network_configuration:
            for ng in network_configuration['networks']:
                ng_db = orm().query(NetworkGroup).get(ng['id'])

                for key, value in ng.iteritems():
                    if key == "ip_ranges":
                        cls.__set_ip_ranges(ng['id'], value)
                    else:
                        if key == 'cidr' and \
                                not ng['name'] in ('public', 'floating'):
                            network_manager.update_ranges_from_cidr(
                                ng_db, value)

                        setattr(ng_db, key, value)

                network_manager.create_networks(ng_db)
                ng_db.cluster.add_pending_changes('networks')
Example #22
    def validate_collection_update(cls, data):
        d = cls.validate_json(data)
        if not isinstance(d, list):
            raise errors.InvalidData(
                "Invalid json list",
                log_message=True
            )

        q = orm().query(Notification)
        valid_d = []
        for nd in d:
            valid_nd = {}
            if "id" not in nd:
                raise errors.InvalidData(
                    "ID is not set correctly",
                    log_message=True
                )

            if "status" not in nd:
                raise errors.InvalidData(
                    "ID is not set correctly",
                    log_message=True
                )

            if not q.get(nd["id"]):
                raise errors.InvalidData(
                    "Invalid ID specified",
                    log_message=True
                )

            valid_nd["id"] = nd["id"]
            valid_nd["status"] = nd["status"]
            valid_d.append(valid_nd)
        return valid_d
Example #23
 def update_task_status(cls, uuid, status, progress, msg="", result=None):
     logger.debug("Updating task: %s", uuid)
     db = orm()
     task = db.query(Task).filter_by(uuid=uuid).first()
     if not task:
         logger.error("Can't set status='%s', message='%s':no task \
                 with UUID %s found!", status, msg, uuid)
         return
     previous_status = task.status
     data = {'status': status, 'progress': progress,
             'message': msg, 'result': result}
     for key, value in data.iteritems():
         if value is not None:
             setattr(task, key, value)
             logger.info(
                 u"Task {0} {1} is set to {2}".format(
                     task.uuid,
                     key,
                     value
                 )
             )
     db.add(task)
     db.commit()
     if previous_status != status:
         logger.debug("Updating cluster status: "
                      "cluster_id: %s status: %s",
                      task.cluster_id, status)
         cls.update_cluster_status(uuid)
     if task.parent:
         logger.debug("Updating parent task: %s", task.parent.uuid)
         cls.update_parent_task(task.parent.uuid)
Example #24
    def update(cls, cluster, network_configuration):
        from nailgun.network.manager import NetworkManager
        network_manager = NetworkManager()
        if 'net_manager' in network_configuration:
            setattr(cluster, 'net_manager',
                    network_configuration['net_manager'])

        if 'networks' in network_configuration:
            for ng in network_configuration['networks']:
                ng_db = orm().query(NetworkGroup).get(ng['id'])

                for key, value in ng.iteritems():
                    if key == "ip_ranges":
                        # deleting old ip ranges
                        map(
                            orm().delete,
                            orm().query(IPAddrRange).filter_by(
                                network_group_id=ng['id']))
                        for r in value:
                            new_ip_range = IPAddrRange(
                                first=r[0],
                                last=r[1],
                                network_group_id=ng['id'])
                            orm().add(new_ip_range)
                            orm().commit()
                    else:
                        setattr(ng_db, key, value)

                network_manager.create_networks(ng_db)
                ng_db.cluster.add_pending_changes('networks')
Example #25
    def update(cls, cluster, network_configuration):
        from nailgun.network.manager import NetworkManager
        network_manager = NetworkManager()
        if 'net_manager' in network_configuration:
            setattr(
                cluster,
                'net_manager',
                network_configuration['net_manager'])

        if 'networks' in network_configuration:
            for ng in network_configuration['networks']:
                ng_db = orm().query(NetworkGroup).get(ng['id'])

                for key, value in ng.iteritems():
                    if key == "ip_ranges":
                        # deleting old ip ranges
                        map(
                            orm().delete,
                            orm().query(IPAddrRange).filter_by(
                                network_group_id=ng['id']
                            )
                        )
                        for r in value:
                            new_ip_range = IPAddrRange(
                                first=r[0],
                                last=r[1],
                                network_group_id=ng['id']
                            )
                            orm().add(new_ip_range)
                            orm().commit()
                    else:
                        setattr(ng_db, key, value)

                network_manager.create_networks(ng_db)
                ng_db.cluster.add_pending_changes('networks')
Example #26
 def __init__(self, node):
     self.db = orm()
     self.node = node
     if not self.node:
         raise Exception(
             "Invalid node - can't generate volumes info"
         )
     self.volumes = []
     self.disks = []
Example #27
 def add_pending_changes(self, changes_type, node_id=None):
     ex_chs = orm().query(ClusterChanges).filter_by(
         cluster=self,
         name=changes_type
     )
     if not node_id:
         ex_chs = ex_chs.first()
     else:
         ex_chs = ex_chs.filter_by(node_id=node_id).first()
     # do nothing if changes with the same name already pending
     if ex_chs:
         return
     ch = ClusterChanges(
         cluster_id=self.id,
         name=changes_type
     )
     if node_id:
         ch.node_id = node_id
     orm().add(ch)
     orm().commit()
Example #28
    def notify(self,
               topic,
               message,
               cluster_id=None,
               node_id=None,
               task_uuid=None):
        if topic == 'discover' and node_id is None:
            raise Exception("No node id in discover notification")
        task = None
        if task_uuid:
            task = orm().query(Task).filter_by(uuid=task_uuid).first()

        exist = None
        if node_id and task:
            exist = orm().query(Notification).filter_by(node_id=node_id,
                                                        message=message,
                                                        task=task).first()

        if not exist:
            notification = Notification()
            notification.topic = topic
            notification.message = message
            notification.cluster_id = cluster_id
            notification.node_id = node_id
            if task:
                notification.task_id = task.id
            notification.datetime = datetime.now()
            orm().add(notification)
            orm().commit()
            logger.info("Notification: topic: %s message: %s" %
                        (topic, message))
Example #29
    def notify(self, topic, message,
               cluster_id=None, node_id=None, task_uuid=None):
        if topic == 'discover' and node_id is None:
            raise Exception("No node id in discover notification")
        task = None
        if task_uuid:
            task = orm().query(Task).filter_by(uuid=task_uuid).first()

        exist = None
        if node_id and task:
            exist = orm().query(Notification).filter_by(
                node_id=node_id,
                message=message,
                task=task
            ).first()

        if not exist:
            notification = Notification()
            notification.topic = topic
            notification.message = message
            notification.cluster_id = cluster_id
            notification.node_id = node_id
            if task:
                notification.task_id = task.id
            notification.datetime = datetime.now()
            orm().add(notification)
            orm().commit()
            logger.info(
                "Notification: topic: %s message: %s" % (topic, message)
            )
Example #30
 def __init__(self, app, db=None):
     self.db = db or orm()
     self.app = app
     self.tester = TestCase
     self.tester.runTest = lambda a: None
     self.tester = self.tester()
     self.here = os.path.abspath(os.path.dirname(__file__))
     self.fixture_dir = os.path.join(self.here, "..", "fixtures")
     self.default_headers = {"Content-Type": "application/json"}
     self.releases = []
     self.clusters = []
     self.nodes = []
     self.network_manager = NetworkManager(db=self.db)
Example #31
    def __add_vlan_interfaces(cls, nodes):
        """
        We shouldn't pass the fixed network to the orchestrator
        when the network manager is VlanManager, but we should specify
        fixed_interface (private_interface in Fuel terms); as a result
        we just pass vlan_interface as a node attribute.
        """
        netmanager = NetworkManager()
        for node in nodes:
            node_db = orm().query(Node).get(node['id'])

            fixed_interface = netmanager._get_interface_by_network_name(
                node_db.id, 'fixed')

            node['vlan_interface'] = fixed_interface.name
Example #32
    def __add_vlan_interfaces(cls, nodes):
        """
        We shouldn't pass the fixed network to the orchestrator
        when the network manager is VlanManager, but we should specify
        fixed_interface (private_interface in Fuel terms); as a result
        we just pass vlan_interface as a node attribute.
        """
        netmanager = NetworkManager()
        for node in nodes:
            node_db = orm().query(Node).get(node['id'])

            fixed_interface = netmanager._get_interface_by_network_name(
                node_db, 'fixed')

            node['vlan_interface'] = fixed_interface.name
Example #33
 def __init__(self, app, db=None):
     self.db = db or orm()
     self.app = app
     self.tester = TestCase
     self.tester.runTest = lambda a: None
     self.tester = self.tester()
     self.here = os.path.abspath(os.path.dirname(__file__))
     self.fixture_dir = os.path.join(self.here, "..", "fixtures")
     self.default_headers = {
         "Content-Type": "application/json"
     }
     self.releases = []
     self.clusters = []
     self.nodes = []
     self.network_manager = NetworkManager(db=self.db)
Example #34
 def execute(self, data):
     task = Task(name="check_networks", cluster=self.cluster)
     orm().add(task)
     orm().commit()
     self._call_silently(task, tasks.CheckNetworksTask, data)
     orm().refresh(task)
     if task.status == 'running':
         TaskHelper.update_task_status(task.uuid,
                                       status="ready",
                                       progress=100)
     return task
Example #35
 def validate(cls, data):
     d = cls.validate_json(data)
     if not "name" in d:
         raise errors.InvalidData(
             "No release name specified",
             log_message=True
         )
     if not "version" in d:
         raise errors.InvalidData(
             "No release version specified",
             log_message=True
         )
     if orm().query(Release).filter_by(
         name=d["name"],
         version=d["version"]
     ).first():
         raise errors.AlreadyExists(
             "Release with the same name and version "
             "already exists",
             log_message=True
         )
     if "networks_metadata" in d:
         for network in d["networks_metadata"]:
             if not "name" in network or not "access" in network:
                 raise errors.InvalidData(
                     "Invalid network data: %s" % str(network),
                     log_message=True
                 )
             if network["access"] not in settings.NETWORK_POOLS:
                 raise errors.InvalidData(
                     "Invalid access mode for network",
                     log_message=True
                 )
     else:
         d["networks_metadata"] = []
     if not "attributes_metadata" in d:
         d["attributes_metadata"] = {}
     else:
         try:
             Attributes.validate_fixture(d["attributes_metadata"])
         except:
             raise errors.InvalidData(
                 "Invalid logical structure of attributes metadata",
                 log_message=True
             )
     return d
Example #36
class BasicValidator(object):
    db = orm()

    @classmethod
    def validate_json(cls, data):
        if data:
            try:
                res = json.loads(data)
            except:
                raise web.webapi.badrequest(message="Invalid json format")
        else:
            raise web.webapi.badrequest(message="Empty request received")
        return res

    @classmethod
    def validate(cls, data):
        raise NotImplementedError("You should override this method")
Example #37
    def __set_ip_ranges(cls, network_group_id, ip_ranges):
        # deleting old ip ranges
        orm().query(IPAddrRange).filter_by(
            network_group_id=network_group_id).delete()

        for r in ip_ranges:
            new_ip_range = IPAddrRange(
                first=r[0],
                last=r[1],
                network_group_id=network_group_id)
            orm().add(new_ip_range)
        orm().commit()
Example #38
 def update_parent_task(cls, uuid):
     db = orm()
     task = db.query(Task).filter_by(uuid=uuid).first()
     subtasks = task.subtasks
     if len(subtasks):
         if all(map(lambda s: s.status == 'ready', subtasks)):
             task.status = 'ready'
             task.progress = 100
             task.message = '; '.join(map(
                 lambda s: s.message, filter(
                     lambda s: s.message is not None, subtasks)))
             db.add(task)
             db.commit()
             cls.update_cluster_status(uuid)
         elif all(map(lambda s: s.status in ('ready', 'error'), subtasks)):
             task.status = 'error'
             task.progress = 100
             task.message = '; '.join(map(
                 lambda s: s.message, filter(
                     lambda s: s.status == 'error', subtasks)))
             db.add(task)
             db.commit()
             cls.update_cluster_status(uuid)
         else:
             subtasks_with_progress = filter(
                 lambda s: s.progress is not None,
                 subtasks
             )
             if subtasks_with_progress:
                 task.progress = int(
                     round(
                         sum(
                             [s.weight * s.progress for s
                              in subtasks_with_progress]
                         ) /
                         sum(
                             [s.weight for s
                              in subtasks_with_progress]
                         ), 0)
                 )
             else:
                 task.progress = 0
             db.add(task)
             db.commit()
Example #39
    def __init__(self, node=None, data=None):
        """
        VolumeManager can be initialized either with a node
        or with data. When initialized with a node, disks and
        volumes are set according to the node's attributes.
        In the latter case disks and volumes are set according
        to the init data.
        """
        self.db = None
        self.node = None
        self.disks = []
        self.volumes = []
        if node:
            logger.debug("VolumeManager initialized with node: %s", node.id)
            self.db = orm()
            self.node = node
            self.volumes = self.node.attributes.volumes or []

            if not "disks" in self.node.meta:
                raise Exception("No disk metadata specified for node")
            for d in sorted(self.node.meta["disks"],
                            key=lambda i: i["name"]):
                disk = Disk(self, d["disk"], d["size"])
                for v in self.volumes:
                    if v.get("type") == "disk" and v.get("id") == disk.id:
                        disk.volumes = v.get("volumes", [])
                self.disks.append(disk)
        elif data:
            logger.debug("VolumeManager initialized with data: %s", data)
            for v in data:
                if v.get("type") == "disk" and v.get("id") and v.get("size"):
                    disk = Disk(self, v["id"], v["size"])
                    disk.volumes = v.get("volumes", [])
                    self.disks.append(disk)
                self.volumes.append(v)

        else:
            raise Exception("VolumeManager can't be initialized."
                            "Both node and data are None.")

        logger.debug("VolumeManager: volumes: %s", self.volumes)
        logger.debug("VolumeManager: disks: %s", self.disks)
        self.validate()
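The docstring above mentions a data-only initialization mode; a hedged sketch of what calling it that way might look like, with made-up disk IDs and sizes (Disk and validate() come from the same module as VolumeManager, and whether validate() accepts this exact data depends on code not shown here):

    # Hypothetical input: two disks described purely by data, no node object.
    disks_data = [
        {"type": "disk", "id": "sda", "size": 500000, "volumes": []},
        {"type": "disk", "id": "sdb", "size": 250000, "volumes": []},
    ]
    manager = VolumeManager(data=disks_data)
    print(manager.disks)    # Disk objects built from the entries above
    print(manager.volumes)  # the raw dicts are kept as volumes as well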
Example #40
    def execute(self):
        task = Task(
            name='check_before_deployment',
            cluster=self.cluster
        )
        orm().add(task)
        orm().commit()
        self._call_silently(task, tasks.CheckBeforeDeploymentTask)
        orm().refresh(task)
        if task.status == 'running':
            TaskHelper.update_task_status(
                task.uuid, status="ready", progress=100)

        return task
Example #41
 def execute(self, data):
     task = Task(
         name="check_networks",
         cluster=self.cluster
     )
     orm().add(task)
     orm().commit()
     self._call_silently(
         task,
         tasks.CheckNetworksTask,
         data
     )
     orm().refresh(task)
     if task.status == 'running':
         TaskHelper.update_task_status(
             task.uuid,
             status="ready",
             progress=100
         )
     return task
Example #42
 def generate_fields(self):
     self.generated = self.traverse(self.generated)
     orm().add(self)
     orm().commit()
Example #43
    def execute(self):
        current_cluster_tasks = orm().query(Task).filter_by(
            cluster=self.cluster, name='cluster_deletion').all()
        deploy_running = orm().query(Task).filter_by(cluster=self.cluster,
                                                     name='deploy',
                                                     status='running').first()
        if deploy_running:
            logger.error(u"Deleting cluster '{0}' "
                         "while deployment is still running".format(
                             self.cluster.name))

        logger.debug("Removing cluster tasks")
        for task in current_cluster_tasks:
            if task.status == "running":
                raise errors.DeletionAlreadyStarted()
            elif task.status in ("ready", "error"):
                for subtask in task.subtasks:
                    orm().delete(subtask)
                orm().delete(task)
                orm().commit()

        logger.debug("Labeling cluster nodes to delete")
        for node in self.cluster.nodes:
            node.pending_deletion = True
            orm().add(node)
            orm().commit()

        self.cluster.status = 'remove'
        orm().add(self.cluster)
        orm().commit()

        logger.debug("Creating cluster deletion task")
        task = Task(name="cluster_deletion", cluster=self.cluster)
        orm().add(task)
        orm().commit()
        self._call_silently(task, tasks.ClusterDeletionTask)
        return task
Example #44
    def execute(self):
        logger.info(u"Trying to start deployment at cluster '{0}'".format(
            self.cluster.name or self.cluster.id, ))
        current_tasks = orm().query(Task).filter_by(cluster_id=self.cluster.id,
                                                    name="deploy")
        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                for subtask in task.subtasks:
                    orm().delete(subtask)
                orm().delete(task)
                orm().commit()

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)

        if not any([nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        self.cluster.status = 'deployment'
        orm().add(self.cluster)
        orm().commit()

        supertask = Task(name="deploy", cluster=self.cluster)
        orm().add(supertask)
        orm().commit()
        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = supertask.create_subtask("node_deletion")
            self._call_silently(task_deletion, tasks.DeletionTask)

        if nodes_to_deploy:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy)

            task_provision = supertask.create_subtask("provision")
            # we assume here that task_provision just adds systems to
            # cobbler and reboots them, so it has an extremely small weight
            task_provision.weight = 0.05
            provision_message = self._call_silently(task_provision,
                                                    tasks.ProvisionTask,
                                                    method_name='message')
            task_provision.cache = provision_message
            orm().add(task_provision)
            orm().commit()

            task_deployment = supertask.create_subtask("deployment")
            deployment_message = self._call_silently(task_deployment,
                                                     tasks.DeploymentTask,
                                                     method_name='message')
            task_deployment.cache = deployment_message
            orm().add(task_deployment)
            orm().commit()

            rpc.cast('naily', [provision_message, deployment_message])

        logger.debug(u"Deployment: task to deploy cluster '{0}' is {1}".format(
            self.cluster.name or self.cluster.id, supertask.uuid))
        return supertask
Example #45
 def update_slave_nodes_fqdn(cls, nodes):
     for n in nodes:
         n.fqdn = cls.slave_fqdn_by_id(n.id, n.role)
         logger.debug("Updating node fqdn: %s %s", n.id, n.fqdn)
         orm().add(n)
         orm().commit()
Example #46
    def message(cls, task):
        logger.debug("DeploymentTask.message(task=%s)" % task.uuid)
        task_uuid = task.uuid
        cluster_id = task.cluster.id
        netmanager = NetworkManager()

        nodes = TaskHelper.nodes_to_deploy(task.cluster)

        logger.info("Associated FQDNs to nodes: %s" %
                    ', '.join([n.fqdn for n in nodes]))

        nodes_ids = [n.id for n in nodes]
        if nodes_ids:
            logger.info("Assigning IP addresses to nodes..")
            netmanager.assign_ips(nodes_ids, "management")
            netmanager.assign_ips(nodes_ids, "public")
            netmanager.assign_ips(nodes_ids, "storage")

        nodes_with_attrs = []
        for n in nodes:
            n.pending_addition = False
            if n.status in ('ready', 'deploying'):
                n.status = 'provisioned'
            n.progress = 0
            orm().add(n)
            orm().commit()
            nodes_with_attrs.append(cls.__format_node_for_naily(n))

        cluster_attrs = task.cluster.attributes.merged_attrs_values()
        cluster_attrs['controller_nodes'] = cls.__controller_nodes(cluster_id)

        nets_db = orm().query(Network).join(NetworkGroup).\
            filter(NetworkGroup.cluster_id == cluster_id).all()

        ng_db = orm().query(NetworkGroup).filter_by(
            cluster_id=cluster_id).all()
        for net in ng_db:
            cluster_attrs[net.name + '_network_range'] = net.cidr

        cluster_attrs['network_manager'] = task.cluster.net_manager

        if cluster_attrs['network_manager'] == 'VlanManager':
            fixed_net = orm().query(NetworkGroup).filter_by(
                cluster_id=cluster_id).filter_by(name='fixed').first()

            cluster_attrs['network_size'] = fixed_net.network_size
            cluster_attrs['num_networks'] = fixed_net.amount
            cluster_attrs['vlan_start'] = fixed_net.vlan_start
            cls.__add_vlan_interfaces(nodes_with_attrs)

        if task.cluster.mode == 'ha':
            logger.info("HA mode chosen, creating VIP addresses for it..")
            cluster_attrs['management_vip'] = netmanager.assign_vip(
                cluster_id, "management")
            cluster_attrs['public_vip'] = netmanager.assign_vip(
                cluster_id, "public")

        cluster_attrs['deployment_mode'] = task.cluster.mode
        cluster_attrs['deployment_id'] = cluster_id

        message = {
            'method': 'deploy',
            'respond_to': 'deploy_resp',
            'args': {
                'task_uuid': task.uuid,
                'nodes': nodes_with_attrs,
                'attributes': cluster_attrs
            }
        }

        return message
Example #47
    def prepare_syslog_dir(cls, node, prefix=None):
        logger.debug("Preparing syslog directories for node: %s", node.fqdn)
        if not prefix:
            prefix = settings.SYSLOG_DIR
        logger.debug("prepare_syslog_dir prefix=%s", prefix)

        old = os.path.join(prefix, str(node.ip))
        bak = os.path.join(prefix, "%s.bak" % str(node.fqdn))
        new = os.path.join(prefix, str(node.fqdn))

        netmanager = NetworkManager()
        admin_net_id = netmanager.get_admin_network_id()
        links = map(
            lambda i: os.path.join(prefix, i.ip_addr),
            orm().query(IPAddr.ip_addr).
            filter_by(node=node.id).
            filter_by(network=admin_net_id).all()
        )

        logger.debug("prepare_syslog_dir old=%s", old)
        logger.debug("prepare_syslog_dir new=%s", new)
        logger.debug("prepare_syslog_dir bak=%s", bak)
        logger.debug("prepare_syslog_dir links=%s", str(links))

        # backup directory if it exists
        if os.path.isdir(new):
            logger.debug("New %s already exists. Trying to backup", new)
            if os.path.islink(bak):
                logger.debug("Bak %s already exists and it is link. "
                             "Trying to unlink", bak)
                os.unlink(bak)
            elif os.path.isdir(bak):
                logger.debug("Bak %s already exists and it is directory. "
                             "Trying to remove", bak)
                shutil.rmtree(bak)
            os.rename(new, bak)

        # rename bootstrap directory into fqdn
        if os.path.islink(old):
            logger.debug("Old %s exists and it is link. "
                         "Trying to unlink", old)
            os.unlink(old)
        if os.path.isdir(old):
            logger.debug("Old %s exists and it is directory. "
                         "Trying to rename into %s", old, new)
            os.rename(old, new)
        else:
            logger.debug("Creating %s", new)
            os.makedirs(new)

        # creating symlinks
        for l in links:
            if os.path.islink(l) or os.path.isfile(l):
                logger.debug("%s already exists. "
                             "Trying to unlink", l)
                os.unlink(l)
            if os.path.isdir(l):
                logger.debug("%s already exists and it directory. "
                             "Trying to remove", l)
                shutil.rmtree(l)
            logger.debug("Creating symlink %s -> %s", l, new)
            os.symlink(str(node.fqdn), l)

        os.system("/usr/bin/pkill -HUP rsyslog")
Example #48
 def __init__(self, *args, **kwargs):
     super(JSONHandler, self).__init__(*args, **kwargs)
     self.db = orm()
Example #49
    def execute(self):
        logger.info(
            u"Trying to start deployment at cluster '{0}'".format(
                self.cluster.name or self.cluster.id,
            )
        )
        current_tasks = orm().query(Task).filter_by(
            cluster_id=self.cluster.id,
            name="deploy"
        )
        for task in current_tasks:
            if task.status == "running":
                raise errors.DeploymentAlreadyStarted()
            elif task.status in ("ready", "error"):
                for subtask in task.subtasks:
                    orm().delete(subtask)
                orm().delete(task)
                orm().commit()

        nodes_to_delete = TaskHelper.nodes_to_delete(self.cluster)
        nodes_to_deploy = TaskHelper.nodes_to_deploy(self.cluster)

        if not any([nodes_to_deploy, nodes_to_delete]):
            raise errors.WrongNodeStatus("No changes to deploy")

        self.cluster.status = 'deployment'
        orm().add(self.cluster)
        orm().commit()

        supertask = Task(
            name="deploy",
            cluster=self.cluster
        )
        orm().add(supertask)
        orm().commit()
        task_deletion, task_provision, task_deployment = None, None, None

        if nodes_to_delete:
            task_deletion = supertask.create_subtask("node_deletion")
            self._call_silently(
                task_deletion,
                tasks.DeletionTask
            )

        if nodes_to_deploy:
            TaskHelper.update_slave_nodes_fqdn(nodes_to_deploy)

            task_provision = supertask.create_subtask("provision")
            # we assume here that task_provision just adds systems to
            # cobbler and reboots them, so it has an extremely small weight
            task_provision.weight = 0.05
            provision_message = self._call_silently(
                task_provision,
                tasks.ProvisionTask,
                method_name='message'
            )
            task_provision.cache = provision_message
            orm().add(task_provision)
            orm().commit()

            task_deployment = supertask.create_subtask("deployment")
            deployment_message = self._call_silently(
                task_deployment,
                tasks.DeploymentTask,
                method_name='message'
            )
            task_deployment.cache = deployment_message
            orm().add(task_deployment)
            orm().commit()

            rpc.cast('naily', [provision_message, deployment_message])

        logger.debug(
            u"Deployment: task to deploy cluster '{0}' is {1}".format(
                self.cluster.name or self.cluster.id,
                supertask.uuid
            )
        )
        return supertask
Example #50
    def execute(self, task, data):
        task_uuid = task.uuid

        # If not set in data then fetch from db
        if 'net_manager' in data:
            netmanager = data['net_manager']
        else:
            netmanager = task.cluster.net_manager

        if 'networks' in data:
            networks = data['networks']
        else:
            networks = map(lambda x: x.__dict__, task.cluster.network_groups)

        result = []
        err_msgs = []
        for ng in networks:
            net_errors = []
            ng_db = orm().query(NetworkGroup).get(ng['id'])
            if not ng_db:
                net_errors.append("id")
                err_msgs.append("Invalid network ID: {0}".format(ng['id']))
            else:
                if 'cidr' in ng:
                    fnet = netaddr.IPSet([ng['cidr']])

                    if fnet & netaddr.IPSet(settings.NET_EXCLUDE):
                        net_errors.append("cidr")
                        err_msgs.append(
                            "Intersection with admin "
                            "network(s) '{0}' found".format(
                                settings.NET_EXCLUDE
                            )
                        )
                    if fnet.size < ng['network_size'] * ng['amount']:
                        net_errors.append("cidr")
                        err_msgs.append(
                            "CIDR size for network '{0}' "
                            "is less than required".format(
                                ng.get('name') or ng_db.name or ng_db.id
                            )
                        )
                if ng.get('amount') > 1 and netmanager == 'FlatDHCPManager':
                    net_errors.append("amount")
                    err_msgs.append(
                        "Network amount for '{0}' is more than 1 "
                        "while using FlatDHCP manager.".format(
                            ng.get('name') or ng_db.name or ng_db.id
                        )
                    )
            if net_errors:
                result.append({
                    "id": int(ng["id"]),
                    "errors": net_errors
                })
        if err_msgs:
            task.result = result
            orm().add(task)
            orm().commit()
            full_err_msg = "\n".join(err_msgs)
            raise errors.NetworkCheckError(full_err_msg)
Example #51
 def __init__(self, cluster_id):
     self.cluster = orm().query(Cluster).get(cluster_id)
Example #52
    def execute(self, task, respond_to='remove_nodes_resp'):
        logger.debug("DeletionTask.execute(task=%s)" % task.uuid)
        task_uuid = task.uuid
        logger.debug("Nodes deletion task is running")
        nodes_to_delete = []
        nodes_to_restore = []

        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

        # no need to call naily if there are no nodes in cluster
        if respond_to == 'remove_cluster_resp' and \
                not list(task.cluster.nodes):
            rcvr = rpc.receiver.NailgunReceiver()
            rcvr.initialize()
            rcvr.remove_cluster_resp(
                task_uuid=task_uuid,
                status='ready',
                progress=100
            )
            return

        for node in task.cluster.nodes:
            if node.pending_deletion:
                nodes_to_delete.append({
                    'id': node.id,
                    'uid': node.id,
                    'role': node.role
                })

                if USE_FAKE:
                    # only fake tasks
                    new_node = Node()
                    keep_attrs = (
                        'id',
                        'cluster_id',
                        'role',
                        'pending_deletion',
                        'pending_addition'
                    )
                    for prop in object_mapper(new_node).iterate_properties:
                        if isinstance(
                            prop, ColumnProperty
                        ) and prop.key not in keep_attrs:
                            setattr(
                                new_node,
                                prop.key,
                                getattr(node, prop.key)
                            )
                    nodes_to_restore.append(new_node)
                    # /only fake tasks

        # Delete offline nodes from the db
        if nodes_to_delete:
            for node in list(nodes_to_delete):
                node_db = orm().query(Node).get(node['id'])

                if not node_db.online:
                    slave_name = TaskHelper.make_slave_name(
                        node['id'], node['role']
                    )
                    logger.info(
                        "Node %s is offline, removing node from db" %
                        slave_name)
                    orm().delete(node_db)
                    orm().commit()

                    nodes_to_delete.remove(node)

        # only real tasks
        engine_nodes = []
        if not USE_FAKE:
            if nodes_to_delete:
                logger.debug("There are nodes to delete")
                for node in nodes_to_delete:
                    slave_name = TaskHelper.make_slave_name(
                        node['id'], node['role']
                    )
                    engine_nodes.append(slave_name)
                    try:
                        logger.info("Deleting old certs from puppet..")
                        node_db = orm().query(Node).get(node['id'])
                        if node_db and node_db.fqdn:
                            node_hostname = node_db.fqdn
                        else:
                            node_hostname = '.'.join([
                                slave_name, settings.DNS_DOMAIN])
                        cmd = "puppet cert clean {0}".format(node_hostname)
                        proc = subprocess.Popen(
                            shlex.split(cmd),
                            shell=False,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE
                        )
                        p_stdout, p_stderr = proc.communicate()
                        logger.info(
                            "'{0}' executed, STDOUT: '{1}',"
                            " STDERR: '{2}'".format(
                                cmd,
                                p_stdout,
                                p_stderr
                            )
                        )
                    except OSError:
                        logger.warning(
                            "'{0}' returned non-zero exit code".format(
                                cmd
                            )
                        )
                    except Exception as e:
                        logger.warning("Exception occurred while trying to \
                                remove the system from Cobbler: '{0}'".format(
                            e.message))

        # /only real tasks

        msg_delete = {
            'method': 'remove_nodes',
            'respond_to': respond_to,
            'args': {
                'task_uuid': task.uuid,
                'nodes': nodes_to_delete,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                },
                'engine_nodes': engine_nodes
            }
        }
        # only fake tasks
        if USE_FAKE and nodes_to_restore:
            msg_delete['args']['nodes_to_restore'] = nodes_to_restore
        # /only fake tasks
        logger.debug("Calling rpc remove_nodes method")
        rpc.cast('naily', msg_delete)
Example #53
        fixman.upload_fixtures()
        logger.info("Done")
    elif params.action == "dump_settings":
        sys.stdout.write(settings.dump())
    elif params.action in ("run",):
        settings.update({
            'LISTEN_PORT': int(params.port),
            'LISTEN_ADDRESS': params.address,
        })
        for attr in ['FAKE_TASKS', 'FAKE_TASKS_TICK_COUNT',
                     'FAKE_TASKS_TICK_INTERVAL', 'FAKE_TASKS_AMQP']:
            param = getattr(params, attr.lower())
            if param is not None:
                settings.update({attr: param})
        if params.config_file:
            settings.update_from_file(params.config_file)
        from nailgun.wsgi import appstart
        appstart(keepalive=params.keepalive)
    elif params.action == "shell":
        from nailgun.db import orm
        if params.config_file:
            settings.update_from_file(params.config_file)
        try:
            from IPython import embed
            embed()
        except ImportError:
            code.interact(local={'orm': orm, 'settings': settings})
        orm().commit()
    else:
        parser.print_help()
Example #54
 def __init__(self, db=None):
     self.db = db or orm()
Example #55
    def message(cls, task):
        logger.debug("ProvisionTask.message(task=%s)" % task.uuid)
        task_uuid = task.uuid
        cluster_id = task.cluster.id
        cluster_attrs = task.cluster.attributes.merged_attrs_values()

        netmanager = NetworkManager()
        nodes = orm().query(Node).filter_by(
            cluster_id=task.cluster.id,
            pending_deletion=False).order_by(Node.id)

        USE_FAKE = settings.FAKE_TASKS or settings.FAKE_TASKS_AMQP

        nodes_to_provision = []

        # FIXME: why can't we use the needs_reprovision and pending_addition
        # attributes of the node to construct a valid list of nodes which
        # need to be provisioned, instead of using this ugly loop?
        for node in nodes:
            if not node.online:
                if not USE_FAKE:
                    raise Exception(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id)
                    )
                else:
                    logger.warning(
                        u"Node '%s' (id=%s) is offline."
                        " Remove it from environment and try again." %
                        (node.name, node.id)
                    )
            if node.status in ('discover', 'provisioning') or \
                    (node.status == 'error' and
                     node.error_type == 'provision'):
                nodes_to_provision.append(node)

        # TODO: For now we send node data to the orchestrator
        # in a cobbler-oriented format. But in the future we
        # need to use a more abstract data structure.
        nodes_data = []
        for node in nodes_to_provision:
            node_data = {
                'profile': settings.COBBLER_PROFILE,
                'power_type': 'ssh',
                'power_user': '******',
                'power_address': node.ip,
                'name': TaskHelper.make_slave_name(node.id, node.role),
                'hostname': node.fqdn,
                'name_servers': '\"%s\"' % settings.DNS_SERVERS,
                'name_servers_search': '\"%s\"' % settings.DNS_SEARCH,
                'netboot_enabled': '1',
                'ks_meta': {
                    'puppet_auto_setup': 1,
                    'puppet_master': settings.PUPPET_MASTER_HOST,
                    'puppet_version': settings.PUPPET_VERSION,
                    'puppet_enable': 0,
                    'mco_auto_setup': 1,
                    'install_log_2_syslog': 1,
                    'mco_pskey': settings.MCO_PSKEY,
                    'mco_vhost': settings.MCO_VHOST,
                    'mco_host': settings.MCO_HOST,
                    'mco_user': settings.MCO_USER,
                    'mco_password': settings.MCO_PASSWORD,
                    'mco_connector': settings.MCO_CONNECTOR,
                    'mco_enable': 1,
                    'auth_key': "\"%s\"" % cluster_attrs.get('auth_key', ''),
                    'ks_spaces': "\"%s\"" % json.dumps(
                        node.attributes.volumes).replace("\"", "\\\"")
                }
            }

            if node.status == "discover":
                logger.info(
                    "Node %s seems booted with bootstrap image",
                    node.id
                )
                node_data['power_pass'] = settings.PATH_TO_BOOTSTRAP_SSH_KEY
            else:
                # If it's not in discover, we expect it to be booted
                #   into the target system.
                # TODO: Get rid of expectations!
                logger.info(
                    "Node %s seems booted with real system",
                    node.id
                )
                node_data['power_pass'] = settings.PATH_TO_SSH_KEY

            # FIXME: move this code (updating) into receiver.provision_resp
            if not USE_FAKE:
                node.status = "provisioning"
                orm().add(node)
                orm().commit()

            # here we assign admin network IPs for node
            # one IP for every node interface
            netmanager.assign_admin_ips(
                node.id,
                len(node.meta.get('interfaces', []))
            )
            admin_net_id = netmanager.get_admin_network_id()
            admin_ips = set([i.ip_addr for i in orm().query(IPAddr).
                            filter_by(node=node.id).
                            filter_by(network=admin_net_id)])
            for i in node.meta.get('interfaces', []):
                if 'interfaces' not in node_data:
                    node_data['interfaces'] = {}
                node_data['interfaces'][i['name']] = {
                    'mac_address': i['mac'],
                    'static': '0',
                    'netmask': settings.ADMIN_NETWORK['netmask'],
                    'ip_address': admin_ips.pop(),
                }
                # interfaces_extra field in cobbler ks_meta
                # means some extra data for network interfaces
                # configuration. It is used by cobbler snippet.
                # For example, cobbler interface model does not
                # have 'peerdns' field, but we need this field
                # to be configured. So we use interfaces_extra
                # branch in order to set this unsupported field.
                if 'interfaces_extra' not in node_data:
                    node_data['interfaces_extra'] = {}
                node_data['interfaces_extra'][i['name']] = {
                    'peerdns': 'no',
                    'onboot': 'no'
                }

                # We want the node to be able to PXE boot via any of its
                # interfaces. That is why we add all discovered
                # interfaces into the cobbler system. But we want the
                # assigned fqdn to be resolved into one IP address
                # because we don't completely support multi-interface
                # configuration yet.
                if i['mac'] == node.mac:
                    node_data['interfaces'][i['name']]['dns_name'] = node.fqdn
                    node_data['interfaces_extra'][i['name']]['onboot'] = 'yes'

            nodes_data.append(node_data)
            if not USE_FAKE:
                TaskHelper.prepare_syslog_dir(node)

        message = {
            'method': 'provision',
            'respond_to': 'provision_resp',
            'args': {
                'task_uuid': task.uuid,
                'engine': {
                    'url': settings.COBBLER_URL,
                    'username': settings.COBBLER_USER,
                    'password': settings.COBBLER_PASSWORD,
                },
                'nodes': nodes_data
            }
        }
        return message