Example #1
    def create(cls, data):
        topic = data.get("topic")
        node_id = data.get("node_id")
        task_uuid = data.pop("task_uuid", None)
        message = data.get("message")

        if topic == 'discover' and node_id is None:
            raise errors.CannotFindNodeIDForDiscovering(
                "No node id in discover notification"
            )

        if "datetime" not in data:
            data["datetime"] = datetime.now()

        task = None
        exist = None
        if task_uuid:
            task = Task.get_by_uuid(task_uuid)
            if task and node_id:
                exist = NotificationCollection.filter_by(
                    query=None,
                    node_id=node_id,
                    message=message,
                    task_id=task.id
                ).first()

        if not exist:
            super(Notification, cls).create(data)
            logger.info(
                u"Notification: topic: {0} message: {1}".format(
                    data.get("topic"),
                    data.get("message")
                )
            )
Example #2
    def PUT(self, cluster_id):
        """:returns: JSONized Task object.
        :http: * 202 (deployment stopping initiated)
               * 400 (can't stop deployment)
               * 404 (environment not found in db)
        """
        cluster = self.get_object_or_404(Cluster, cluster_id)

        try:
            logger.info(
                u"Trying to stop deployment "
                u"on environment '{0}'".format(
                    cluster_id
                )
            )
            task_manager = StopDeploymentTaskManager(
                cluster_id=cluster.id
            )
            task = task_manager.execute()
        except errors.StopAlreadyRunning as exc:
            err = web.conflict
            err.message = exc.message
            raise err
        except Exception as exc:
            logger.warn(u'Error during execution of '
                        u'deployment stopping task: {0}'.format(str(exc)))
            raise web.badrequest(str(exc))

        raise web.webapi.HTTPError(
            status="202 Accepted",
            data=TaskHandler.render(task)
        )
Example #3
    def check_repositories_resp(cls, **kwargs):
        logger.info(
            "RPC method check_repositories_resp received: %s",
            jsonutils.dumps(kwargs)
        )
        task_uuid = kwargs.get('task_uuid')
        nodes = kwargs.get('nodes')

        task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)
        failed_nodes = [node for node in nodes if node['status'] != 0]
        failed_nodes_ids = [node['uid'] for node in failed_nodes]

        progress = 100
        message = ''

        if not failed_nodes_ids:
            status = consts.TASK_STATUSES.ready
        else:
            failed_urls = set()
            for n in failed_nodes:
                failed_urls.update(n['out'].get('failed_urls', []))

            message = ('These nodes: "{0}" failed to connect to '
                       'some of these repositories: "{1}"').format(
                           '", "'.join([str(id) for id in failed_nodes_ids]),
                           '", "'.join(failed_urls))

            status = consts.TASK_STATUSES.error

        objects.Task.update_verify_networks(
            task, status, progress, message, [])
Example #4
    def PUT(self, cluster_id):
        """:returns: JSONized Task object.
        :http: * 202 (environment reset initiated)
               * 400 (can't reset environment)
               * 404 (environment not found in db)
        """
        cluster = self.get_object_or_404(Cluster, cluster_id)

        try:
            logger.info(
                u"Trying to reset environment '{0}'".format(
                    cluster_id
                )
            )
            task_manager = ResetEnvironmentTaskManager(
                cluster_id=cluster.id
            )
            task = task_manager.execute()
        except Exception as exc:
            logger.warn(u'Error during execution of '
                        u'environment resetting '
                        u'task: {0}'.format(str(exc)))
            raise web.badrequest(str(exc))

        raise web.webapi.HTTPError(
            status="202 Accepted",
            data=TaskHandler.render(task)
        )
Example #5
    def _check_vmware_consistency(cls, task):
        """Checks vmware attributes consistency and proper values
        """
        attributes = task.cluster.attributes.editable
        vmware_attributes = task.cluster.vmware_attributes
        # Old (< 6.1) clusters don't have vmware support
        if vmware_attributes:
            cinder_nodes = filter(
                lambda node: 'cinder' in node.all_roles,
                task.cluster.nodes)

            if not cinder_nodes:
                logger.info('No node with "cinder" role provided')

            models = {
                'settings': attributes,
                'default': vmware_attributes.editable,
                'cluster': task.cluster,
                'version': settings.VERSION,
                'networking_parameters': task.cluster.network_config
            }

            errors_msg = VmwareAttributesRestriction.check_data(
                models=models,
                metadata=vmware_attributes.editable['metadata'],
                data=vmware_attributes.editable['value'])

            if errors_msg:
                raise errors.CheckBeforeDeploymentError('\n'.join(errors_msg))
Example #6
# set_proxy yields, so it must be a context manager
# (assumes: from contextlib import contextmanager)
@contextmanager
def set_proxy(proxy):
    """Replace the http_proxy/https_proxy environment variables for the
    scope of the context execution. After exit from the context the old
    proxy values (if any) are restored.

    :param proxy: proxy url
    """
    variable_values = {
        'http_proxy': os.environ.get('http_proxy'),
        'https_proxy': os.environ.get('https_proxy')
    }
    for variable_name, variable_value in variable_values.items():
        if variable_value:
            logger.warning("{0} variable is already set with "
                           "value: {1}. Changing to {2}. Old value "
                           "will be restored after exit from script's "
                           "execution context"
                           .format(variable_name, variable_value, proxy))
        os.environ[variable_name] = proxy

    try:
        yield
    finally:
        for variable_name, variable_value in variable_values.items():
            if variable_value:
                logger.info("Restoring old value for http_proxy")
                os.environ[variable_name] = variable_value
            else:
                logger.info("Deleting set {0} environment variable"
                            .format(variable_name))
                del os.environ[variable_name]
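
A minimal usage sketch (the proxy URL and helper call are hypothetical; set_proxy is assumed importable from the module above):

    # everything inside the block sees the proxy via os.environ
    with set_proxy('http://10.20.0.1:3128'):
        do_network_calls()  # hypothetical helper that honors http_proxy
    # on exit the previous values are restored (or the variables deleted)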
Example #7
def run():
    logger.info("Starting standalone stats sender...")
    try:
        StatsSender().run()
    except (KeyboardInterrupt, SystemExit) as e:
        logger.error("Stats sender exception: %s", six.text_type(e))
    logger.info("Stopping standalone stats sender...")
Example #8
    def dump_environment_resp(cls, **kwargs):
        logger.info(
            "RPC method dump_environment_resp received: %s" %
            jsonutils.dumps(kwargs)
        )
        task_uuid = kwargs.get('task_uuid')
        status = kwargs.get('status')
        progress = kwargs.get('progress')
        error = kwargs.get('error')
        msg = kwargs.get('msg')

        task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)

        if status == 'error':
            notifier.notify('error', error)

            data = {'status': status, 'progress': 100, 'message': error}
            objects.Task.update(task, data)

        elif status == 'ready':
            dumpfile = os.path.basename(msg)
            notifier.notify('done', 'Snapshot is ready. '
                            'Visit Support page to download')
            data = {'status': status, 'progress': progress,
                    'message': '/dump/{0}'.format(dumpfile)}
            objects.Task.update(task, data)
Example #9
    def provision_resp(cls, **kwargs):
        logger.info(
            "RPC method provision_resp received: %s" %
            json.dumps(kwargs))

        task_uuid = kwargs.get('task_uuid')
        message = kwargs.get('error')
        status = kwargs.get('status')
        progress = kwargs.get('progress')
        nodes = kwargs.get('nodes', [])

        task = get_task_by_uuid(task_uuid)

        for node in nodes:
            uid = node.get('uid')
            node_db = db().query(Node).get(uid)

            if not node_db:
                logger.warn('Node with uid "{0}" not found'.format(uid))
                continue

            if node.get('status') == 'error':
                node_db.status = 'error'
                node_db.progress = 100
                node_db.error_type = 'provision'
                node_db.error_msg = node.get('error_msg', 'Unknown error')
            else:
                node_db.status = node.get('status')
                node_db.progress = node.get('progress')

        TaskHelper.update_task_status(task.uuid, status, progress, message)
Example #10
    def stats_user_resp(cls, **kwargs):
        logger.info("RPC method stats_user_resp received: %s",
                    jsonutils.dumps(kwargs))

        task_uuid = kwargs.get('task_uuid')
        nodes = kwargs.get('nodes', [])
        status = kwargs.get('status')
        error = kwargs.get('error')
        message = kwargs.get('msg')

        task = objects.Task.get_by_uuid(
            task_uuid, fail_if_not_found=True, lock_for_update=True)

        if status not in (consts.TASK_STATUSES.ready,
                          consts.TASK_STATUSES.error):
            logger.debug("Task %s, id: %s in status: %s",
                         task.name, task.id, task.status)
            return

        data = {'status': status, 'progress': 100, 'message': message}
        if status == consts.TASK_STATUSES.error:
            logger.error("Task %s, id: %s failed: %s",
                         task.name, task.id, error)
            data['message'] = error

        objects.Task.update(task, data)
        cls._update_action_log_entry(status, task.name, task_uuid, nodes)
        logger.info("RPC method stats_user_resp processed")
Example #11
    def download_release_resp(cls, **kwargs):
        logger.info("RPC method download_release_resp received: %s" % jsonutils.dumps(kwargs))
        task_uuid = kwargs.get("task_uuid")
        error_msg = kwargs.get("error")
        status = kwargs.get("status")
        progress = kwargs.get("progress")

        task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)

        release_info = task.cache["args"]["release_info"]
        release_id = release_info["release_id"]
        release = db().query(Release).get(release_id)
        if not release:
            logger.error("download_release_resp: Release" " with ID %s not found", release_id)
            return

        if error_msg:
            status = "error"
            error_msg = "{0} download and preparation " "has failed.".format(release.name)
            cls._download_release_error(release_id, error_msg)
        elif progress == 100 and status == "ready":
            cls._download_release_completed(release_id)

        result = {"release_info": {"release_id": release_id}}

        data = {"status": status, "progress": progress, "message": error_msg, "result": result}
        objects.Task.update(task, data)
Example #12
def action_syncdb(params):
    from nailgun.db import syncdb
    from nailgun.logger import logger

    logger.info("Syncing database...")
    syncdb()
    logger.info("Done")
Example #13
    def assign_admin_ips(cls, node_id, num=1):
        """Method for assigning admin IP addresses to nodes.

        :param node_id: Node database ID.
        :type  node_id: int
        :param num: Number of IP addresses for node.
        :type  num: int
        :returns: None
        """
        admin_net_id = cls.get_admin_network_group_id()
        node_admin_ips = db().query(IPAddr).filter_by(
            node=node_id,
            network=admin_net_id
        ).all()

        if not node_admin_ips or len(node_admin_ips) < num:
            admin_net = db().query(NetworkGroup).get(admin_net_id)
            logger.debug(
                u"Trying to assign admin ips: node=%s count=%s",
                node_id,
                num - len(node_admin_ips)
            )
            free_ips = cls.get_free_ips(
                admin_net.id,
                num=num - len(node_admin_ips)
            )
            logger.info("Got %s free ips", len(free_ips))
            for ip in free_ips:
                ip_db = IPAddr(
                    node=node_id,
                    ip_addr=ip,
                    network=admin_net_id
                )
                db().add(ip_db)
            db().commit()
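
A short usage sketch (illustrative values; NetworkManager stands in for the class that defines assign_admin_ips):

    # ensure node 42 has two IP addresses in the admin network
    NetworkManager.assign_admin_ips(node_id=42, num=2)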
Example #14
    def send_log_serialized(self, records, ids):
        if records:
            logger.info("Sending %d action log records", len(records))
            resp = self.send_data_to_url(
                url=self.build_collector_url("COLLECTOR_ACTION_LOGS_URL"),
                data={"action_logs": records}
            )
            resp_dict = resp.json()
            if self.is_status_acceptable(resp.status_code,
                                         resp_dict["status"]):
                records_resp = resp_dict["action_logs"]
                saved_ids = set()
                failed_ids = set()
                for record in records_resp:
                    if record["status"] == \
                            consts.LOG_RECORD_SEND_STATUS.failed:
                        failed_ids.add(record["external_id"])
                    else:
                        saved_ids.add(record["external_id"])
                sent_saved_ids = set(saved_ids) & set(ids)
                logger.info("Action logs records saved: %s, failed: %s",
                            six.text_type(list(sent_saved_ids)),
                            six.text_type(list(failed_ids)))
                db().query(models.ActionLog).filter(
                    models.ActionLog.id.in_(sent_saved_ids)
                ).update(
                    {"is_sent": True}, synchronize_session=False
                )
                db().commit()
            else:
                logger.error("Unexpected collector answer: %s",
                             six.text_type(resp.text))
Example #15
def run():
    logger.info("Starting standalone RPC consumer...")
    with Connection(rpc.conn_str) as conn:
        try:
            RPCConsumer(conn, NailgunReceiver).run()
        except (KeyboardInterrupt, SystemExit):
            logger.info("Stopping standalone RPC consumer...")
Example #16
    def update_task_status(cls, uuid, status, progress, msg="", result=None):
        # the verify_networks task expects to receive a result with
        # some data if connectivity_verification fails
        logger.debug("Updating task: %s", uuid)
        task = db().query(Task).filter_by(uuid=uuid).first()
        if not task:
            logger.error("Can't set status='%s', message='%s':no task \
                    with UUID %s found!", status, msg, uuid)
            return
        previous_status = task.status
        data = {'status': status, 'progress': progress,
                'message': msg, 'result': result}
        for key, value in data.iteritems():
            if value is not None:
                setattr(task, key, value)
                logger.info(
                    u"Task {0} ({1}) {2} is set to {3}".format(
                        task.uuid,
                        task.name,
                        key,
                        value
                    )
                )
        db().add(task)
        db().commit()

        if previous_status != status and task.cluster_id:
            logger.debug("Updating cluster status: "
                         "cluster_id: %s status: %s",
                         task.cluster_id, status)
            cls.update_cluster_status(uuid)
        if task.parent:
            logger.debug("Updating parent task: %s.", task.parent.uuid)
            cls.update_parent_task(task.parent.uuid)
Example #17
    def send_oswl_serialized(self, rec_data, ids):
        if rec_data:
            resp = self.send_data_to_url(
                url=self.build_collector_url("COLLECTOR_OSWL_INFO_URL"),
                data={"oswl_stats": rec_data}
            )
            resp_dict = resp.json()
            if self.is_status_acceptable(resp.status_code,
                                         resp_dict["status"]):
                records_resp = resp_dict["oswl_stats"]
                status_failed = consts.LOG_RECORD_SEND_STATUS.failed
                saved_ids = set(r["id"] for r in records_resp
                                if r["status"] != status_failed)
                failed_ids = set(r["id"] for r in records_resp
                                 if r["status"] == status_failed)
                sent_saved_ids = set(saved_ids) & set(ids)
                logger.info("OSWL info records saved: %s, failed: %s",
                            six.text_type(list(sent_saved_ids)),
                            six.text_type(list(failed_ids)))
                if sent_saved_ids:
                    db().query(models.OpenStackWorkloadStats).filter(
                        models.OpenStackWorkloadStats.id.in_(sent_saved_ids)
                    ).update(
                        {"is_sent": True}, synchronize_session=False
                    )
                    db().commit()
            else:
                logger.error("Unexpected collector answer: %s",
                             six.text_type(resp.text))
Example #18
def upload_fixtures():
    # TODO(akislitsky): Temporary workaround for preventing uploading
    # fixtures on Fuel upgrade and container restart. We should remove
    # this after DB data upload is switched from fixtures loading
    # to the API calls.
    if db().query(models.Release).count():
        logger.info("Fixtures are already uploaded. Skipping")
        return

    fixtures_paths = get_all_fixtures_paths()
    for orig_path in settings.FIXTURES_TO_UPLOAD:
        if os.path.isabs(orig_path):
            path = orig_path
        else:
            for fixtures_path in fixtures_paths:
                path = os.path.abspath(
                    os.path.join(
                        fixtures_path,
                        orig_path
                    )
                )
                if os.access(path, os.R_OK):
                    break
        if os.access(path, os.R_OK):
            with open(path, "r") as fileobj:
                upload_fixture(fileobj)
            logger.info("Fixture has been uploaded from file: %s", path)
Example #19
def action_loaddefault(params):
    from nailgun.db.sqlalchemy import fixman
    from nailgun.logger import logger

    logger.info("Uploading fixture...")
    fixman.upload_fixtures()
    logger.info("Done")
Example #20
    def send_action_log(self):
        action_log = db().query(models.ActionLog).order_by(
            models.ActionLog.id
        ).filter_by(
            is_sent=False
        ).limit(settings.STATS_SEND_COUNT)
        logger.info("Action log has %d unsent records", action_log.count())

        uid = InstallationInfo().get_master_node_uid()
        offset = 0
        while True:
            log_chunk = action_log.offset(offset)
            records = []
            ids = []
            logger.info("Send records: %s", six.text_type(log_chunk.count()))
            for log_record in log_chunk:
                body = objects.ActionLog.to_dict(log_record)
                record = {
                    'external_id': body['id'],
                    'master_node_uid': uid,
                    'body': body
                }
                records.append(record)
                ids.append(log_record.id)
            self.send_log_serialized(records, ids)
            if log_chunk.count() < settings.STATS_SEND_COUNT:
                break
            offset += settings.STATS_SEND_COUNT
Example #21
def action_syncdb(params):
    from nailgun.db.migration import do_upgrade_head
    from nailgun.logger import logger

    logger.info("Syncing database...")
    do_upgrade_head()
    logger.info("Done")
Example #22
    def PUT(self, cluster_id):
        cluster = self.get_object_or_404(
            Cluster,
            cluster_id,
            log_404=(
                "warning",
                "Error: there is no cluster "
                "with id '{0}' in DB.".format(cluster_id)))

        try:
            network_info = \
                NetworkConfigurationSerializer.serialize_for_cluster(
                    cluster
                )
            logger.info(
                u"Network info:\n{0}".format(
                    json.dumps(network_info, indent=4)
                )
            )
            task_manager = DeploymentTaskManager(
                cluster_id=cluster.id
            )
            task = task_manager.execute()
        except Exception as exc:
            logger.warn(u'ClusterChangesHandler: error while executing '
                        u'deploy task: {0}'.format(str(exc)))
            raise web.badrequest(str(exc))

        return TaskHandler.render(task)
Example #23
def action_dropdb(params):
    from nailgun.db import dropdb
    from nailgun.logger import logger

    logger.info("Dropping database...")
    dropdb()
    logger.info("Done")
Example #24
def run():
    logger.info("Starting standalone stats sender...")
    try:
        while True:
            StatsSender().send_stats_once()
    except (KeyboardInterrupt, SystemExit):
        logger.info("Stopping standalone stats sender...")
Example #25
def action_test(params):
    from nailgun.logger import logger
    from nailgun.unit_test import TestRunner

    logger.info("Running tests...")
    TestRunner.run()
    logger.info("Done")
Example #26
    def update_task_status(cls, uuid, status, progress,
                           msg="", result=None):
        logger.debug("Updating task: %s", uuid)
        task = db().query(Task).filter_by(uuid=uuid).first()
        if not task:
            logger.error("Can't set status='%s', message='%s':no task \
                    with UUID %s found!", status, msg, uuid)
            return

        data = {'status': status, 'progress': progress,
                'message': msg, 'result': result}

        for key, value in data.iteritems():
            if value is not None:
                setattr(task, key, value)
                logger.info(
                    u"Task {0} ({1}) {2} is set to {3}".format(
                        task.uuid, task.name, key, value))
        db().commit()

        if task.cluster_id:
            logger.debug("Updating cluster status: %s "
                         "cluster_id: %s status: %s",
                         uuid, task.cluster_id, status)
            cls.update_cluster_status(uuid)
        if task.parent:
            logger.debug("Updating parent task: %s.", task.parent.uuid)
            cls.update_parent_task(task.parent.uuid)
Example #27
def action_loadfakedeploymenttasks(params):
    from nailgun.db.sqlalchemy import fixman
    from nailgun.logger import logger

    logger.info("Applying fake deployment tasks to all releases...")
    fixman.load_fake_deployment_tasks()
    logger.info("Done")
Example #28
    def stop_deployment_resp(cls, **kwargs):
        logger.info("RPC method stop_deployment_resp received: %s" % jsonutils.dumps(kwargs))
        task_uuid = kwargs.get("task_uuid")
        nodes = kwargs.get("nodes", [])
        ia_nodes = kwargs.get("inaccessible_nodes", [])
        message = kwargs.get("error")
        status = kwargs.get("status")
        progress = kwargs.get("progress")

        task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)

        stopping_task_names = [consts.TASK_NAMES.deploy,
                               consts.TASK_NAMES.deployment,
                               consts.TASK_NAMES.provision]

        q_stop_tasks = objects.TaskCollection.filter_by_list(
            None, "name", stopping_task_names)
        q_stop_tasks = objects.TaskCollection.filter_by(
            q_stop_tasks, cluster_id=task.cluster_id)
        stop_tasks = objects.TaskCollection.order_by(q_stop_tasks, "id").all()

        # Locking cluster
        objects.Cluster.get_by_uid(task.cluster_id,
                                   fail_if_not_found=True,
                                   lock_for_update=True)

        if not stop_tasks:
            logger.warning(
                "stop_deployment_resp: deployment tasks \
                            not found for environment '%s'!",
                task.cluster_id,
            )

        if status == consts.TASK_STATUSES.ready:
            task.cluster.status = consts.CLUSTER_STATUSES.stopped

            if stop_tasks:
                map(db().delete, stop_tasks)

            node_uids = [n["uid"] for n in itertools.chain(nodes, ia_nodes)]
            q_nodes = objects.NodeCollection.filter_by_id_list(None, node_uids)
            q_nodes = objects.NodeCollection.filter_by(
                q_nodes, cluster_id=task.cluster_id)
            q_nodes = objects.NodeCollection.order_by(q_nodes, "id")

            # locking Nodes for update
            update_nodes = objects.NodeCollection.lock_for_update(
                q_nodes).all()

            for node in update_nodes:
                objects.Node.reset_to_discover(node)

            if ia_nodes:
                cls._notify_inaccessible(
                    task.cluster_id,
                    [n["uid"] for n in ia_nodes],
                    u"deployment stopping")

            message = (
                u"Deployment of environment '{0}' was successfully stopped. "
                u"Please make changes and reset the environment "
                u"if you want to redeploy it.".format(task.cluster.name or task.cluster_id)
            )

            notifier.notify("done", message, task.cluster_id)

        data = {"status": status, "progress": progress, "message": message}
        objects.Task.update(task, data)

        cls._update_action_log_entry(status, task.name, task_uuid, nodes)
Example #29
    def PUT(self):
        """:returns: node id.

        :http: * 200 (node is successfully updated)
               * 304 (node data not changed since last request)
               * 400 (data validation failed)
               * 404 (node not found)
        """
        nd = self.checked_data(
            self.validator.validate_update,
            data=web.data())

        node = self.collection.single.get_by_meta(nd)

        if not node:
            raise self.http(404, "Can't find node: {0}".format(nd))

        node.timestamp = datetime.now()

        if not node.online:
            node.online = True
            msg = u"Node '{0}' is back online".format(node.human_readable_name)
            logger.info(msg)
            notifier.notify("discover", msg, node_id=node.id)
        db().flush()

        if 'agent_checksum' in nd and (
            node.agent_checksum == nd['agent_checksum']
        ):
            return {'id': node.id, 'cached': True}

        self.collection.single.update_by_agent(node, nd)
        return {"id": node.id}
Example #30
    def _execute_async(self, supertask_id, deployment_tasks=None,
                       nodes_to_provision_deploy=None):
        """Function for execute task in the mule
        :param supertask_id: id of parent task
        """
        logger.info(u"ApplyChangesTask: execute async starting for task %s",
                    supertask_id)

        supertask = objects.Task.get_by_uid(supertask_id)

        try:
            self._execute_async_content(
                supertask,
                deployment_tasks=deployment_tasks,
                nodes_to_provision_deploy=nodes_to_provision_deploy)
        except Exception as e:
            logger.exception('Error occurred when running task')
            data = {
                'status': consts.TASK_STATUSES.error,
                'progress': 100,
                'message': u'Error occurred when running task: {0}'.format(
                    e.message),
            }
            objects.Task.update(supertask, data)
            db().commit()
Example #31
    def reusable_ip_address(cls, node, network):
        """Verifies that ip belongs to network and creates IPAddr in case it did

        :param node: Node database object.
        :param network: Network database object.
        :returns: IPAddr object or None
        """
        # temporarily commented out for now:
        # if node.ip and cls.check_ip_belongs_to_net(node.ip, network):
        logger.info("node {0}: admin ip is {1}, power ip is {2}".format(
            node.name, node.ip, node.power_ip))
        return IPAddr(node=node.id, ip_addr=node.ip, network=network.id)
Example #32
    def execute(self):
        logger.info("Trying to start dump_environment task")
        self.check_running_task('dump')

        task = Task(name="dump")
        db().add(task)
        db().commit()
        self._call_silently(
            task,
            tasks.DumpTask,
        )
        return task
Example #33
def upload_fixtures():
    fns = []
    for path in settings.FIXTURES_TO_UPLOAD:
        if not os.path.isabs(path):
            path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                                path))
        fns.append(path)

    for fn in fns:
        with open(fn, "r") as fileobj:
            upload_fixture(fileobj)
        logger.info("Fixture has been uploaded from file: %s" % fn)
Example #34
    def multicast_verification_resp(cls, **kwargs):
        """Receiver for verification of multicast packages

        data - {1: response, 2: response}
        """
        logger.info(
            u"RPC method multicast_resp received: {0}".format(
                jsonutils.dumps(kwargs))
        )
        task_uuid = kwargs.get('task_uuid')
        task = objects.task.Task.get_by_uuid(uuid=task_uuid)
        if kwargs.get('status'):
            task.status = kwargs['status']
        task.progress = kwargs.get('progress', 0)

        response = kwargs.get('nodes', {})
        error_msg = kwargs.get('error')

        if task.status == TASK_STATUSES.error:
            task.message = error_msg
        elif task.status == TASK_STATUSES.ready:
            errors = []
            results = []
            node_ids = set(config['uid'] for config
                           in task.cache['args']['nodes'])
            not_received_nodes = node_ids - set(response.keys())
            if not_received_nodes:
                msg = (u'No answer from nodes: {0}').format(
                    list(not_received_nodes))
                errors.append(msg)
            for node_id, received_ids in response.iteritems():
                not_received_ids = node_ids - set(received_ids or [])
                result = {'node_id': node_id,
                          'not_received': list(not_received_ids)}
                results.append(result)
                if not_received_ids:
                    msg = (u'Not received ids {0}'
                           u' for node {1}.').format(not_received_ids, node_id)
                    errors.append(msg)

            task.message = '\n'.join(errors)
            if errors:
                task.status = TASK_STATUSES.error
            task.result = results
        if task.status == TASK_STATUSES.ready:
            editable = copy.deepcopy(task.cluster.attributes.editable)
            editable['corosync']['verified']['value'] = True
            task.cluster.attributes.editable = editable
        logger.debug(u'Multicast verification message %s', task.message)
        objects.Task.update_verify_networks(
            task, task.status,
            task.progress, task.message, task.result)
Example #35
    def send_installation_info(self):
        logger.info("Sending installation structure info")
        inst_info = InstallationInfo().get_installation_info()
        resp = self.send_data_to_url(
            url=self.build_collector_url("COLLECTOR_INST_INFO_URL"),
            data={"installation_structure": inst_info})
        resp_dict = resp.json()
        if self.is_status_acceptable(resp.status_code, resp_dict["status"]):
            logger.info("Installation structure info saved in collector")
        else:
            logger.error("Unexpected collector answer: %s",
                         six.text_type(resp.text))
Example #36
    def PUT(self, cluster_id):
        """:returns: JSONized Task object.

        :http: * 202 (task successfully executed)
               * 400 (invalid object data specified)
               * 404 (environment is not found)
               * 409 (task with such parameters already exists)
        """
        cluster = self.get_object_or_404(
            objects.Cluster,
            cluster_id,
            log_404=(
                u"warning",
                u"Error: there is no cluster "
                u"with id '{0}' in DB.".format(cluster_id)
            )
        )

        logger.info(self.log_message.format(env_id=cluster_id))

        try:
            options = self.get_options()
        except ValueError as e:
            raise self.http(400, six.text_type(e))

        try:
            self.validator.validate(cluster)
            task_manager = self.task_manager(cluster_id=cluster.id)
            task = task_manager.execute(**options)
        except (
            errors.AlreadyExists,
            errors.StopAlreadyRunning
        ) as exc:
            raise self.http(409, exc.message)
        except (
            errors.DeploymentNotRunning,
            errors.NoDeploymentTasks,
            errors.WrongNodeStatus,
            errors.UnavailableRelease,
            errors.CannotBeStopped,
        ) as exc:
            raise self.http(400, exc.message)
        except Exception as exc:
            logger.error(
                self.log_error.format(
                    env_id=cluster_id,
                    error=str(exc)
                )
            )
            # let it be 500
            raise

        self.raise_task(task)
Example #37
    def execute(self, **kwargs):
        logger.info("Starting update_dnsmasq task")
        self.check_running_task(consts.TASK_NAMES.update_dnsmasq)

        task = Task(name=consts.TASK_NAMES.update_dnsmasq)
        db().add(task)
        db().commit()
        self._call_silently(
            task,
            tasks.UpdateDnsmasqTask
        )
        return task
Example #38
def run():
    resource_type = sys.argv[1]
    poll_interval = settings.OSWL_COLLECTORS_POLLING_INTERVAL[resource_type]
    logger.info(
        "Starting OSWL collector for {0} resource".format(resource_type))
    try:
        while True:
            collect(resource_type)
            time.sleep(poll_interval)
    except (KeyboardInterrupt, SystemExit):
        logger.info(
            "Stopping OSWL collector for {0} resource".format(resource_type))
Example #39
    def update_nodetype(cls, instance, node_type):
        # Node.update(instance, {'node_type': int(node_type)}) has no
        # effect here, because the update() below runs
        # node_type=instance.node_type.
        # The node usage type can only be changed in 'discover' status.
        if instance.status in ("discover",):
            super(Node, cls).update(instance, {
                'node_type': int(node_type),
                'cluster_id': None,
                'group_id': None
            })
        logger.info('successfully updated node {0} to type {1}'.format(
            instance.ip, node_type))
Example #40
    def provision_resp(cls, **kwargs):
        logger.info("RPC method provision_resp received: %s" %
                    jsonutils.dumps(kwargs))

        task_uuid = kwargs.get('task_uuid')
        message = kwargs.get('error')
        status = kwargs.get('status')
        progress = kwargs.get('progress')
        nodes = kwargs.get('nodes', [])
        # if an error is returned, the nodes parameter is not included,
        # so on a failed installation the node status in the UI is not updated
        task = objects.Task.get_by_uuid(task_uuid,
                                        fail_if_not_found=True,
                                        lock_for_update=True)

        # lock nodes for updating
        q_nodes = objects.NodeCollection.filter_by_id_list(
            None,
            [n['uid'] for n in nodes],
        )
        q_nodes = objects.NodeCollection.order_by(q_nodes, 'id')
        objects.NodeCollection.lock_for_update(q_nodes).all()

        for node in nodes:
            uid = node.get('uid')
            node_db = objects.Node.get_by_uid(node['uid'])

            if not node_db:
                logger.warn('Node with uid "{0}" not found'.format(uid))
                continue
            if node.get('status') == 'error':
                node_db.status = 'error'
                node_db.progress = 100
                node_db.error_type = 'provision'
                node_db.error_msg = node.get('error_msg', 'Unknown error')
            elif node.get('status') == 'provisioned':
                node_db.status = node.get('status')
                node_db.progress = node.get('progress')
                node_db.power_ip = node_db.ip
                logger.info("change the node {0} new power_ip is {1}".format(
                    node_db.id, node_db.power_ip))
            else:
                node_db.status = node.get('status')
                node_db.progress = node.get('progress')
        db().flush()
        if nodes and not progress:
            progress = TaskHelper.recalculate_provisioning_task_progress(task)

        data = {'status': status, 'progress': progress, 'message': message}
        objects.Task.update(task, data)

        cls._update_action_log_entry(status, task_uuid, nodes)
Example #41
    def check_untagged_intersection(self):
        """check if there are untagged networks on the same interface
        (both nova-net and neutron)
        """
        netw_untagged = lambda n: (n['vlan_start'] is None) \
            and (not n['meta'].get('ext_net_data')) \
            and (not n['meta'].get('neutron_vlan_range'))
        untagged_nets = dict([(n['id'], n['name']) for n in self.networks
                              if netw_untagged(n)])
        # check that each nic has only one untagged network assigned
        if len(untagged_nets) >= 2:
            logger.info(
                "Untagged networks found, "
                "checking intersection between them...")

            bond_interfaces = (
                objects.Cluster.get_bond_interfaces_for_all_nodes(
                    self.cluster,
                    untagged_nets.keys()))
            nic_interfaces = (
                objects.Cluster.get_nic_interfaces_for_all_nodes(
                    self.cluster,
                    untagged_nets.keys()))
            found_intersection = []
            all_interfaces = bond_interfaces + nic_interfaces
            for iface in all_interfaces:
                # network name is changed for Admin on UI
                nets = [[ng['name'] for ng in self.networks
                        if n.id == ng['id']][0]
                        for n in iface.assigned_networks_list]
                crossed_nets = set(nets) & set(untagged_nets.values())
                if len(crossed_nets) > 1:
                    err_net_names = ['"{0}"'.format(i)
                                     for i in crossed_nets]
                    found_intersection.append((objects.Node.get_by_mac_or_uid(
                        node_uid=iface.node_id).name,
                        err_net_names))

            if found_intersection:
                nodes_with_errors = [
                    u'{1} networks at node "{0}"'.format(
                        int_node,
                        ", ".join(int_nets)
                    ) for int_node, int_nets in found_intersection]
                self.err_msgs.append(
                    u"Some untagged networks are assigned to the same "
                    u"physical interface. You should assign them to "
                    u"different physical interfaces. Affected:\n{0}".format(
                        "\n".join(nodes_with_errors)))
                self.result.append({"ids": [],
                                    "errors": []})
        self.expose_error_messages()
Example #42
    def execute(self, conf=None, **kwargs):
        logger.info("Trying to start dump_environment task")
        self.check_running_task(consts.TASK_NAMES.dump)

        task = Task(name=consts.TASK_NAMES.dump)
        db().add(task)
        db().flush()
        self._call_silently(
            task,
            tasks.DumpTask,
            conf=conf
        )
        return task
Example #43
def action_extensions(params):
    from nailgun.logger import logger
    from nailgun.db.migration import action_migrate_alembic_extension
    from nailgun.extensions import get_all_extensions

    for extension in get_all_extensions():
        if extension.alembic_migrations_path():
            logger.info('Running command for extension {0}'.format(
                extension.full_name()))
            action_migrate_alembic_extension(params, extension=extension)
        else:
            logger.info('Extension {0} does not have migrations. '
                        'Skipping...'.format(extension.full_name()))
Example #44
    def remove_images_resp(cls, **kwargs):
        logger.info("RPC method remove_images_resp received: %s",
                    jsonutils.dumps(kwargs))
        status = kwargs.get('status')
        task_uuid = kwargs['task_uuid']
        task = objects.Task.get_by_uuid(task_uuid)

        if status == consts.TASK_STATUSES.ready:
            logger.info("IBP images from deleted cluster have been removed")
        elif status == consts.TASK_STATUSES.error:
            logger.error("Removing IBP images failed: task_uuid %s", task_uuid)

        objects.Task.update(task, {'status': status})
Example #45
    def check_dhcp_resp(cls, **kwargs):
        """Receiver method for check_dhcp task

        For example of kwargs check FakeCheckingDhcpThread
        """
        logger.info(
            "RPC method check_dhcp_resp received: %s",
            jsonutils.dumps(kwargs)
        )
        messages = []

        result = collections.defaultdict(list)
        message_template = (
            u"Node {node_name} discovered DHCP server "
            u"via {iface} with following parameters: IP: {server_id}, "
            u"MAC: {mac}. This server will conflict with the installation.")
        task_uuid = kwargs.get('task_uuid')
        nodes = kwargs.get('nodes', [])
        error_msg = kwargs.get('error')
        status = kwargs.get('status')
        progress = kwargs.get('progress')

        nodes_uids = [node['uid'] for node in nodes]
        nodes_db = db().query(Node).filter(Node.id.in_(nodes_uids)).all()
        nodes_map = dict((str(node.id), node) for node in nodes_db)

        master_network_mac = settings.ADMIN_NETWORK['mac']
        logger.debug('Mac addr on master node %s', master_network_mac)

        for node in nodes:
            if node['status'] == 'ready':
                for row in node.get('data', []):
                    if not net_utils.is_same_mac(row['mac'],
                                                 master_network_mac):
                        node_db = nodes_map.get(node['uid'])
                        if node_db:
                            row['node_name'] = node_db.name
                            message = message_template.format(**row)
                            messages.append(message)
                            result[node['uid']].append(row)
                        else:
                            logger.warning(
                                'Received message from nonexistent node. '
                                'Message %s', row)
        status = status if not messages else "error"
        error_msg = '\n'.join(messages) if messages else error_msg
        logger.debug('Check dhcp message %s', error_msg)

        task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)
        objects.Task.update_verify_networks(task, status, progress,
                                            error_msg, result)
Example #46
    def check_repositories_with_setup_resp(cls, **kwargs):
        logger.info("RPC method check_repositories_with_setup received: %s",
                    jsonutils.dumps(kwargs))

        failed_nodes_msg = (
            'Repo availability verification using public network'
            ' failed on the following nodes {0}.\n ')
        suggestion_msg = (
            'Check your public network settings and '
            'availability of the repositories from public network. '
            'Please examine nailgun and astute'
            ' logs for additional details.')

        cls._check_repos_connectivity(kwargs, failed_nodes_msg, suggestion_msg)
Example #47
    def copyfile(self, filesrc, targetsrc, service):
        # filesrc: source file path
        # targetsrc: destination file path
        # service: whether a service needs to be restarted
        # the shell command returns 0 on success, anything else is failure
        cmd = "scp -r {0} root@{1}:{2}".format(filesrc, self.ip, targetsrc)
        logger.info(cmd)
        result = os.system(cmd)
        if result == 0:
            logger.info(u"Successfully copied net_probe.rb to node {0}"
                        .format(self.ip))
            if service == "mcollective":
                self.restartmocllective()
        else:
            logger.error(u"Failed to copy net_probe.rb to node {0}"
                         .format(self.ip))
Example #48
def execRemoteCmd(ip, cmd, timeout=600):
    ssh_user = '******'
    ssh_password = '******'
    key_filename = '/root/.ssh/id_rsa'
    if timeout is None:
        timeout = 600

    ssh_client = SSHClient(ip, ssh_user, ssh_password, timeout, key_filename)

    result = ssh_client.exec_command(cmd)
    logger.info('exec remote cmd:%s to ip:%s, the result:%s.' %
                (cmd, ip, result))
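
A usage sketch (illustrative host and command; the credentials inside the function are intentionally masked in the source):

    execRemoteCmd('10.20.0.2', 'uptime', timeout=30)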
Example #49
    def update_by_agent(cls, instance, data):
        """Update Node instance with some specific cases for agent.

        * don't update provisioning or error state back to discover
        * don't update volume information if the disks array is empty

        :param data: dictionary of key-value pairs as object fields
        :returns: Node instance
        """
        # don't update provisioning and error back to discover
        if instance.status in ('provisioning', 'error'):
            if data.get('status', 'discover') == 'discover':
                logger.debug(
                    u"Node {0} has provisioning or error status - "
                    u"status not updated by agent".format(
                        instance.human_readable_name
                    )
                )

                data['status'] = instance.status

        meta = data.get('meta', {})
        # don't update volume information, if agent has sent an empty array
        if len(meta.get('disks', [])) == 0 and instance.meta.get('disks'):

            logger.warning(
                u'Node {0} has received an empty disks array - '
                u'volume information will not be updated'.format(
                    instance.human_readable_name
                )
            )
            meta['disks'] = instance.meta['disks']

        # don't update volume information, if it is locked by node status
        if 'disks' in meta and cls.hardware_info_locked(instance):
            logger.info("Volume information is locked for update on node %s",
                        instance.human_readable_name)
            meta['disks'] = instance.meta['disks']

        # (dshulyak) change this verification to NODE_STATUSES.deploying
        # after we start reusing ips from the dhcp range
        netmanager = Cluster.get_network_manager()
        admin_ng = netmanager.get_admin_network_group(instance.id)
        if data.get('ip') and not netmanager.is_same_network(data['ip'],
                                                             admin_ng.cidr):
            logger.debug(
                'Corrupted network data %s, skipping update',
                instance.id)
            return instance
        return cls.update(instance, data)
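
A hedged sketch of the two guards above (illustrative data; node_db stands for a Node instance in 'provisioning' status):

    # the reported 'discover' status is ignored, and the empty disks
    # array does not overwrite the stored volume information
    node = Node.update_by_agent(node_db, {'status': 'discover',
                                          'meta': {'disks': []}})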
Example #50
def upload_fixtures():
    fixtures_paths = get_all_fixtures_paths()
    for orig_path in settings.FIXTURES_TO_UPLOAD:
        if os.path.isabs(orig_path):
            path = orig_path
        else:
            for fixtures_path in fixtures_paths:
                path = os.path.abspath(os.path.join(fixtures_path, orig_path))
                if os.access(path, os.R_OK):
                    break
        if os.access(path, os.R_OK):
            with open(path, "r") as fileobj:
                upload_fixture(fileobj)
            logger.info("Fixture has been uploaded from file: %s", path)
Example #51
    def remove_cluster_resp(cls, **kwargs):
        logger.info(
            "RPC method remove_cluster_resp received: %s" %
            jsonutils.dumps(kwargs)
        )
        task_uuid = kwargs.get('task_uuid')

        # in remove_nodes_resp method all objects are already locked
        cls.remove_nodes_resp(**kwargs)

        task = objects.Task.get_by_uuid(task_uuid, fail_if_not_found=True)
        cluster = task.cluster

        if task.status in ('ready',):
            logger.debug("Removing environment itself")
            cluster_name = cluster.name

            ips = db().query(IPAddr).filter(
                IPAddr.network.in_([n.id for n in cluster.network_groups])
            )
            map(db().delete, ips)
            db().flush()

            db().delete(cluster)
            db().flush()

            notifier.notify(
                "done",
                u"Environment '%s' and all its nodes are deleted" % (
                    cluster_name
                )
            )

        elif task.status in ('error',):
            cluster.status = 'error'
            db().add(cluster)
            db().flush()
            if not task.message:
                task.message = "Failed to delete nodes:\n{0}".format(
                    cls._generate_error_message(
                        task,
                        error_types=('deletion',)
                    )
                )
            notifier.notify(
                "error",
                task.message,
                cluster.id
            )
Example #52
    def POST(self):
        x = web.input(ids='')
        idlist = x.ids.split(",")
        for pid in idlist:
            p = objects.PhysicalMachineInfoObject.get_by_uid(int(pid))
            powerstatuscmd = ("ipmitool -I lanplus -H %s -U %s -P %s "
                              "chassis power status"
                              % (p.mp_ip, p.mp_username, p.mp_passwd))
            cmd = os.popen(powerstatuscmd).read()
            logger.info("the node %s is %s" % (p.ip, cmd))
            # output is "Chassis Power is on" or "Chassis Power is off"
            if "on" in cmd:
                PhysicalMachineInfo.update(p, {'power_status': 1})
            else:
                PhysicalMachineInfo.update(p, {'power_status': 0})

        return json.dumps({'result': "success"})
Example #53
    def POST(self):
        x = web.input(ids='')
        idlist = x.ids.split(",")
        for pid in idlist:
            p = objects.PhysicalMachineInfoObject.get_by_uid(int(pid))
            additional_info = dict(
                p.additional_info) if p.additional_info else {}
            additional_info["init_status"] = "wait"
            PhysicalMachineInfo.update(p, {'additional_info': additional_info})
            db().commit()

        t = threading.Thread(target=self.thread_excutemethd, args=(idlist, ))
        t.start()
        logger.info(u"PhymachineInitAgentIdsHandler主线程执行完毕")
        return json.dumps({'result': "success"})
Example #54
    def __init__(self, task, data):
        """Collect Network Groups data
        """
        self.cluster = task.cluster
        self.task = task
        self.data = data
        self.net_man = objects.Cluster.get_network_manager(self.cluster)
        # NetworkManager, NeutronManager, NovaNetworkManager
        self.net_provider = self.cluster.net_provider
        # value of the net_provider column (an enum)
        admin_ng = self.net_man.get_admin_network_group()
        # 1. admin_ngs = db().query(NetworkGroup).filter_by(name="fuelweb_admin")
        # 2. admin_ng = admin_ng or admin_ngs.filter_by(group_id=None).first()
        fields = NetworkGroup.__mapper__.columns.keys() + ['meta']
        net = NetworkConfigurationSerializer.serialize_network_group(admin_ng,
                                                                     fields)
        # change Admin name for UI
        net.update(name='admin (PXE)')
        self.networks = [net]
        for ng in self.cluster.network_groups:
            net = NetworkConfigurationSerializer.serialize_network_group(
                ng,
                fields)
            self.networks.append(net)
        # merge with data['networks']
        logger.info(data)
        if 'networks' in data:
            for data_net in data['networks']:
                for net in self.networks:
                    if data_net['id'] == net['id']:
                        if data_net.get('meta'):
                            data_net.pop('meta')
                        net.update(data_net)
                        if data_net.get('name') == 'fuelweb_admin':
                            net.update(name='admin (PXE)')
                        break
                else:
                    raise errors.NetworkCheckError(
                        u"Invalid network ID: {0}".format(data_net['id']))
        # get common networking parameters
        serializer = {'neutron': NeutronNetworkConfigurationSerializer,
                      'nova_network': NovaNetworkConfigurationSerializer}
        self.network_config = serializer[self.net_provider].\
            serialize_network_params(self.cluster)
        self.network_config.update(data.get('networking_parameters', {}))

        self.result = []
        self.err_msgs = []
Example #55
    def threadupdatestatus(self, pid, util):
        # excute_initnodeshell blocks until init_node.sh has finished
        # on every node; only then does the code below run
        util.excute_initnodeshell()
        phymachine = objects.PhysicalMachineInfoObject.get_by_uid(pid)
        additional_info = dict(
            phymachine.additional_info) if phymachine.additional_info else {}
        additional_info["init_status"] = False
        try:
            util.getshellresulst()
            util.copynailgunagent()
            logger.info(phymachine.ip +
                        u": read initnode_res file, worker thread "
                        u"set init_status to true")
            additional_info["init_status"] = True
        except Exception, e:
            logger.info(phymachine.ip +
                        u": exception while reading initnode_res file")
Example #56
    def remove_nodes_resp(cls, **kwargs):
        logger.info("RPC method remove_nodes_resp received: %s" % kwargs)
        task_uuid = kwargs.get('task_uuid')
        nodes = kwargs.get('nodes') or []
        error_nodes = kwargs.get('error_nodes') or []
        error_msg = kwargs.get('error')
        status = kwargs.get('status')
        progress = kwargs.get('progress')

        for node in nodes:
            node_db = cls.db.query(Node).get(node['uid'])
            if not node_db:
                logger.error(u"Failed to delete node '%s': node doesn't exist",
                             str(node))
                break
            cls.db.delete(node_db)

        for node in error_nodes:
            node_db = cls.db.query(Node).get(node['uid'])
            if not node_db:
                logger.error(
                    u"Failed to delete node '%s' marked as error from Naily:"
                    " node doesn't exist", str(node))
                break
            node_db.pending_deletion = False
            node_db.status = 'error'
            cls.db.add(node_db)
            node['name'] = node_db.name
        cls.db.commit()

        success_msg = u"No nodes were removed"
        err_msg = u"No errors occurred"
        if nodes:
            success_msg = u"Successfully removed {0} node(s)".format(
                len(nodes))
            notifier.notify("done", success_msg)
        if error_nodes:
            err_msg = u"Failed to remove {0} node(s): {1}".format(
                len(error_nodes), ', '.join([
                    n.get('name') or "ID: {0}".format(n['uid'])
                    for n in error_nodes
                ]))
            notifier.notify("error", err_msg)
        if not error_msg:
            error_msg = ". ".join([success_msg, err_msg])

        TaskHelper.update_task_status(task_uuid, status, progress, error_msg)
Example #57
def upgrade_release_fill_orchestrator_data(connection, versions):
    """Fill release_orchestrator_data if it's not filled yet.

    :param connection: a database connection
    :param versions: a list of release versions to be processed
    """
    logger.info("upgrade_release_fill_orchestrator_data")
    for version in versions:
        select_query = text(
            "SELECT id, operating_system FROM releases "
            "   WHERE version LIKE :version AND id NOT IN ("
            "       SELECT release_id FROM release_orchestrator_data "
            "   )")

        releases = connection.execute(select_query, version=version)

        for release in releases:
            insert_query = text(
                "INSERT INTO release_orchestrator_data ("
                "       release_id, repo_metadata, puppet_manifests_source, "
                "       puppet_modules_source)"
                "   VALUES ("
                "       :release_id, "
                "       :repo_metadata, "
                "       :puppet_manifests_source, "
                "       :puppet_modules_source)")

            # if release_orchestrator_data isn't filled, the release's
            # repos are stored in an unversioned directory containing
            # the word "fuelweb"
            repo_path = 'http://{MASTER_IP}:8080/{OS}/fuelweb/x86_64'.format(
                MASTER_IP=settings.MASTER_IP, OS=release[1].lower())

            # for ubuntu we need to add 'precise main'
            if release[1].lower() == 'ubuntu':
                repo_path += ' precise main'

            connection.execute(
                insert_query,
                release_id=release[0],
                repo_metadata=('{{ "nailgun": "{0}" }}'.format(repo_path)),
                puppet_manifests_source=(
                    'rsync://{MASTER_IP}:/puppet/manifests/'.format(
                        MASTER_IP=settings.MASTER_IP)),
                puppet_modules_source=(
                    'rsync://{MASTER_IP}:/puppet/modules/'.format(
                        MASTER_IP=settings.MASTER_IP)),
            )
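
A usage sketch (the LIKE patterns are illustrative; connection is a SQLAlchemy connection, as in the signature above):

    upgrade_release_fill_orchestrator_data(connection, ['5.0%', '5.1%'])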
Example #58
def upload_fixtures():
    fixtures_paths = [
        '/etc/nailgun/fixtures',
        os.path.join(os.path.dirname(__file__), '..', '..', 'fixtures')
    ]
    for orig_path in settings.FIXTURES_TO_UPLOAD:
        if os.path.isabs(orig_path):
            path = orig_path
        else:
            for fixtures_path in fixtures_paths:
                path = os.path.abspath(os.path.join(fixtures_path, orig_path))
                if os.access(path, os.R_OK):
                    break
        if os.access(path, os.R_OK):
            with open(path, "r") as fileobj:
                upload_fixture(fileobj)
            logger.info("Fixture has been uploaded from file: %s", path)
Example #59
    def provision_resp(cls, **kwargs):
        # For now the provision task is nothing more than adding the
        # system into cobbler and rebooting the node. Then we consider
        # the task ready. We don't wait for the node provisioning to end.
        logger.info("RPC method provision_resp received: %s" %
                    json.dumps(kwargs))
        task_uuid = kwargs.get('task_uuid')
        message = kwargs.get('error')
        status = kwargs.get('status')
        progress = kwargs.get('progress')

        task = db().query(Task).filter_by(uuid=task_uuid).first()
        if not task:
            logger.warning(u"No task with uuid %s found", task_uuid)
            return

        TaskHelper.update_task_status(task.uuid, status, progress, message)
Example #60
def delete_expired_oswl_entries():
    try:
        deleted_rows_count = \
            objects.OpenStackWorkloadStatsCollection.clean_expired_entries()

        if deleted_rows_count == 0:
            logger.info("There are no expired OSWL entries in db.")

        db().commit()

        logger.info("Expired OSWL entries are " "successfully cleaned from db")

    except Exception as e:
        logger.exception("Exception while cleaning oswls entries from "
                         "db. Details: {0}".format(six.text_type(e)))
    finally:
        db.remove()