Example #1
def delete_replica_schedule(context,
                            replica_id,
                            schedule_id,
                            pre_delete_callable=None,
                            post_delete_callable=None):
    # NOTE(gsamfira): we need to refactor the DB layer a bit to allow
    # two-phase transactions or at least allow running these functions
    # inside a single transaction block.

    q = _soft_delete_aware_query(context, models.ReplicaSchedule).filter(
        models.ReplicaSchedule.id == schedule_id,
        models.ReplicaSchedule.replica_id == replica_id)
    schedule = q.first()
    if not schedule:
        raise exception.NotFound("No such schedule")
    if is_user_context(context):
        if not q.join(models.Replica).filter(
                models.Replica.project_id == context.tenant).first():
            raise exception.NotAuthorized()
    if pre_delete_callable:
        pre_delete_callable(context, schedule)
    count = q.soft_delete()
    if post_delete_callable:
        post_delete_callable(context, schedule)
    if count == 0:
        raise exception.NotFound("0 entries were soft deleted")
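Most of these examples rely on a _soft_delete_aware_query helper that is not included in the listing. The following is a minimal sketch under stated assumptions: a get_session factory exists, the models carry a deleted_at column, and the session is configured with oslo_db's Query class so that .soft_delete() is available on the result. None of these names are taken from the examples themselves.

# Hypothetical sketch only -- the real Coriolis helper may differ.
def _soft_delete_aware_query(context, model, show_deleted=False):
    """Build a query over `model` that hides soft-deleted rows by default."""
    session = get_session()  # assumed oslo_db-backed session factory
    query = session.query(model)
    # Honour an explicit `show_deleted` flag or one carried on the context.
    if not (show_deleted or getattr(context, "show_deleted", False)):
        query = query.filter(model.deleted_at.is_(None))
    return query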
Example #2
def update_endpoint(context, endpoint_id, updated_values):
    endpoint = get_endpoint(context, endpoint_id)
    if not endpoint:
        raise exception.NotFound("Endpoint not found")
    for n in ["name", "description", "connection_info"]:
        if n in updated_values:
            setattr(endpoint, n, updated_values[n])
Example #3
def update_replica(context, replica_id, updated_values):
    replica = get_replica(context, replica_id)
    if not replica:
        raise exception.NotFound("Replica not found")

    mapped_info_fields = {'destination_environment': 'target_environment'}

    updateable_fields = [
        "source_environment", "destination_environment", "notes",
        "network_map", "storage_mappings"
    ]
    for field in updateable_fields:
        if mapped_info_fields.get(field, field) in updated_values:
            LOG.debug("Updating the '%s' field of Replica '%s' to: '%s'",
                      field, replica_id,
                      updated_values[mapped_info_fields.get(field, field)])
            setattr(replica, field,
                    updated_values[mapped_info_fields.get(field, field)])

    non_updateable_fields = set(updated_values.keys()).difference(
        {mapped_info_fields.get(field, field)
         for field in updateable_fields})
    if non_updateable_fields:
        LOG.warn("The following Replica fields can NOT be updated: %s",
                 non_updateable_fields)

    # the oslo_db library uses this method for both the `created_at` and
    # `updated_at` fields
    setattr(replica, 'updated_at', timeutils.utcnow())
Example #4
def delete_endpoint(context, endpoint_id):
    args = {"id": endpoint_id}
    if is_user_context(context):
        args["project_id"] = context.tenant
    count = _soft_delete_aware_query(
        context, models.Endpoint).filter_by(**args).soft_delete()
    if count == 0:
        raise exception.NotFound("0 entries were soft deleted")
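The is_user_context check used here (and in many of the following examples) gates tenant scoping. A rough sketch of the usual shape of such a helper, offered as an assumption rather than the actual Coriolis code:

# Hypothetical sketch -- the real check may also consider roles or user IDs.
def is_user_context(context):
    """Return True when the request comes from a regular (non-admin) user."""
    if not context:
        return False
    if getattr(context, "is_admin", False):
        return False
    # Without a tenant there is nothing to scope the query to.
    return bool(getattr(context, "tenant", None))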
Example #5
def delete_minion_machine(context, minion_machine_id):
    minion_machine = get_minion_machine(context, minion_machine_id)
    # TODO(aznashwan): update models to be soft-delete-aware to
    # avoid needing to hard-delete here:
    count = _soft_delete_aware_query(context, models.MinionMachine).filter_by(
        id=minion_machine_id).delete()
    if count == 0:
        raise exception.NotFound("0 MinionMachine entries were deleted")
Example #6
def get_provider(platform_name, provider_type, event_handler):
    cls = None
    if provider_type == constants.PROVIDER_TYPE_EXPORT:
        cls = EXPORT_PROVIDERS.get(platform_name)
    elif provider_type == constants.PROVIDER_TYPE_IMPORT:
        cls = IMPORT_PROVIDERS.get(platform_name)

    if not cls:
        raise exception.NotFound("Provider not found: %s" % platform_name)
    return cls(event_handler)
Example #7
def delete_replica_tasks_execution(context, execution_id):
    q = _soft_delete_aware_query(context, models.TasksExecution).filter(
        models.TasksExecution.id == execution_id)
    if is_user_context(context):
        if not q.join(models.Replica).filter(
                models.Replica.project_id == context.tenant).first():
            raise exception.NotAuthorized()
    count = q.soft_delete()
    if count == 0:
        raise exception.NotFound("0 entries were soft deleted")
Example #8
def get_action(context, action_id):
    action = _soft_delete_aware_query(context, models.BaseTransferAction)
    if is_user_context(context):
        action = action.filter(
            models.BaseTransferAction.project_id == context.tenant)
    action = action.filter(
        models.BaseTransferAction.base_id == action_id).first()
    if not action:
        raise exception.NotFound("Transfer action not found: %s" % action_id)
    return action
Example #9
File: api.py Project: atoaca/coriolis
def update_replica(context, replica_id, updated_values):
    replica = get_replica(context, replica_id)
    if not replica:
        raise exception.NotFound("Replica not found")
    for n in [
            "source_environment", "destination_environment", "notes",
            "network_map", "storage_mappings"
    ]:
        if n in updated_values:
            setattr(replica, n, updated_values[n])
    setattr(replica, 'updated_at', func.now())
Example #10
def delete_minion_pool_lifecycle_execution(context, execution_id):
    q = _soft_delete_aware_query(context, models.TasksExecution).filter(
        models.TasksExecution.id == execution_id)
    if is_user_context(context):
        if not q.join(models.MinionPoolLifecycle).filter(
                models.MinionPoolLifecycle.project_id == (
                    context.tenant)).first():
            raise exception.NotAuthorized()
    count = q.soft_delete()
    if count == 0:
        raise exception.NotFound("0 entries were soft deleted")
Example #11
def _delete_transfer_action(context, cls, id):
    args = {"base_id": id}
    if is_user_context(context):
        args["project_id"] = context.tenant
    count = _soft_delete_aware_query(context,
                                     cls).filter_by(**args).soft_delete()
    if count == 0:
        raise exception.NotFound("0 entries were soft deleted")

    _soft_delete_aware_query(
        context, models.TasksExecution).filter_by(action_id=id).soft_delete()
Example #12
def delete_service(context, service_id):
    service = get_service(context, service_id)
    count = _soft_delete_aware_query(context, models.Service).filter_by(
        id=service_id).soft_delete()
    if count == 0:
        raise exception.NotFound("0 service entries were soft deleted")
    # NOTE(aznashwan): many-to-many tables with soft deletion on either end of
    # the association are not handled properly so we must manually delete each
    # association ourselves:
    for reg in service.mapped_regions:
        delete_service_region_mapping(context, service_id, reg.id)
Example #13
def update_region(context, region_id, updated_values):
    if not region_id:
        raise exception.InvalidInput(
            "No region ID specified for updating.")
    region = get_region(context, region_id)
    if not region:
        raise exception.NotFound(
            "Region with ID '%s' does not exist." % region_id)

    updateable_fields = ["name", "description", "enabled"]
    _update_sqlalchemy_object_fields(
        region, updateable_fields, updated_values)
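Examples #13 and #20 delegate to a _update_sqlalchemy_object_fields helper that is not shown. A minimal sketch of what such a helper could do, assuming it simply copies whitelisted keys onto the ORM object and rejects anything else:

# Hypothetical sketch -- the real helper may raise different exceptions.
def _update_sqlalchemy_object_fields(obj, updateable_fields, updated_values):
    if not isinstance(updated_values, dict):
        raise exception.InvalidInput(
            "Update payload must be a dict, got: %r" % (updated_values,))
    unknown = set(updated_values.keys()).difference(updateable_fields)
    if unknown:
        raise exception.InvalidInput(
            "The following fields can NOT be updated: %s" % unknown)
    for field, value in updated_values.items():
        setattr(obj, field, value)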
Example #14
def get_provider(platform_name, provider_type, event_handler):
    for provider in CONF.providers:
        cls = utils.load_class(provider)
        if (cls.platform == platform_name
                and issubclass(cls, PROVIDER_TYPE_MAP[provider_type])):
            return cls(event_handler)

    raise exception.NotFound(
        "Provider not found for: %(platform_name)s, %(provider_type)s" % {
            "platform_name": platform_name,
            "provider_type": provider_type
        })
Example #15
 def get_replica_schedule(self,
                          ctxt,
                          replica_id,
                          schedule_id,
                          expired=True):
     schedule = self._get_replica_schedule(ctxt,
                                           replica_id,
                                           schedule_id,
                                           expired=expired)
     if not schedule:
         raise exception.NotFound("Schedule not found")
     return schedule
Example #16
def get_tasks_execution(context, execution_id):
    q = _soft_delete_aware_query(context, models.TasksExecution)
    q = q.join(models.BaseTransferAction)
    q = q.options(orm.joinedload("action"))
    q = q.options(orm.joinedload("tasks"))
    if is_user_context(context):
        q = q.filter(models.BaseTransferAction.project_id == context.tenant)
    execution = q.filter(models.TasksExecution.id == execution_id).first()
    if not execution:
        raise exception.NotFound("Tasks execution not found: %s" %
                                 execution_id)
    return execution
Example #17
def set_execution_status(context, execution_id, status):
    execution = _soft_delete_aware_query(context, models.TasksExecution).join(
        models.TasksExecution.action)
    if is_user_context(context):
        execution = execution.filter(
            models.BaseTransferAction.project_id == context.tenant)
    execution = execution.filter(
        models.TasksExecution.id == execution_id).first()
    if not execution:
        raise exception.NotFound("Tasks execution not found: %s" %
                                 execution_id)

    execution.status = status
Example #18
def delete_region(context, region_id):
    region = get_region(context, region_id)
    count = _soft_delete_aware_query(context, models.Region).filter_by(
        id=region_id).soft_delete()
    if count == 0:
        raise exception.NotFound("0 region entries were soft deleted")
    # NOTE(aznashwan): many-to-many tables with soft deletion on either end of
    # the association are not handled properly so we must manually delete each
    # association ourselves:
    for endp in region.mapped_endpoints:
        delete_endpoint_region_mapping(context, endp.id, region_id)
    for svc in region.mapped_services:
        delete_service_region_mapping(context, svc.id, region_id)
Example #19
def delete_service_region_mapping(context, service_id, region_id):
    args = {"service_id": service_id, "region_id": region_id}
    # TODO(aznashwan): many-to-many relationships have no sane way of
    # supporting soft deletion from the sqlalchemy layer without
    # writing join conditions, so we hard-`delete()` instead of
    # `soft_delete()` until we find a better option:
    count = _soft_delete_aware_query(
        context, models.ServiceRegionMapping).filter_by(
            **args).delete()
    if count == 0:
        raise exception.NotFound(
            "There is no mapping between service '%s' and region '%s'." % (
                service_id, region_id))
Example #20
def update_minion_machine(context, minion_machine_id, updated_values):
    if not minion_machine_id:
        raise exception.InvalidInput(
            "No minion_machine ID specified for updating.")
    minion_machine = get_minion_machine(context, minion_machine_id)
    if not minion_machine:
        raise exception.NotFound(
            "MinionMachine with ID '%s' does not exist." % minion_machine_id)

    updateable_fields = [
        "connection_info", "provider_properties", "status",
        "backup_writer_connection_info", "allocated_action"]
    _update_sqlalchemy_object_fields(
        minion_machine, updateable_fields, updated_values)
Example #21
def delete_endpoint(context, endpoint_id):
    endpoint = get_endpoint(context, endpoint_id)
    args = {"id": endpoint_id}
    if is_user_context(context):
        args["project_id"] = context.tenant
    count = _soft_delete_aware_query(context, models.Endpoint).filter_by(
        **args).soft_delete()
    if count == 0:
        raise exception.NotFound("0 Endpoint entries were soft deleted")
    # NOTE(aznashwan): many-to-many tables with soft deletion on either end of
    # the association are not handled properly so we must manually delete each
    # association ourselves:
    for reg in endpoint.mapped_regions:
        delete_endpoint_region_mapping(context, endpoint_id, reg.id)
Example #22
def update_replica(context, replica_id, updated_values):
    replica = get_replica(context, replica_id)
    if not replica:
        raise exception.NotFound("Replica not found")
    for n in [
            "source_environment", "destination_environment", "notes",
            "network_map", "storage_mappings"
    ]:
        if n in updated_values:
            setattr(replica, n, updated_values[n])

    # the oslo_db library uses this method for both the `created_at` and
    # `updated_at` fields
    setattr(replica, 'updated_at', timeutils.utcnow())
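The comment about timeutils.utcnow refers to the timestamp columns carried by oslo_db-style models. A simplified, from-memory sketch of such a timestamp mixin (not verbatim oslo_db code) shows why assigning updated_at directly is consistent with how created_at is populated:

import sqlalchemy as sa
from oslo_utils import timeutils

# Simplified sketch of an oslo_db-style timestamp mixin (assumption).
class TimestampMixin(object):
    created_at = sa.Column(sa.DateTime, default=lambda: timeutils.utcnow())
    updated_at = sa.Column(sa.DateTime, onupdate=lambda: timeutils.utcnow())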
Example #23
def get_provider(
        platform_name, provider_type, event_handler, raise_if_not_found=True):
    for provider in CONF.providers:
        cls = utils.load_class(provider)
        parent = PROVIDER_TYPE_MAP.get(provider_type)
        if not parent:
            continue
        if (cls.platform == platform_name and issubclass(cls, parent)):
            return cls(event_handler)

    if raise_if_not_found:
        raise exception.NotFound(
            "Provider not found for: %(platform_name)s, %(provider_type)s" %
            {"platform_name": platform_name, "provider_type": provider_type})

    return None
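The raise_if_not_found flag lets callers probe for optional providers without handling exceptions. A hypothetical usage sketch (the platform name is made up for illustration):

# Hypothetical usage -- "openstack" is just an illustrative platform name.
provider = get_provider(
    "openstack", constants.PROVIDER_TYPE_IMPORT, event_handler=None,
    raise_if_not_found=False)
if provider is None:
    LOG.info("No import provider registered for platform 'openstack'")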
Example #24
def _task_process(ctxt, task_id, task_type, origin, destination, instance,
                  task_info, mp_q, mp_log_q):
    try:
        _setup_task_process(mp_log_q)

        if task_type == constants.TASK_TYPE_EXPORT_INSTANCE:
            provider_type = constants.PROVIDER_TYPE_EXPORT
            data = origin
        elif task_type == constants.TASK_TYPE_IMPORT_INSTANCE:
            provider_type = constants.PROVIDER_TYPE_IMPORT
            data = destination
        else:
            raise exception.NotFound("Unknown task type: %s" % task_type)

        event_handler = _ConductorProviderEventHandler(ctxt, task_id)
        provider = factory.get_provider(data["type"], provider_type,
                                        event_handler)

        connection_info = data.get("connection_info") or {}
        target_environment = data.get("target_environment") or {}

        secret_ref = connection_info.get("secret_ref")
        if secret_ref:
            LOG.info("Retrieving connection info from secret: %s", secret_ref)
            connection_info = secrets.get_secret(ctxt, secret_ref)

        if provider_type == constants.PROVIDER_TYPE_EXPORT:
            export_path = _get_task_export_path(task_id, create=True)

            result = provider.export_instance(ctxt, connection_info, instance,
                                              export_path)
            result[TMP_DIRS_KEY] = [export_path]
        else:
            result = provider.import_instance(ctxt, connection_info,
                                              target_environment, instance,
                                              task_info)
        mp_q.put(result)
    except Exception as ex:
        mp_q.put(str(ex))
        LOG.exception(ex)
    finally:
        # Signal the log event handler that there are no more events
        mp_log_q.put(None)
Example #25
def get_rpc_client_for_service(service, *client_args, **client_kwargs):
    rpc_client_class = RPC_TOPIC_TO_CLIENT_CLASS_MAP.get(service.topic)
    if not rpc_client_class:
        raise exception.NotFound(
            "No RPC client class for service with topic '%s'." %
            (service.topic))

    topic = service.topic
    if service.topic == constants.WORKER_MAIN_MESSAGING_TOPIC:
        # NOTE: coriolis.service.MessagingService-type services (such
        # as the worker), always have a dedicated per-host queue
        # which can be used to target the service:
        topic = constants.SERVICE_MESSAGING_TOPIC_FORMAT % (
            {
                "main_topic": constants.WORKER_MAIN_MESSAGING_TOPIC,
                "host": service.host
            })

    return rpc_client_class(*client_args, topic=topic, **client_kwargs)
Example #26
def update_minion_pool_lifecycle(context, minion_pool_id, updated_values):
    lifecycle = get_minion_pool_lifecycle(
        context, minion_pool_id, include_tasks_executions=False,
        include_machines=False)
    if not lifecycle:
        raise exception.NotFound(
            "Minion pool '%s' not found" % minion_pool_id)

    updateable_fields = [
        "minimum_minions", "maximum_minions", "minion_max_idle_time",
        "minion_retention_strategy", "environment_options",
        "pool_shared_resources", "notes", "pool_name", "pool_os_type"]
    # TODO(aznashwan): this should no longer be required once the
    # transfer action class hierarchy is overhauled:
    redundancies = {
        "environment_options": [
            "source_environment", "destination_environment"]}
    for field in updateable_fields:
        if field in updated_values:
            if field in redundancies:
                for old_key in redundancies[field]:
                    LOG.debug(
                        "Updating the '%s' field of Minion Pool '%s' to: '%s'",
                        old_key, minion_pool_id, updated_values[field])
                    setattr(lifecycle, old_key, updated_values[field])
            else:
                LOG.debug(
                    "Updating the '%s' field of Minion Pool '%s' to: '%s'",
                    field, minion_pool_id, updated_values[field])
                setattr(lifecycle, field, updated_values[field])

    non_updateable_fields = set(
        updated_values.keys()).difference(updateable_fields)
    if non_updateable_fields:
        LOG.warn(
            "The following Minion Pool fields can NOT be updated: %s",
            non_updateable_fields)

    # the oslo_db library uses this method for both the `created_at` and
    # `updated_at` fields
    setattr(lifecycle, 'updated_at', timeutils.utcnow())
Example #27
def set_minion_machines_allocation_statuses(
        context, minion_machine_ids, action_id, allocation_status):
    machines = get_minion_machines(context)
    existing_machine_id_mappings = {
        machine.id: machine for machine in machines}
    missing = [
        mid for mid in minion_machine_ids
        if mid not in existing_machine_id_mappings]
    if missing:
        raise exception.NotFound(
            "The following minion machines could not be found: %s" % (
                missing))

    for machine_id in minion_machine_ids:
        machine = existing_machine_id_mappings[machine_id]
        LOG.debug(
            "Changing allocation status in DB for minion machine '%s' "
            "from '%s' to '%s' and allocated action from '%s' to '%s'" % (
                machine.id, machine.status, allocation_status,
                machine.allocated_action, action_id))
        machine.allocated_action = action_id
        machine.status = allocation_status
Example #28
def get_task_runner(task_type):
    cls = _TASKS_MAP.get(task_type)
    if not cls:
        raise exception.NotFound("TaskRunner not found for task type: %s" %
                                 task_type)
    return cls()
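_TASKS_MAP is the registry get_task_runner reads from. A hypothetical sketch of its shape, with made-up runner class names; calling get_task_runner(constants.TASK_TYPE_EXPORT_INSTANCE) would then return an instance of the mapped class:

# Hypothetical sketch -- the class names here are made up for illustration.
_TASKS_MAP = {
    constants.TASK_TYPE_EXPORT_INSTANCE: ExportInstanceTask,
    constants.TASK_TYPE_IMPORT_INSTANCE: ImportInstanceTask,
}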
Example #29
def _get_task(context, task_id):
    task = _soft_delete_aware_query(context,
                                    models.Task).filter_by(id=task_id).first()
    if not task:
        raise exception.NotFound("Task not found: %s" % task_id)
    return task
Example #30
 def _get_migration(self, ctxt, migration_id):
     migration = db_api.get_migration(ctxt, migration_id)
     if not migration:
         raise exception.NotFound("Migration not found")
     return migration