Code example #1
def delete_user(username, only_stats=True, dry=True):
    vms = VirtualMachine.objects.filter(userid__exact=username)
    networks = Network.objects.filter(userid__exact=username)
    keys = PublicKeyPair.objects.filter(user__exact=username)

    if not len(
            list(
                itertools.ifilter(
                    bool, map(lambda q: q.count(), [vms, networks, keys])))):
        print "No entries exist for '%s'" % username
        return -1

    if only_stats:
        print "The following entries will be deleted if you decide to remove this user"
        print "%d Virtual Machines" % vms.exclude(
            operstate='DESTROYED').count()
        print "%d Destroyed Virtual Machines" % vms.filter(
            operstate='DESTROYED').count()
        print "%d Networks" % networks.count()
        print "%d PublicKeyPairs" % keys.count()
        return

    for o in itertools.chain(vms, networks):
        o.delete()

    for key in keys:
        key.delete()

    if dry:
        print "Skipping database commit."
        transaction.rollback()
    else:
        transaction.commit()
        print "User entries removed."
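The dry/commit switch in the example above relies on Django's old-style manual transaction management: the deletions run first, and the flag then decides between transaction.rollback() and transaction.commit(). A minimal sketch of that pattern under the same pre-1.6 Django API (purge_queryset and its arguments are hypothetical names, not part of the project):

from django.db import transaction

@transaction.commit_manually
def purge_queryset(queryset, dry=True):
    # Execute the writes first; nothing is durable until commit().
    count = queryset.count()
    queryset.delete()
    if dry:
        transaction.rollback()  # dry run: discard the deletions
    else:
        transaction.commit()    # real run: make the deletions permanent
    return count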
Code example #2
File: ips.py Project: AthinaB/synnefo
def create_floating_ip(userid, network=None, address=None, project=None):
    if network is None:
        floating_ip = allocate_public_ip(userid, floating_ip=True)
    else:
        if not network.floating_ip_pool:
            msg = ("Cannot allocate floating IP. Network %s is"
                   " not a floating IP pool.")
            raise faults.Conflict(msg % network.id)
        if network.action == "DESTROY":
            msg = "Cannot allocate floating IP. Network %s is being deleted."
            raise faults.Conflict(msg % network.id)

        # Allocate the floating IP
        floating_ip = allocate_ip(network, userid, address=address,
                                  floating_ip=True)

    if project is None:
        project = userid
    floating_ip.project = project
    floating_ip.save()
    # Issue commission (quotas)
    quotas.issue_and_accept_commission(floating_ip)
    transaction.commit()

    log.info("Created floating IP '%s' for user '%s'", floating_ip, userid)

    return floating_ip
Code example #3
def create_floating_ip(userid,
                       network=None,
                       address=None,
                       project=None,
                       shared_to_project=False):
    if network is None:
        floating_ip = allocate_public_ip(userid, floating_ip=True)
    else:
        if not network.floating_ip_pool:
            msg = ("Cannot allocate floating IP. Network %s is"
                   " not a floating IP pool.")
            raise faults.Conflict(msg % network.id)
        if network.action == "DESTROY":
            msg = "Cannot allocate floating IP. Network %s is being deleted."
            raise faults.Conflict(msg % network.id)

        # Allocate the floating IP
        floating_ip = allocate_ip(network,
                                  userid,
                                  address=address,
                                  floating_ip=True)

    if project is None:
        project = userid
    floating_ip.project = project
    floating_ip.shared_to_project = shared_to_project
    floating_ip.save()
    # Issue commission (quotas)
    quotas.issue_and_accept_commission(floating_ip)
    transaction.commit()

    log.info("Created floating IP '%s' for user '%s'", floating_ip, userid)

    return floating_ip
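A hypothetical call of this newer variant, assuming pool_network is a Network whose floating_ip_pool flag is set (the argument values are placeholders):

floating_ip = create_floating_ip(userid="user-uuid",
                                 network=pool_network,
                                 project="project-uuid",
                                 shared_to_project=True)

Passing network=None instead falls back to allocate_public_ip, as in the first branch of the function.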
Code example #4
        def wrapper(vm, *args, **kwargs):
            user_id = for_user
            if user_id is None:
                user_id = vm.userid

            validate_server_action(vm, action)
            vm.action = action

            commission_name = "client: api, resource: %s" % vm
            quotas.handle_resource_commission(vm,
                                              action=action,
                                              action_fields=action_fields,
                                              commission_name=commission_name,
                                              for_user=user_id)
            vm.save()

            # XXX: Special case for server creation!
            if action == "BUILD":
                serial = vm.serial
                serial.pending = False
                serial.accept = True
                serial.save()
                # Perform a commit, because the VirtualMachine must be saved to
                # DB before the OP_INSTANCE_CREATE job is enqueued in Ganeti.
                # Otherwise, messages will arrive from snf-dispatcher about
                # this instance, before the VM is stored in DB.
                transaction.commit()
                # After committing the locks are released. Refetch the instance
                # to guarantee x-lock.
                vm = VirtualMachine.objects.select_for_update().get(id=vm.id)
                # XXX: Special case for server creation: we must accept the
                # commission because the VM has been stored in DB. Also, if
                # communication with Ganeti fails, the job will never reach
                # Ganeti, and the commission will never be resolved.
                quotas.accept_resource_serial(vm)

            # Send the job to Ganeti and get the associated jobID
            try:
                job_id = func(vm, *args, **kwargs)
            except Exception as e:
                if vm.serial is not None and action != "BUILD":
                    # Since the job never reached Ganeti, reject the commission
                    log.debug("Rejecting commission: '%s', could not perform"
                              " action '%s': %s" % (vm.serial, action, e))
                    transaction.rollback()
                    quotas.reject_serial(vm.serial)
                    transaction.commit()
                raise

            log.info("user: %s, vm: %s, action: %s, job_id: %s, serial: %s",
                     user_id, vm.id, action, job_id, vm.serial)

            # store the new task in the VM
            if job_id is not None:
                vm.task = action
                vm.task_job_id = job_id
            vm.save()

            return vm
Code example #5
File: commands.py Project: AthinaB/synnefo
        def wrapper(vm, *args, **kwargs):
            user_id = vm.userid
            validate_server_action(vm, action)
            vm.action = action

            commission_name = "client: api, resource: %s" % vm
            quotas.handle_resource_commission(vm, action=action,
                                              action_fields=action_fields,
                                              commission_name=commission_name)
            vm.save()

            # XXX: Special case for server creation!
            if action == "BUILD":
                serial = vm.serial
                serial.pending = False
                serial.accept = True
                serial.save()
                # Perform a commit, because the VirtualMachine must be saved to
                # DB before the OP_INSTANCE_CREATE job is enqueued in Ganeti.
                # Otherwise, messages will arrive from snf-dispatcher about
                # this instance, before the VM is stored in DB.
                transaction.commit()
                # After committing the locks are released. Refetch the instance
                # to guarantee x-lock.
                vm = VirtualMachine.objects.select_for_update().get(id=vm.id)
                # XXX: Special case for server creation: we must accept the
                # commission because the VM has been stored in DB. Also, if
                # communication with Ganeti fails, the job will never reach
                # Ganeti, and the commission will never be resolved.
                quotas.accept_resource_serial(vm)

            # Send the job to Ganeti and get the associated jobID
            try:
                job_id = func(vm, *args, **kwargs)
            except Exception as e:
                if vm.serial is not None and action != "BUILD":
                    # Since the job never reached Ganeti, reject the commission
                    log.debug("Rejecting commission: '%s', could not perform"
                              " action '%s': %s" % (vm.serial, action, e))
                    transaction.rollback()
                    quotas.reject_serial(vm.serial)
                    transaction.commit()
                raise

            log.info("user: %s, vm: %s, action: %s, job_id: %s, serial: %s",
                     user_id, vm.id, action, job_id, vm.serial)

            # store the new task in the VM
            if job_id is not None:
                vm.task = action
                vm.task_job_id = job_id
            vm.save()

            return vm
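The BUILD branch above shows a pattern that recurs in these snippets: transaction.commit() makes the row visible to snf-dispatcher but also releases the row locks taken with select_for_update, so the instance is re-fetched to regain the exclusive lock. A condensed sketch of just that step, assuming the same old-style manual transaction API (commit_and_relock is a hypothetical helper, VirtualMachine is the model used in the examples):

from django.db import transaction

def commit_and_relock(vm):
    # Persist pending changes; the commit also releases any select_for_update locks.
    vm.save()
    transaction.commit()
    # Re-fetch under select_for_update to re-acquire the exclusive row lock.
    return VirtualMachine.objects.select_for_update().get(id=vm.id)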
Code example #6
File: ips.py Project: AthinaB/synnefo
def delete_floating_ip(floating_ip):
    # Lock network to prevent deadlock
    Network.objects.select_for_update().get(id=floating_ip.network_id)

    # Return the address of the floating IP back to pool
    floating_ip.release_address()
    # And mark the floating IP as deleted
    floating_ip.deleted = True
    floating_ip.save()
    # Release quota for floating IP
    quotas.issue_and_accept_commission(floating_ip, action="DESTROY")
    transaction.commit()
    # Delete the floating IP from DB
    log.info("Deleted floating IP '%s' of user '%s'", floating_ip,
             floating_ip.userid)
    floating_ip.delete()
Code example #7
File: __init__.py Project: AthinaB/synnefo
def issue_and_accept_commission(resource, action="BUILD", action_fields=None):
    """Issue and accept a commission to Quotaholder.

    This function implements the Commission workflow, and must be called
    exactly after and in the same transaction that created/updated the
    resource. The workflow it implements is the following:
    0) Resolve previous unresolved commission if exists
    1) Issue commission, get a serial and correlate it with the resource
    2) Store the serial in DB as a serial to accept
    3) COMMIT!
    4) Accept commission to QH

    """
    commission_reason = ("client: api, resource: %s, action: %s"
                         % (resource, action))
    serial = handle_resource_commission(resource=resource, action=action,
                                        action_fields=action_fields,
                                        commission_name=commission_reason)

    if serial is None:
        return

    # Mark the serial as one to accept and associate it with the resource
    serial.pending = False
    serial.accept = True
    serial.save()
    transaction.commit()

    try:
        # Accept the commission to quotaholder
        accept_serial(serial)
    except:
        # Do not crash if we can not accept commission to Quotaholder. Quotas
        # have already been reserved and the resource already exists in DB.
        # Just log the error
        log.exception("Failed to accept commission: %s", resource.serial)
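A hypothetical caller, following the docstring's contract that the function is invoked right after, and inside, the transaction that created the resource (Volume and its fields are placeholders here; the floating-IP examples above are real callers):

vol = Volume.objects.create(userid=user_id, size=10)
# Still inside the transaction that created the resource:
issue_and_accept_commission(vol, action="BUILD")
# On return the serial has been committed to the DB and, if Quotaholder
# was reachable, accepted as well; otherwise the failure is only logged.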
Code example #8
        try:
            uuid = get_user_uuid(u)
            print "%s -> %s" % (u, uuid)
            if not uuid:
                raise Exception("No uuid for %s" % u)
            migrate_user(u, uuid)
            count += 1
        except Exception, e:
            print "ERROR: User id migration failed (%s)" % e

    if dry:
        print "Skipping database commit."
        transaction.rollback()
    else:
        transaction.commit()
        print "Migrated %d users" % count


class Command(NoArgsCommand):
    help = "Quotas migration helper"

    option_list = BaseCommand.option_list + (
        make_option('--strict',
                    dest='strict',
                    action="store_false",
                    default=True,
                    help="Exit on warnings."),
        make_option('--validate-db',
                    dest='validate',
                    action="store_true",
Code example #9
File: snapshots.py Project: AthinaB/synnefo
def create(user_id, volume, name, description, metadata, force=False):
    """Create a snapshot from a given volume

    Create a snapshot from a given volume. The snapshot is first created as
    a file in Pithos, with specified metadata to indicate that it is a
    snapshot. Then a job is sent to Ganeti backend to create the actual
    snapshot of the volume.

    Snapshots are only supported for volumes of ext_ disk template. Also,
    the volume must be attached to some server.

    """

    if name is None:
        raise faults.BadRequest("Snapshot 'name' is required")

    # Check that taking a snapshot is feasible
    if volume.machine is None:
        raise faults.BadRequest("Cannot snapshot a detached volume!")
    if volume.status not in ["AVAILABLE", "IN_USE"]:
        raise faults.BadRequest("Cannot create snapshot while volume is in"
                                " '%s' status" % volume.status)

    volume_type = volume.volume_type
    if not volume_type.disk_template.startswith("ext_"):
        msg = ("Cannot take a snapshot from a volume with volume type '%s' and"
               " '%s' disk template" %
               (volume_type.id, volume_type.disk_template))
        raise faults.BadRequest(msg)

    # Increase the snapshot counter of the volume that is used in order to
    # generate unique snapshot names
    volume.snapshot_counter += 1
    volume.save()
    transaction.commit()

    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume.id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are being used for images. We set 'EXCLUDE_ALL_TASKS' to bypass
    # image customization. Also, we get some basic metadata for the volume from
    # the server that the volume is attached to.
    metadata.update({"exclude_all_tasks": "yes",
                     "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(volume.machine.metadata
                                         .filter(meta_key__in=["OS", "users"])
                                         .values_list("meta_key",
                                                      "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from Gbytes to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name,
                                              mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine, volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If failed to enqueue job to Ganeti, mark snapshot as ERROR
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot
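A hypothetical call, assuming volume is attached to a server, is in AVAILABLE or IN_USE state, and uses an ext_* disk template (the argument values are placeholders):

snapshot = create(user_id="user-uuid",
                  volume=volume,
                  name="pre-upgrade",
                  description="state before the kernel upgrade",
                  metadata={})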
Code example #10
File: snapshots.py Project: kins299/mycloud
def create(user_id, volume, name, description, metadata, force=False):
    """Create a snapshot from a given volume

    Create a snapshot from a given volume. The snapshot is first created as
    a file in Pithos, with specified metadata to indicate that it is a
    snapshot. Then a job is sent to Ganeti backend to create the actual
    snapshot of the volume.

    Snapshots are only supported for volumes of ext_ disk template. Also,
    the volume must be attached to some server.

    """

    if name is None:
        raise faults.BadRequest("Snapshot 'name' is required")

    # Check that taking a snapshot is feasible
    if volume.machine is None:
        raise faults.BadRequest("Cannot snapshot a detached volume!")
    if volume.status not in ["AVAILABLE", "IN_USE"]:
        raise faults.BadRequest("Cannot create snapshot while volume is in"
                                " '%s' status" % volume.status)

    volume_type = volume.volume_type
    if not volume_type.disk_template.startswith("ext_"):
        msg = ("Cannot take a snapshot from a volume with volume type '%s' and"
               " '%s' disk template" %
               (volume_type.id, volume_type.disk_template))
        raise faults.BadRequest(msg)

    # Increase the snapshot counter of the volume that is used in order to
    # generate unique snapshot names
    volume.snapshot_counter += 1
    volume.save()
    transaction.commit()

    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume.id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are being used for images. We set 'EXCLUDE_ALL_TASKS' to bypass
    # image customization. Also, we get some basic metadata for the volume from
    # the server that the volume is attached to.
    metadata.update({"exclude_all_tasks": "yes", "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(
            volume.machine.metadata.filter(
                meta_key__in=["OS", "users"]).values_list(
                    "meta_key", "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from Gbytes to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name,
                                              mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine,
                                               volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If failed to enqueue job to Ganeti, mark snapshot as ERROR
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot