Code Example #1
File: util.py Project: mpastyl/websocket-console
def get_public_ip(backend):
    """Reserve an IP from a public network.

    This method should run inside a transaction.

    """

    # Guarantee exclusive access to backend, because accessing the IP pools of
    # the backend networks may result in a deadlock with backend allocator
    # which also checks that backend networks have a free IP.
    backend = Backend.objects.select_for_update().get(id=backend.id)

    address = None
    if settings.PUBLIC_USE_POOL:
        (network, address) = allocate_public_address(backend)
    else:
        for net in list(backend_public_networks(backend)):
            pool = net.get_pool()
            if not pool.empty():
                address = 'pool'
                network = net
                break
    if address is None:
        log.error("Public networks of backend %s are full", backend)
        raise faults.OverLimit("Can not allocate IP for new machine."
                               " Public networks are full.")
    return (network, address)
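A minimal usage sketch (not from the source project): the caller is expected to wrap this call in a transaction so that the select_for_update() row lock is held until the allocated address has actually been consumed. The helper name below is hypothetical.

from django.db import transaction

def allocate_ip_for_new_vm(backend):
    # Hypothetical caller: keep the backend row locked until the NIC that
    # uses the allocated address has been created and committed.
    with transaction.atomic():
        network, address = get_public_ip(backend)
        # ... create the NIC / reserve the address while the lock is held ...
        return network, address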
Code Example #2
def add(request, net, args):
    # Normal Response Code: 202
    # Error Response Codes: computeFault (400, 500),
    #                       serviceUnavailable (503),
    #                       unauthorized (401),
    #                       badRequest (400),
    #                       buildInProgress (409),
    #                       badMediaType(415),
    #                       itemNotFound (404),
    #                       overLimit (413)

    if net.state != 'ACTIVE':
        raise faults.BuildInProgress('Network not active yet')

    server_id = args.get('serverRef', None)
    if not server_id:
        raise faults.BadRequest('Malformed Request.')

    vm = get_vm(server_id, request.user_uniq, non_suspended=True)

    address = None
    if net.dhcp:
        # Get a free IP from the address pool.
        try:
            address = get_network_free_address(net)
        except EmptyPool:
            raise faults.OverLimit('Network is full')

    log.info("Connecting VM %s to Network %s(%s)", vm, net, address)

    backend.connect_to_network(vm, net, address)
    return HttpResponse(status=202)
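For context, a hedged sketch of the request this view presumably handles; the route and body shape are inferred from the code (only 'serverRef' is read from args) and are not confirmed by the source:

# POST /networks/<network_id>/action        (hypothetical route)
# {"add": {"serverRef": "42"}}
# On success the view returns an empty 202 Accepted response.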
Code Example #3
File: util.py Project: mpastyl/websocket-console
def validate_network_params(subnet, gateway=None, subnet6=None, gateway6=None):
    try:
        # Use the strict option to reject subnets with host bits set
        network = ipaddr.IPv4Network(subnet, strict=True)
    except ValueError:
        raise faults.BadRequest("Invalid network IPv4 subnet")

    # Check that network size is allowed!
    if not validate_network_size(network.prefixlen):
        raise faults.OverLimit(
            message="Unsupported network size",
            details="Network mask must be in range (%s, 29]" % MAX_CIDR_BLOCK)

    # Check that gateway belongs to network
    if gateway:
        try:
            gateway = ipaddr.IPv4Address(gateway)
        except ValueError:
            raise faults.BadRequest("Invalid network IPv4 gateway")
        if gateway not in network:
            raise faults.BadRequest("Invalid network IPv4 gateway")

    if subnet6:
        try:
            # Use the strict option to reject subnets with host bits set
            network6 = ipaddr.IPv6Network(subnet6, strict=True)
        except ValueError:
            raise faults.BadRequest("Invalid network IPv6 subnet")
        if gateway6:
            try:
                gateway6 = ipaddr.IPv6Address(gateway6)
            except ValueError:
                raise faults.BadRequest("Invalid network IPv6 gateway")
            if gateway6 not in network6:
                raise faults.BadRequest("Invalid network IPv6 gateway")
Code Example #4
File: util.py Project: mpastyl/websocket-console
def verify_personality(personality):
    """Verify that a a list of personalities is well formed"""
    if len(personality) > settings.MAX_PERSONALITY:
        raise faults.OverLimit("Maximum number of personalities" " exceeded")
    for p in personality:
        # Verify that personalities are well-formed
        try:
            assert isinstance(p, dict)
            keys = set(p.keys())
            allowed = set(['contents', 'group', 'mode', 'owner', 'path'])
            assert keys.issubset(allowed)
            contents = p['contents']
            if len(contents) > settings.MAX_PERSONALITY_SIZE:
                # No need to decode if contents already exceed limit
                raise faults.OverLimit("Maximum size of personality exceeded")
            if len(b64decode(contents)) > settings.MAX_PERSONALITY_SIZE:
                raise faults.OverLimit("Maximum size of personality exceeded")
        except AssertionError:
            raise faults.BadRequest("Malformed personality in request")
Code Example #5
File: __init__.py Project: salsa-dev/synnefo
    def __exit__(self, exc_type, value, traceback):
        if value is not None:  # exception
            if not isinstance(value, errors.AstakosClientException):
                return False  # reraise
            if exc_type is errors.QuotaLimit:
                msg, details = render_overlimit_exception(value)
                raise faults.OverLimit(msg, details=details)

            log.exception("Unexpected error %s" % value.message)
            raise faults.InternalServerError("Unexpected error")
Code Example #6
    def __exit__(self, exc_type, value, traceback):
        if value is not None:  # exception
            if not isinstance(value, errors.AstakosClientException):
                return False  # reraise
            if exc_type is errors.QuotaLimit:
                raise faults.OverLimit(value.message, details=value.details)
            if exc_type is errors.NotFound:
                self.check_not_found()

            log.exception("Unexpected error %s" % value.message)
            raise faults.InternalServerError("Unexpected error")
Code Example #7
def validate_subnet_params(subnet=None,
                           gateway=None,
                           subnet6=None,
                           gateway6=None):
    if subnet:
        try:
            # Use the strict option to reject subnets with host bits set
            network = ipaddr.IPv4Network(subnet, strict=True)
        except ValueError:
            raise faults.BadRequest("Invalid network IPv4 subnet")

        # Check that network size is allowed!
        prefixlen = network.prefixlen
        if prefixlen > 29 or prefixlen < settings.MAX_CIDR_BLOCK:
            raise faults.OverLimit(
                message="Unsupported network size",
                details="Netmask must be in range: [%s, 29]" %
                settings.MAX_CIDR_BLOCK)
        if gateway:  # Check that gateway belongs to network
            try:
                gateway = ipaddr.IPv4Address(gateway)
            except ValueError:
                raise faults.BadRequest("Invalid network IPv4 gateway")
            if gateway not in network:
                raise faults.BadRequest("Invalid network IPv4 gateway")

    if subnet6:
        try:
            # Use the strict option to reject subnets with host bits set
            network6 = ipaddr.IPv6Network(subnet6, strict=True)
        except ValueError:
            raise faults.BadRequest("Invalid network IPv6 subnet")
        # Check that network6 is a /64 subnet, because this is imposed by
        # the 'mac2eui64' utility.
        if network6.prefixlen != 64:
            msg = ("Unsupported IPv6 subnet size. Network netmask must be"
                   " /64")
            raise faults.BadRequest(msg)
        if gateway6:
            try:
                gateway6 = ipaddr.IPv6Address(gateway6)
            except ValueError:
                raise faults.BadRequest("Invalid network IPv6 gateway")
            if gateway6 not in network6:
                raise faults.BadRequest("Invalid network IPv6 gateway")
Code Example #8
File: __init__.py Project: mpastyl/websocket-console
def issue_commission(user, source, provisions, force=False, auto_accept=False):
    """Issue a new commission to the quotaholder.

    Issue a new commission to the quotaholder, and create the
    corresponding QuotaHolderSerial object in the DB.

    """

    qh = Quotaholder.get()
    try:
        serial = qh.issue_one_commission(ASTAKOS_TOKEN,
                                         user,
                                         source,
                                         provisions,
                                         force=force,
                                         auto_accept=auto_accept)
    except QuotaLimit as e:
        msg, details = render_overlimit_exception(e)
        raise faults.OverLimit(msg, details=details)

    if serial:
        return QuotaHolderSerial.objects.create(serial=serial)
    else:
        raise Exception("No serial")
Code Example #9
File: snapshots.py Project: kins299/mycloud
def create(user_id, volume, name, description, metadata, force=False):
    """Create a snapshot from a given volume

    Create a snapshot from a given volume. The snapshot is first created as
    a file in Pithos, with specified metadata to indicate that it is a
    snapshot. Then a job is sent to Ganeti backend to create the actual
    snapshot of the volume.

    Snapshots are only supported for volumes with an ext_ disk template.
    Also, the volume must be attached to a server.

    """

    if name is None:
        raise faults.BadRequest("Snapshot 'name' is required")

    # Check that taking a snapshot is feasible
    if volume.machine is None:
        raise faults.BadRequest("Cannot snapshot a detached volume!")
    if volume.status not in ["AVAILABLE", "IN_USE"]:
        raise faults.BadRequest("Cannot create snapshot while volume is in"
                                " '%s' status" % volume.status)

    volume_type = volume.volume_type
    if not volume_type.disk_template.startswith("ext_"):
        msg = ("Cannot take a snapshot from a volume with volume type '%s' and"
               " '%s' disk template" %
               (volume_type.id, volume_type.disk_template))
        raise faults.BadRequest(msg)

    # Increase the volume's snapshot counter, which is used to generate
    # unique snapshot names.
    volume.snapshot_counter += 1
    volume.save()
    transaction.commit()

    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume.id,
    }

    # Snapshots are used as images, so we set the most important image
    # properties. 'EXCLUDE_ALL_TASKS' is set to bypass image customization,
    # and some basic metadata for the volume is copied from the server that
    # the volume is attached to.
    metadata.update({"exclude_all_tasks": "yes", "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(
            volume.machine.metadata.filter(
                meta_key__in=["OS", "users"]).values_list(
                    "meta_key", "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert the size from GiB to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name,
                                              mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine,
                                               volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If enqueueing the job to Ganeti failed, mark the snapshot as ERROR
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot
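One detail worth noting in the example above: volume.size is stored in GiB, so the left shift by 30 bits converts it to bytes.

size_gib = 10
size_bytes = size_gib << 30   # same as size_gib * 2**30
assert size_bytes == 10737418240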
Code Example #10
def do_create(user_id,
              volume_id,
              name,
              description,
              metadata,
              force=False,
              credentials=None):
    volume = util.get_volume(credentials,
                             volume_id,
                             for_update=True,
                             non_deleted=True,
                             exception=faults.BadRequest)
    _check(volume)
    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume_id,
    }

    # Snapshots are used as images, so we set the most important image
    # properties. 'EXCLUDE_ALL_TASKS' is set to bypass image customization,
    # and some basic metadata for the volume is copied from the server that
    # the volume is attached to.
    metadata.update({"exclude_all_tasks": "yes", "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(
            volume.machine.metadata.filter(
                meta_key__in=["OS", "users"]).values_list(
                    "meta_key", "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert the size from GiB to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name,
                                              mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine,
                                               volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If enqueueing the job to Ganeti failed, mark the snapshot as ERROR
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot