Example #1
def get_quotas(user):
    """Transform the resource usage dictionary of a user.

    Return a list of dictionaries that represent the quotas of the user. Each
    dictionary has the following form:

    {
        'project': <Project instance>,
        'resources': [('Resource Name1', <Resource dict>),
                      ('Resource Name2', <Resource dict>),...]
    }

    where 'Resource Name' is the name of the resource and <Resource dict> is
    the dictionary that is returned by list_user_quotas and has the following
    fields:

        pending, project_pending, project_limit, project_usage, usage.

    Note that the get_quota_usage function returns many dicts, but we only
    keep the ones that have limit > 0.
    """
    usage = get_user_quotas(user)

    quotas = []
    for project_id, resource_dict in usage.iteritems():
        source = {}
        source['project'] = Project.objects.get(uuid=project_id)
        q_res = source['resources'] = []

        for resource_name, resource in resource_dict.iteritems():
            # Check if the resource is useful to display
            project_limit = resource['project_limit']
            usage = resource['usage']
            r = get_resource(resource_name)
            if not is_resource_useful(r, project_limit, usage):
                continue

            usage = units.show(usage, r.unit)
            limit = units.show(resource['limit'], r.unit)
            taken_by_others = resource['project_usage'] - resource['usage']
            effective_limit = min(resource['limit'],
                                  project_limit - taken_by_others)
            if effective_limit < 0:
                effective_limit = 0
            effective_limit = units.show(effective_limit, r.unit)

            if limit != effective_limit:
                limit += " (Effective Limit: " + effective_limit + ")"

            q_res.append((
                r.report_desc,
                usage,
                limit,
            ))

        quotas.append(source)

    return quotas
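
For orientation, here is a minimal sketch of how the list returned by get_quotas() could be consumed, assuming only the structure built in the loop above (a 'project' key plus a 'resources' list of (report_desc, usage, limit) tuples); print_quota_report is a hypothetical helper, not part of the original code:

def print_quota_report(user):
    # Walk the structure produced by get_quotas() and print one line per
    # resource under each project.
    for entry in get_quotas(user):
        project = entry['project']
        print "Project: %s" % project.uuid
        for report_desc, usage, limit in entry['resources']:
            # usage and limit are already human-readable strings
            # produced by units.show() inside get_quotas().
            print "  %-30s %10s / %s" % (report_desc, usage, limit)
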
Example #2
def pretty_print_stats(stats, unit_style, stdout):
    newline = lambda: stdout.write("\n")

    _datetime = stats.get("datetime")
    stdout.write("datetime: %s\n" % _datetime)
    newline()

    user_stats = stats.get("users", {})
    table = []
    headers = ["Provider Name", "Verified Users", "Active Users",
               "Total Users"]
    for provider, user_info in sorted(user_stats.items()):
        table.append((provider, user_info["verified"], user_info["active"],
                      user_info["total"]))
    pprint_table(stdout, table, headers,
                 title="Users")

    newline()
    resource_stats = stats.get("resources", {})
    total_resources = {}
    headers = ["Resource Name", "Used", "Limit", "Usage"]
    for provider, resources in sorted(resource_stats.items()):
        table = []
        for resource_name, resource_info in sorted(resources.items()):
            unit = resource_info["unit"]
            used = resource_info["used"]
            limit = resource_info["limit"]
            usage = round(float(used) / limit, 1) if limit else "-"
            table.append((resource_name,
                          units.show(used, unit, style=unit_style),
                          units.show(limit, unit, style=unit_style),
                          usage))
            # Also count them for total
            total_resources.setdefault(resource_name, {})
            total_resources[resource_name].setdefault("used", 0)
            total_resources[resource_name].setdefault("limit", 0)
            total_resources[resource_name]["used"] += used
            total_resources[resource_name]["limit"] += limit
            total_resources[resource_name]["unit"] = unit
        pprint_table(stdout, table, headers,
                     title="Resources for Provider '%s'" % provider)
        newline()

    if len(resource_stats) > 1:
        table = []
        for resource_name, resource_info in sorted(total_resources.items()):
            unit = resource_info["unit"]
            used = resource_info["used"]
            limit = resource_info["limit"]
            usage = round(float(used) / limit, 1) if limit else "-"
            table.append((resource_name,
                          units.show(used, unit, style=unit_style),
                          units.show(limit, unit, style=unit_style),
                          usage))
        pprint_table(stdout, table, headers,
                     title="Resources for all Providers")
        newline()
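
pretty_print_stats only needs pprint_table to accept a stream, a row list, headers and optional separator/title keywords, as the calls above show. A bare-bones stand-in with that call shape (an assumption for local testing, not the real helper from the project) could look like this:

def pprint_table(out, table, headers=None, separator=" | ", title=None):
    # Minimal stand-in: write an optional title, then the header row and
    # each data row joined with the separator.
    if title:
        out.write("%s\n" % title)
    if headers:
        out.write(separator.join(str(h) for h in headers) + "\n")
    for row in table:
        out.write(separator.join(str(col) for col in row) + "\n")
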
Example #3
def pretty_print_stats(stats, unit_style, stdout):
    newline = lambda: stdout.write("\n")

    _datetime = stats.get("datetime")
    stdout.write("datetime: %s\n" % _datetime)
    newline()

    user_stats = stats.get("users", {})
    table = []
    headers = [
        "Provider", "Total Users", "Verified Users", "Active Users",
        "Exclusive Users per Provider"
    ]
    for provider, user_info in sorted(user_stats.items()):
        table.append((provider, user_info["total"], user_info["verified"],
                      user_info["active"], user_info.get("exclusive", "-")))
    pprint_table(stdout, table, headers, separator=" | ", title="Users")

    newline()
    resource_stats = stats.get("resources", {})
    total_resources = resource_stats.pop("all", {})
    headers = ["Resource Name", "Used", "Allocated", "Usage"]
    table = []
    for resource_name, resource_info in sorted(total_resources.items()):
        unit = resource_info["unit"]
        used = resource_info["used"]
        allocated = resource_info["allocated"]
        usage = "%.2f%%" % (100 * (used / allocated))\
            if allocated != 0 else "-"
        table.append((resource_name, units.show(used, unit, style=unit_style),
                      units.show(allocated, unit, style=unit_style), usage))
    pprint_table(stdout,
                 table,
                 headers,
                 separator=" | ",
                 title="Resources for all providers")
    newline()
    for provider, resources in sorted(resource_stats.items()):
        table = []
        for resource_name, resource_info in sorted(resources.items()):
            unit = resource_info["unit"]
            used = resource_info["used"]
            allocated = resource_info["allocated"]
            usage = "%.2f%%" % (100 * (used / allocated)) \
                    if allocated != 0 else "-"
            table.append(
                (resource_name, units.show(used, unit, style=unit_style),
                 units.show(allocated, unit, style=unit_style), usage))
        pprint_table(stdout,
                     table,
                     headers,
                     separator=" | ",
                     title=("Resources for users with only the '%s' provider" %
                            provider))
Example #4
def get_quotas(user):
    """Transform the resource usage dictionary of a user.

    Return a list of dictionaries that represent the quotas of the user. Each
    dictionary has the following form:

    {
        'project': <Project instance>,
        'resources': [('Resource Name1', <Resource dict>),
                      ('Resource Name2', <Resource dict>),...]
    }

    where 'Resource Name' is the name of the resource and <Resource dict> is
    the dictionary that is returned by list_user_quotas and has the following
    fields:

        pending, project_pending, project_limit, project_usage, usage.

    Note that the get_quota_usage function returns many dicts, but we only
    keep the ones that have limit > 0.
    """
    usage = get_user_quotas(user)

    quotas = []
    for project_id, resource_dict in usage.iteritems():
        source = {}
        source['project'] = Project.objects.get(uuid=project_id)
        q_res = source['resources'] = []

        for resource_name, resource in resource_dict.iteritems():
            # Check if the resource is useful to display
            project_limit = resource['project_limit']
            usage = resource['usage']
            r = get_resource(resource_name)
            if not is_resource_useful(r, project_limit, usage):
                continue

            usage = units.show(usage, r.unit)
            limit = units.show(resource['limit'], r.unit)
            taken_by_others = resource['project_usage'] - resource['usage']
            effective_limit = min(resource['limit'], project_limit - taken_by_others)
            if effective_limit < 0:
                effective_limit = 0
            effective_limit = units.show(effective_limit, r.unit)

            if limit != effective_limit:
                limit += " (Effective Limit: " + effective_limit + ")"

            q_res.append((r.report_desc, usage, limit,))

        quotas.append(source)

    return quotas
Example #5
def pretty_print_stats(stats, unit_style, stdout):
    newline = lambda: stdout.write("\n")

    _datetime = stats.get("datetime")
    stdout.write("datetime: %s\n" % _datetime)
    newline()

    user_stats = stats.get("users", {})
    table = []
    headers = ["Provider", "Total Users", "Verified Users",
               "Active Users", "Exclusive Users per Provider"]
    for provider, user_info in sorted(user_stats.items()):
        table.append((provider, user_info["total"], user_info["verified"],
                      user_info["active"], user_info.get("exclusive", "-")))
    pprint_table(stdout, table, headers, separator=" | ", title="Users")

    newline()
    resource_stats = stats.get("resources", {})
    total_resources = resource_stats.pop("all", {})
    headers = ["Resource Name", "Used", "Allocated", "Usage"]
    table = []
    for resource_name, resource_info in sorted(total_resources.items()):
        unit = resource_info["unit"]
        used = resource_info["used"]
        allocated = resource_info["allocated"]
        usage = "%.2f%%" % (100 * (used / allocated))\
            if allocated != 0 else "-"
        table.append((resource_name,
                      units.show(used, unit, style=unit_style),
                      units.show(allocated, unit, style=unit_style),
                      usage))
    pprint_table(stdout, table, headers, separator=" | ",
                 title="Resources for all providers")
    newline()
    for provider, resources in sorted(resource_stats.items()):
        table = []
        for resource_name, resource_info in sorted(resources.items()):
            unit = resource_info["unit"]
            used = resource_info["used"]
            allocated = resource_info["allocated"]
            usage = "%.2f%%" % (100 * (used / allocated)) \
                    if allocated != 0 else "-"
            table.append((resource_name,
                          units.show(used, unit, style=unit_style),
                          units.show(allocated, unit, style=unit_style),
                          usage))
        pprint_table(stdout, table, headers, separator=" | ",
                     title=("Resources for users with only the '%s' provider"
                            % provider))
Example #6
def project_fields(project):
    app = project.last_application
    pending_app = (app.id if app and app.state == app.PENDING
                   else None)

    d = OrderedDict([
        ('project id', project.uuid),
        ('name', project.realname),
        ('status', project.state_display()),
        ('pending_app', pending_app),
        ('owner', project.owner),
        ('homepage', project.homepage),
        ('description', project.description),
        ('creation date', project.creation_date),
        ('request end date', project.end_date),
        ])

    deact = project.last_deactivation()
    if deact is not None:
        d['deactivation date'] = deact.date

    d.update([
            ('join policy', project.member_join_policy_display),
            ('leave policy', project.member_leave_policy_display),
            ('max members', units.show(project.limit_on_members_number, None)),
            ('total members', project.members_count()),
    ])

    return d
Example #7
def app_fields(app):
    d = OrderedDict([
        ('project id', app.chain.uuid),
        ('application id', app.id),
        ('status', app.state_display()),
        ('applicant', app.applicant),
        ('comments for review', app.comments),
        ('request issue date', app.issue_date),
        ])
    if app.name:
        d['name'] = app.name
    if app.owner:
        d['owner'] = app.owner
    if app.homepage:
        d['homepage'] = app.homepage
    if app.description:
        d['description'] = app.description
    if app.start_date:
        d['request start date'] = app.start_date
    if app.end_date:
        d['request end date'] = app.end_date
    if app.member_join_policy:
        d['join policy'] = app.member_join_policy_display
    if app.member_leave_policy:
        d['leave policy'] = app.member_leave_policy_display
    if app.limit_on_members_number:
        d['max members'] = units.show(app.limit_on_members_number, None)

    return d
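
Since app_fields() (and project_fields() above) return an OrderedDict, rendering them is just a matter of iterating in insertion order. A small sketch of such a consumer; pprint_fields is a hypothetical name:

def pprint_fields(d, out):
    # Print each field on its own line, preserving the insertion order
    # chosen by app_fields()/project_fields().
    width = max(len(k) for k in d) if d else 0
    for key, value in d.items():
        out.write("%s: %s\n" % (key.ljust(width), value))

For example, pprint_fields(app_fields(app), sys.stdout) would print one "key: value" line per populated field.
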
Example #8
def project_fields(project):
    app = project.last_application
    pending_app = (app.id if app and app.state == app.PENDING else None)

    d = OrderedDict([
        ('project id', project.uuid),
        ('name', project.realname),
        ('status', project.state_display()),
        ('pending_app', pending_app),
        ('owner', project.owner),
        ('homepage', project.homepage),
        ('description', project.description),
        ('creation date', project.creation_date),
        ('request end date', project.end_date),
    ])

    deact = project.last_deactivation()
    if deact is not None:
        d['deactivation date'] = deact.date

    d.update([
        ('join policy', project.member_join_policy_display),
        ('leave policy', project.member_leave_policy_display),
        ('max members', units.show(project.limit_on_members_number, None)),
        ('total members', project.members_count()),
    ])

    return d
Example #9
def display_resource_usage_for_project(resource, project):
    usage_map = presentation.USAGE_TAG_MAP
    quota = quotas.get_project_quota(project).get(resource.name, None)

    if not quota:
        return "No usage"

    cls = ''
    usage = quota['project_usage']
    limit = quota['project_limit']

    if limit == 0 and usage == 0:
        return "--"

    usage_perc = "%d" % ((float(usage) / limit) * 100) if limit else "100"
    _keys = usage_map.keys()
    _keys.reverse()
    closest = filter(lambda x: int(x) <= int(usage_perc), _keys)[0]
    cls = usage_map[closest]

    usage_display = units.show(usage, resource.unit)
    usage_perc_display = "%s%%" % usage_perc

    resp = """<span class="%s policy-diff">%s (%s)</span>""" % \
            (cls, usage_perc_display, usage_display)
    return mark_safe(resp)
Example #10
def app_fields(app):
    d = OrderedDict([
        ('project id', app.chain.uuid),
        ('application id', app.id),
        ('status', app.state_display()),
        ('applicant', app.applicant),
        ('comments for review', app.comments),
        ('request issue date', app.issue_date),
    ])
    if app.name:
        d['name'] = app.name
    if app.owner:
        d['owner'] = app.owner
    if app.homepage:
        d['homepage'] = app.homepage
    if app.description:
        d['description'] = app.description
    if app.start_date:
        d['request start date'] = app.start_date
    if app.end_date:
        d['request end date'] = app.end_date
    if app.member_join_policy:
        d['join policy'] = app.member_join_policy_display
    if app.member_leave_policy:
        d['leave policy'] = app.member_leave_policy_display
    if app.limit_on_members_number:
        d['max members'] = units.show(app.limit_on_members_number, None)

    return d
Example #11
def is_resource_useful(resource, limit):
    """Simple function to check if the resource is useful to show.

    Values that have infinite or zero limits are discarded.
    """
    displayed_limit = units.show(limit, resource.unit)
    if limit == 0 or displayed_limit == 'inf':
        return False
    return True
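
A small usage sketch: assuming a quota dict shaped like the <Resource dict> of Example #1 and the get_resource() lookup used there, the helper filters out entries that are not worth displaying:

def visible_quotas(quota_dict):
    # Keep only resources whose limit is neither zero nor infinite,
    # using the two-argument is_resource_useful() shown above.
    visible = {}
    for name, entry in quota_dict.items():
        resource = get_resource(name)    # lookup helper from Example #1
        if is_resource_useful(resource, entry['limit']):
            visible[name] = entry
    return visible
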
Example #12
def is_resource_useful(resource, limit):
    """Simple function to check if the resource is useful to show.

    Values that have infinite or zero limits are discarded.
    """
    displayed_limit = units.show(limit, resource.unit)
    if limit == 0 or displayed_limit == 'inf':
        return False
    return True
Example #13
def get_project_usage(inst):
    """Return requested project quota type.

    Accepted stats are: 'project_limit', 'project_pending', 'project_usage'.
    Note that the output is sanitized, meaning that stats that correspond
    to infinite or zero limits will not be returned.
    """
    resource_list = []
    quota_dict = get_project_quota(inst)
    if not quota_dict:
        return []

    policies = get_policies(inst)
    for p in policies:
        r = p.resource
        value = units.show(quota_dict[r.name]['project_usage'], r.unit)
        resource_list.append((r.report_desc, value))

    return resource_list
Example #14
def get_project_usage(inst):
    """Return requested project quota type.

    Accepted stats are: 'project_limit', 'project_pending', 'project_usage'.
    Note that the output is sanitized, meaning that stats that correspond
    to infinite or zero limits will not be returned.
    """
    resource_list = []
    quota_dict = get_project_quota(inst)
    if not quota_dict:
        return []

    policies = get_policies(inst, quota_dict)
    for p in policies:
        r = p.resource
        value = units.show(quota_dict[r.name]['project_usage'], r.unit)
        resource_list.append((r.report_desc, value))

    return resource_list
Example #15
    def resource_policies(self):
        policies = []
        append = policies.append
        for name, value in self.data.iteritems():
            if not value:
                continue
            uplimit = value
            if name.endswith('_uplimit'):
                subs = name.split('_uplimit')
                prefix, suffix = subs
                try:
                    resource = Resource.objects.get(name=prefix)
                except Resource.DoesNotExist:
                    raise forms.ValidationError("Resource %s does not exist" %
                                                prefix)
                # keep only resource limits for selected resource groups
                if self.data.get('is_selected_%s' %
                                 resource.group, "0") == "1":
                    if not resource.ui_visible:
                        raise forms.ValidationError("Invalid resource %s" %
                                                    resource.name)
                    d = model_to_dict(resource)
                    try:
                        uplimit = long(uplimit)
                    except ValueError:
                        m = "Limit should be an integer"
                        raise forms.ValidationError(m)
                    display = units.show(uplimit, resource.unit)
                    d.update(dict(resource=prefix, uplimit=uplimit,
                                  display_uplimit=display))
                    append(d)

        ordered_keys = presentation.RESOURCES['resources_order']

        def resource_order(r):
            if r['str_repr'] in ordered_keys:
                return ordered_keys.index(r['str_repr'])
            else:
                return -1

        policies = sorted(policies, key=resource_order)
        return policies
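
The loop above keys off two form-data conventions: limit fields named '<resource>_uplimit' and group toggles named 'is_selected_<group>'. A hypothetical self.data illustrating that shape (the resource and group names below are placeholders, not taken from the original code):

# Only limits whose resource group is switched on via 'is_selected_<group>'
# are turned into policy dicts by resource_policies().
data = {
    'cyclades.vm_uplimit': '5',            # placeholder resource name
    'cyclades.ram_uplimit': '4294967296',
    'is_selected_compute': '1',            # enables the 'compute' group
}
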
Example #16
    def resource_policies(self):
        policies = []
        append = policies.append
        resource_indexes = {}
        include_diffs = False
        is_new = self.instance and self.instance.id is None

        existing_policies = []
        existing_data = {}

        # normalize to single values dict
        data = dict()
        for key, value in self.data.iteritems():
            data[key] = value

        if not is_new:
            # User may have emptied some fields. Empty values are not handled
            # below. Fill data as if user typed "0" in field, but only
            # for resources which exist in application project and have
            # non-zero capacity (either for member or project).
            include_diffs = True
            existing_policies = self.instance.resource_set
            append_groups = set()
            for policy in existing_policies:
                cap_set = max(policy.project_capacity, policy.member_capacity)

                if not policy.resource.ui_visible:
                    continue

                rname = policy.resource.name
                group = policy.resource.group
                existing_data["%s_p_uplimit" % rname] = "0"
                existing_data["%s_m_uplimit" % rname] = "0"
                append_groups.add(group)

            for key, value in existing_data.iteritems():
                if not key in data or data.get(key, '') == '':
                    data[key] = value
            for group in append_groups:
                data["is_selected_%s" % group] = "1"

        for name, value in data.iteritems():

            if not value:
                continue

            if name.endswith('_uplimit'):
                is_project_limit = name.endswith('_p_uplimit')
                suffix = '_p_uplimit' if is_project_limit else '_m_uplimit'
                if value == 'inf' or value == 'Unlimited':
                    value = units.PRACTICALLY_INFINITE
                uplimit = value
                prefix, _suffix = name.split(suffix)

                try:
                    resource = Resource.objects.get(name=prefix)
                except Resource.DoesNotExist:
                    raise forms.ValidationError("Resource %s does not exist" %
                                                prefix)

                if is_project_limit:
                    member_limit = data.get(prefix + '_m_uplimit')
                    try:
                        pvalue = int(value)
                        mvalue = int(member_limit)
                    except:
                        raise forms.ValidationError("Invalid format")
                else:
                    project_limit = data.get(prefix + '_p_uplimit')
                    try:
                        mvalue = int(value)
                        pvalue = int(project_limit)
                    except:
                        raise forms.ValidationError("Invalid format")

                if mvalue > pvalue:
                    msg = "%s per member limit exceeds total limit"
                    raise forms.ValidationError(msg % resource.name)

                # keep only resource limits for selected resource groups
                if data.get('is_selected_%s' % \
                                     resource.group, "0") == "1":
                    if not resource.ui_visible:
                        raise forms.ValidationError("Invalid resource %s" %
                                                    resource.name)
                    d = model_to_dict(resource)
                    try:
                        uplimit = long(uplimit)
                    except ValueError:
                        m = "Limit should be an integer"
                        raise forms.ValidationError(m)

                    display = units.show(uplimit, resource.unit)
                    if display == "inf":
                        display = "Unlimited"

                    handled = resource_indexes.get(prefix)

                    diff_data = None
                    if include_diffs:
                        try:
                            policy = existing_policies.get(resource=resource)
                            if is_project_limit:
                                pval = policy.project_capacity
                            else:
                                pval = policy.member_capacity

                            if pval != uplimit:
                                diff = pval - uplimit

                                diff_display = units.show(abs(diff),
                                                          resource.unit,
                                                          inf="Unlimited")
                                diff_is_inf = False
                                prev_is_inf = False
                                if uplimit == units.PRACTICALLY_INFINITE:
                                    diff_display = "Unlimited"
                                    diff_is_inf = True
                                if pval == units.PRACTICALLY_INFINITE:
                                    diff_display = "Unlimited"
                                    prev_is_inf = True

                                prev_display = units.show(pval, resource.unit,
                                                          inf="Unlimited")

                                diff_data = {
                                    'prev': pval,
                                    'prev_display': prev_display,
                                    'diff': diff,
                                    'diff_display': diff_display,
                                    'increased': diff < 0,
                                    'diff_is_inf': diff_is_inf,
                                    'prev_is_inf': prev_is_inf,
                                    'operator': '+' if diff < 0 else '-'
                                }

                        except:
                            pass

                    if is_project_limit:
                        d.update(dict(resource=prefix,
                                      p_uplimit=uplimit,
                                      display_p_uplimit=display))

                        if diff_data:
                            d.update(dict(resource=prefix, p_diff=diff_data))

                        if not handled:
                            d.update(dict(resource=prefix, m_uplimit=0,
                                      display_m_uplimit=units.show(0,
                                           resource.unit)))
                    else:
                        d.update(dict(resource=prefix, m_uplimit=uplimit,
                                      display_m_uplimit=display))

                        if diff_data:
                            d.update(dict(resource=prefix, m_diff=diff_data))

                        if not handled:
                            d.update(dict(resource=prefix, p_uplimit=0,
                                      display_p_uplimit=units.show(0,
                                           resource.unit)))

                    if resource_indexes.get(prefix, None) is not None:
                        # already included in policies
                        handled.update(d)
                    else:
                        # keep track of resource dicts
                        append(d)
                        resource_indexes[prefix] = d

        ordered_keys = presentation.RESOURCES['resources_order']

        def resource_order(r):
            if r['str_repr'] in ordered_keys:
                return ordered_keys.index(r['str_repr'])
            else:
                return -1

        policies = sorted(policies, key=resource_order)
        return policies
Example #17
def show_resource_value(number, resource, style):
    resources = ResourceDict.get()
    resource_dict = resources.get(resource)
    unit = resource_dict.get('unit') if resource_dict else None
    return units.show(number, unit, style)
Example #18
def create(user_id, volume, name, description, metadata, force=False):
    """Create a snapshot from a given volume

    Create a snapshot from a given volume. The snapshot is first created as
    a file in Pithos, with specified metadata to indicate that it is a
    snapshot. Then a job is sent to Ganeti backend to create the actual
    snapshot of the volume.

    Snapshots are only supported for volumes of ext_ disk template. Also,
    the volume must be attached to some server.

    """

    if name is None:
        raise faults.BadRequest("Snapshot 'name' is required")

    # Check that taking a snapshot is feasible
    if volume.machine is None:
        raise faults.BadRequest("Cannot snapshot a detached volume!")
    if volume.status not in ["AVAILABLE", "IN_USE"]:
        raise faults.BadRequest("Cannot create snapshot while volume is in"
                                " '%s' status" % volume.status)

    volume_type = volume.volume_type
    if not volume_type.disk_template.startswith("ext_"):
        msg = ("Cannot take a snapshot from a volume with volume type '%s' and"
               " '%s' disk template" %
               (volume_type.id, volume_type.disk_template))
        raise faults.BadRequest(msg)

    # Increase the snapshot counter of the volume that is used in order to
    # generate unique snapshot names
    volume.snapshot_counter += 1
    volume.save()
    transaction.commit()

    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume.id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are being used for images. We set 'EXCLUDE_ALL_TASKS' to bypass
    # image customization. Also, we get some basic metadata for the volume from
    # the server that the volume is attached to.
    metadata.update({"exclude_all_tasks": "yes",
                     "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(volume.machine.metadata
                                         .filter(meta_key__in=["OS", "users"])
                                         .values_list("meta_key",
                                                      "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from Gbytes to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name,
                                              mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine, volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If failed to enqueue job to Ganeti, mark snapshot as ERROR
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot
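
The size conversion above relies on a bit shift: size = volume.size << 30 multiplies the gigabyte count by 2**30 to get bytes, e.g.:

size_gb = 10
assert (size_gb << 30) == size_gb * 1024 ** 3 == 10737418240
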
Example #19
def pprint_clusters(clusters, stdout, detail=True):
    t_backend_cnt, t_vms = 0, 0
    t_nodes_cnt, t_vm_cap_cnt, t_offline_cnt, t_drained_cnt = 0, 0, 0, 0
    t_cpu = 0
    t_mused, t_mtotal = 0, 0
    t_dused, t_dtotal = 0, 0
    t_vcpu, t_vram, t_vdisk = 0, 0, 0
    for cluster_name, c_info in sorted(clusters.items()):
        t_backend_cnt += 1
        node_table = []
        c_nodes_cnt, c_vm_cap_cnt, c_offline_cnt, c_drained_cnt = 0, 0, 0, 0
        c_cpu = 0
        c_mfree, c_mused, c_mtotal = 0, 0, 0
        c_dfree, c_dused, c_dtotal = 0, 0, 0
        for node_name, n_info in sorted(c_info["nodes"].items()):
            c_nodes_cnt += 1
            if not n_info["vm_capable"]:
                continue
            c_vm_cap_cnt += 1
            drained, offline = n_info["drained"], n_info["offline"]
            state = "online"
            if c_info["offline"]:
                state = "offline"
            if c_info["drained"]:
                state += " (drained)"
            cpu = n_info["cpu"]
            ram, disk = n_info["ram"], n_info["disk"]
            mfree, mtotal = int(ram["free"]), int(ram["total"])
            mused = mtotal - mfree
            dfree, dtotal = int(disk["free"]), int(disk["total"])
            dused = dtotal - dfree
            if offline:
                c_offline_cnt += 1
            if drained:
                c_drained_cnt += 1
            c_mtotal += mtotal
            c_dtotal += dtotal
            if not offline:
                c_cpu += cpu
                c_mfree += mfree
                c_mused += mused
                c_dfree += dfree
                c_dused += dused
            mpercentage = ("%.2f%%" % (100 * mused / mtotal))\
                if mtotal != 0 else "-"
            dpercentage = ("%.2f%%" % (100 * dused / dtotal))\
                if dtotal != 0 else "-"
            node_table.append((
                node_name,
                state,
                n_info["instances"],
                cpu,
                "%s/%s %s" % (units.show(
                    mused, "bytes"), units.show(mtotal, "bytes"), mpercentage),
                "%s/%s %s" % (units.show(
                    dused, "bytes"), units.show(dtotal, "bytes"), dpercentage),
            ))
        state = "online"
        if c_info["offline"]:
            state = "offline"
        if c_info["drained"]:
            state += " (drained)"
        virtual_cpu = c_info["virtual_cpu"]
        virtual_ram = c_info["virtual_ram"]
        virtual_disk = c_info["virtual_disk"]
        if not c_info["offline"]:
            t_cpu += c_cpu
            t_mused += c_mused
            t_mtotal += c_mtotal
            t_dused += c_dused
            t_dtotal += c_dtotal
            t_vcpu += virtual_cpu
            t_vdisk += virtual_disk
            t_vram += virtual_ram
            t_nodes_cnt += c_nodes_cnt
            t_vm_cap_cnt += c_vm_cap_cnt
            t_offline_cnt += c_offline_cnt
            t_drained_cnt += c_drained_cnt
            t_vms += int(c_info["virtual_servers"])
        if not detail:
            continue
        cluster_table = (
            ("Name", cluster_name),
            ("State", state),
            ("Nodes", "Total: %s, VM Capable: %s, Drained %s Offline: %s" %
             (c_nodes_cnt, c_vm_cap_cnt, c_drained_cnt, c_offline_cnt)),
            ("Disk Templates", ", ".join(c_info["disk_templates"])),
            ("Hypervisor", c_info["hypervisor"]),
            ("Instances", c_info["virtual_servers"]),
            ("Virtual CPUs", virtual_cpu),
            ("Physical CPUs", c_cpu),
            ("V/P CPUs", ("%.2f%%" % (100 * virtual_cpu / c_cpu))),
            ("Virtual RAM", units.show(virtual_ram, "bytes")),
            ("Physical RAM (used/total)", "%s/%s %s%%" %
             (units.show(c_mused, "bytes"), units.show(c_mtotal, "bytes"),
              ("%.2f%%" %
               (100 * c_mused / c_mtotal) if c_mtotal != 0 else "-"))),
            ("V/P used RAM",
             ("%.2f%%" %
              (100 * virtual_ram / c_mused) if c_mused != 0 else "-")),
            ("V/P total RAM",
             ("%.2f%%" %
              (100 * virtual_ram / c_mtotal) if c_mtotal != 0 else "-")),
            ("Virtual disk", units.show(virtual_disk, "bytes")),
            ("Physical Disk (used/total)", "%s/%s %s%%" %
             (units.show(c_dused, "bytes"), units.show(c_dtotal, "bytes"),
              ("%.2f%%" %
               (100 * c_dused / c_dtotal) if c_dtotal != 0 else "-"))),
            ("V/P used disk",
             ("%.2f%%" %
              (100 * virtual_disk / c_dused) if c_dused != 0 else "-")),
            ("V/P total disk",
             ("%.2f%%" %
              (100 * virtual_disk / c_dtotal) if c_dtotal != 0 else "-")),
        )
        pprint_table(stdout,
                     cluster_table,
                     headers=None,
                     separator=" | ",
                     title="Statistics for backend %s" % cluster_name)
        headers = ("Node Name", "State", "VMs", "CPUs", "RAM (used/total)",
                   "Disk (used/total)")
        pprint_table(stdout,
                     node_table,
                     headers,
                     separator=" | ",
                     title="Statistics per node for backend %s" % cluster_name)

    total_table = (
        ("Backend", t_backend_cnt),
        ("Nodes", "Total: %s, VM Capable: %s, Drained %s Offline: %s" %
         (t_nodes_cnt, t_vm_cap_cnt, t_drained_cnt, t_offline_cnt)),
        ("Instances", t_vms),
        ("Virtual CPUs", t_vcpu),
        ("Physical CPUs", t_cpu),
        ("V/P CPUs", ("%.2f%%" % (100 * t_vcpu / t_cpu))),
        ("Virtual RAM", units.show(t_vram, "bytes")),
        ("Physical RAM (used/total)", "%s/%s %s%%" %
         (units.show(t_mused, "bytes"), units.show(t_mtotal, "bytes"),
          ("%.2f%%" % (100 * t_mused / t_mtotal) if t_mtotal != 0 else "-"))),
        ("V/P used RAM",
         ("%.2f%%" % (100 * t_vram / t_mused) if t_mused != 0 else "-")),
        ("V/P total RAM",
         ("%.2f%%" % (100 * t_vram / t_mtotal) if t_mtotal != 0 else "-")),
        ("Virtual disk", units.show(t_vdisk, "bytes")),
        ("Physical Disk (used/total)", "%s/%s %s%%" %
         (units.show(t_dused, "bytes"), units.show(t_dtotal, "bytes"),
          ("%.2f%%" % (100 * t_dused / t_dtotal) if t_dtotal != 0 else "-"))),
        ("V/P used disk",
         ("%.2f%%" % (100 * t_vdisk / t_dused) if t_dused != 0 else "-")),
        ("V/P total disk",
         ("%.2f%%" % (100 * t_vdisk / t_dtotal) if t_dtotal != 0 else "-")),
    )

    if len(clusters) > 1:
        stdout.write("\n")
        pprint_table(stdout,
                     total_table,
                     headers=None,
                     separator=" | ",
                     title="Statistics for all backends")
Example #20
def do_create(user_id,
              volume_id,
              name,
              description,
              metadata,
              force=False,
              credentials=None):
    volume = util.get_volume(credentials,
                             volume_id,
                             for_update=True,
                             non_deleted=True,
                             exception=faults.BadRequest)
    _check(volume)
    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume_id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are being used for images. We set 'EXCLUDE_ALL_TASKS' to bypass
    # image customization. Also, we get some basic metadata for the volume from
    # the server that the volume is attached to.
    metadata.update({"exclude_all_tasks": "yes", "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(
            volume.machine.metadata.filter(
                meta_key__in=["OS", "users"]).values_list(
                    "meta_key", "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from Gbytes to bytes
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name,
                                              mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine,
                                               volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If failed to enqueue job to Ganeti, mark snapshot as ERROR
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot
Example #21
def show_resource_value(number, resource, style):
    resources = ResourceDict.get()
    resource_dict = resources.get(resource)
    unit = resource_dict.get('unit') if resource_dict else None
    return units.show(number, unit, style)
Example #22
def pprint_clusters(clusters, stdout, detail=True):
    t_backend_cnt, t_vms = 0, 0
    t_nodes_cnt, t_vm_cap_cnt, t_offline_cnt, t_drained_cnt = 0, 0, 0, 0
    t_cpu = 0
    t_mused, t_mtotal = 0, 0
    t_dused, t_dtotal = 0, 0
    t_vcpu, t_vram, t_vdisk = 0, 0, 0
    for cluster_name, c_info in sorted(clusters.items()):
        t_backend_cnt += 1
        node_table = []
        c_nodes_cnt, c_vm_cap_cnt, c_offline_cnt, c_drained_cnt = 0, 0, 0, 0
        c_cpu = 0
        c_mfree, c_mused, c_mtotal = 0, 0, 0
        c_dfree, c_dused, c_dtotal = 0, 0, 0
        for node_name, n_info in sorted(c_info["nodes"].items()):
            c_nodes_cnt += 1
            if not n_info["vm_capable"]:
                continue
            c_vm_cap_cnt += 1
            drained, offline = n_info["drained"], n_info["offline"]
            state = "online"
            if c_info["offline"]:
                state = "offline"
            if c_info["drained"]:
                state += " (drained)"
            cpu = n_info["cpu"]
            ram, disk = n_info["ram"], n_info["disk"]
            mfree, mtotal = int(ram["free"]), int(ram["total"])
            mused = mtotal - mfree
            dfree, dtotal = int(disk["free"]), int(disk["total"])
            dused = dtotal - dfree
            if offline:
                c_offline_cnt += 1
            if drained:
                c_drained_cnt += 1
            c_mtotal += mtotal
            c_dtotal += dtotal
            if not offline:
                c_cpu += cpu
                c_mfree += mfree
                c_mused += mused
                c_dfree += dfree
                c_dused += dused
            mpercentage = ("%.2f%%" % (100 * mused / mtotal))\
                if mtotal != 0 else "-"
            dpercentage = ("%.2f%%" % (100 * dused / dtotal))\
                if dtotal != 0 else "-"
            node_table.append((node_name, state, n_info["instances"], cpu,
                               "%s/%s %s" % (units.show(mused, "bytes"),
                                             units.show(mtotal, "bytes"),
                                             mpercentage),
                               "%s/%s %s" % (units.show(dused, "bytes"),
                                             units.show(dtotal, "bytes"),
                                             dpercentage),))
        state = "online"
        if c_info["offline"]:
            state = "offline"
        if c_info["drained"]:
            state += " (drained)"
        virtual_cpu = c_info["virtual_cpu"]
        virtual_ram = c_info["virtual_ram"]
        virtual_disk = c_info["virtual_disk"]
        if not c_info["offline"]:
            t_cpu += c_cpu
            t_mused += c_mused
            t_mtotal += c_mtotal
            t_dused += c_dused
            t_dtotal += c_dtotal
            t_vcpu += virtual_cpu
            t_vdisk += virtual_disk
            t_vram += virtual_ram
            t_nodes_cnt += c_nodes_cnt
            t_vm_cap_cnt += c_vm_cap_cnt
            t_offline_cnt += c_offline_cnt
            t_drained_cnt += c_drained_cnt
            t_vms += int(c_info["virtual_servers"])
        if not detail:
            continue
        cluster_table = (
            ("Name", cluster_name),
            ("State", state),
            ("Nodes", "Total: %s, VM Capable: %s, Drained %s Offline: %s" %
                      (c_nodes_cnt, c_vm_cap_cnt, c_drained_cnt,
                       c_offline_cnt)),
            ("Disk Templates", ", ".join(c_info["disk_templates"])),
            ("Hypervisor", c_info["hypervisor"]),
            ("Instances", c_info["virtual_servers"]),
            ("Virtual CPUs", virtual_cpu),
            ("Physical CPUs", c_cpu),
            ("V/P CPUs", ("%.2f%%" % (100 * virtual_cpu / c_cpu))),
            ("Virtual RAM", units.show(virtual_ram, "bytes")),
            ("Physical RAM (used/total)",
                "%s/%s %s%%" % (units.show(c_mused, "bytes"),
                                units.show(c_mtotal, "bytes"),
                                ("%.2f%%" % (100 * c_mused / c_mtotal)
                                    if c_mtotal != 0 else "-"))),
            ("V/P used RAM", ("%.2f%%" % (100 * virtual_ram / c_mused)
                              if c_mused != 0 else "-")),
            ("V/P total RAM", ("%.2f%%" % (100 * virtual_ram / c_mtotal)
                               if c_mtotal != 0 else "-")),
            ("Virtual disk", units.show(virtual_disk, "bytes")),
            ("Physical Disk (used/total)",
                "%s/%s %s%%" % (units.show(c_dused, "bytes"),
                                units.show(c_dtotal, "bytes"),
                                ("%.2f%%" % (100 * c_dused/c_dtotal)
                                if c_dtotal != 0 else "-"))),
            ("V/P used disk", ("%.2f%%" % (100 * virtual_disk / c_dused)
                               if c_dused != 0 else "-")),
            ("V/P total disk", ("%.2f%%" % (100 * virtual_disk / c_dtotal)
                                if c_dtotal != 0 else "-")),
        )
        pprint_table(stdout, cluster_table, headers=None, separator=" | ",
                     title="Statistics for backend %s" % cluster_name)
        headers = ("Node Name", "State", "VMs",
                   "CPUs", "RAM (used/total)", "Disk (used/total)")
        pprint_table(stdout, node_table, headers, separator=" | ",
                     title="Statistics per node for backend %s" % cluster_name)
        stdout.write("\n")

    total_table = (
        ("Backend", t_backend_cnt),
        ("Nodes", "Total: %s, VM Capable: %s, Drained %s Offline: %s" %
                  (t_nodes_cnt, t_vm_cap_cnt, t_drained_cnt, t_offline_cnt)),
        ("Instances", t_vms),
        ("Virtual CPUs", t_vcpu),
        ("Physical CPUs", t_cpu),
        ("V/P CPUs", ("%.2f%%" % (100 * t_vcpu / t_cpu))),
        ("Virtual RAM", units.show(t_vram, "bytes")),
        ("Physical RAM (used/total)", "%s/%s %s%%" %
            (units.show(t_mused, "bytes"), units.show(t_mtotal, "bytes"),
             ("%.2f%%" % (100 * t_mused/t_mtotal) if t_mtotal != 0 else "-"))),
        ("V/P used RAM", ("%.2f%%" % (100 * t_vram / t_mused)
                          if t_mused != 0 else "-")),
        ("V/P total RAM", ("%.2f%%" % (100 * t_vram / t_mtotal)
                           if t_mtotal != 0 else "-")),
        ("Virtual disk", units.show(t_vdisk, "bytes")),
        ("Physical Disk (used/total)",
            "%s/%s %s%%" % (units.show(t_dused, "bytes"),
                            units.show(t_dtotal, "bytes"),
                            ("%.2f%%" % (100 * t_dused/t_dtotal)
                             if t_dtotal != 0 else "-"))),
        ("V/P used disk", ("%.2f%%" % (100 * t_vdisk / t_dused)
                           if t_dused != 0 else "-")),
        ("V/P total disk", ("%.2f%%" % (100 * t_vdisk / t_dtotal)
                            if t_dtotal != 0 else "-")),
    )
    pprint_table(stdout, total_table, headers=None, separator=" | ",
                 title="Statistics for all backends")
Example #23
def pprint_servers(servers, stdout):
    # Print server stats per state
    per_state = []
    for state, stats in sorted(servers.items()):
        count = stats["count"]
        cpu = reduce(operator.add,
                     [int(k) * int(v) for k, v in stats["cpu"].items()], 0)
        ram = reduce(operator.add,
                     [int(k) * int(v) for k, v in stats["ram"].items()], 0)
        disk = 0
        for disk_template, disk_stats in stats["disk"].items():
            disk = reduce(operator.add,
                          [int(k) * int(v) for k, v in disk_stats.items()],
                          disk)
        per_state.append((state, count, cpu, units.show(ram, "bytes", "auto"),
                          units.show(disk, "bytes", "auto")))
    headers = ("State", "Servers", "CPUs", "RAM", "Disk")
    pprint_table(stdout,
                 per_state,
                 headers,
                 separator=" | ",
                 title="Servers Per Operational State")
    stdout.write("\n")

    # Print server stats per CPU
    per_cpu = []
    cpu_stats = defaultdict(dict)
    for state, stats in servers.items():
        for cpu, cpu_cnt in stats["cpu"].items():
            cpu_stats[cpu][state] = cpu_cnt
            cpu_stats[cpu]["total"] = \
                cpu_stats[cpu].setdefault("total", 0) + int(cpu_cnt)
    for cpu, _cpu_stats in sorted(cpu_stats.items()):
        per_cpu.append((cpu, _cpu_stats["total"], _cpu_stats.get("started", 0),
                        _cpu_stats.get("stopped",
                                       0), _cpu_stats.get("error", 0)))
    headers = ("CPUs", "Total", "Started", "Stopped", "Error")
    pprint_table(stdout,
                 per_cpu,
                 headers,
                 separator=" | ",
                 title="Servers Per CPU")
    stdout.write("\n")

    # Print server stats per RAM
    per_ram = []
    ram_stats = defaultdict(dict)
    for state, stats in servers.items():
        for ram, ram_cnt in stats["ram"].items():
            ram_stats[ram][state] = ram_cnt
            ram_stats[ram]["total"] = \
                ram_stats[ram].setdefault("total", 0) + int(ram_cnt)
    for ram, _ram_stats in sorted(ram_stats.items()):
        per_ram.append(
            (units.show(ram, "bytes", "auto"), _ram_stats["total"],
             _ram_stats.get("started",
                            0), _ram_stats.get("stopped",
                                               0), _ram_stats.get("error", 0)))
    headers = ("RAM", "Total", "Started", "Stopped", "Error")
    pprint_table(stdout,
                 per_ram,
                 headers,
                 separator=" | ",
                 title="Servers Per RAM")
    stdout.write("\n")

    # Print server stats per Disk Template
    per_disk_t = []
    disk_t_stats = defaultdict(dict)
    for state, stats in servers.items():
        for disk_t, disk_t_info in stats["disk"].items():
            disk_t_cnt = reduce(operator.add,
                                [v for v in disk_t_info.values()], 0)
            disk_t_stats[disk_t][state] = disk_t_cnt
            disk_t_stats[disk_t]["total"] = \
                disk_t_stats[disk_t].setdefault("total", 0) + int(disk_t_cnt)
    for disk_t, _disk_t_stats in sorted(disk_t_stats.items()):
        per_disk_t.append(
            (disk_t, _disk_t_stats["total"], _disk_t_stats.get("started", 0),
             _disk_t_stats.get("stopped", 0), _disk_t_stats.get("error", 0)))
    headers = ("Disk Template", "Total", "Started", "Stopped", "Error")
    pprint_table(stdout,
                 per_disk_t,
                 headers,
                 separator=" | ",
                 title="Servers Per Disk Template")
    stdout.write("\n")

    # Print server stats per Disk Template and Disk Size
    per_disk_t_size = []
    disk_template_sizes = defaultdict(dict)
    disk_sizes = set()
    for state, stats in servers.items():
        for disk_t, disk_t_info in stats["disk"].items():
            if disk_t not in disk_template_sizes:
                disk_template_sizes[disk_t] = defaultdict(int)
            for disk_size, vm_count in disk_t_info.items():
                disk_sizes.add(disk_size)
                disk_template_sizes[disk_t][disk_size] += vm_count
    disk_sizes = sorted(list(disk_sizes))

    for disk_t, disk_info in disk_template_sizes.items():
        _line = [disk_t]
        for size in disk_sizes:
            _line.append(disk_info[size])
        per_disk_t_size.append(_line)
    headers = ["Disk Template"] + map(lambda x: units.show(x, "bytes"),
                                      disk_sizes)
    pprint_table(stdout,
                 per_disk_t_size,
                 headers,
                 separator=" | ",
                 title="Servers per Disk Template and Disk Size")
    stdout.write("\n")

    # Print server stats per disk size
    per_disk = []
    disk_stats = defaultdict(dict)
    for state, stats in servers.items():
        for disk_t, disk_info in stats["disk"].items():
            for disk, disk_cnt in disk_info.items():
                if disk not in disk_stats:
                    disk_stats[disk] = defaultdict(dict)
                disk_stats[disk][state] = \
                    disk_stats[disk].setdefault(state, 0) + int(disk_cnt)
                disk_stats[disk]["total"] = \
                    disk_stats[disk].setdefault("total", 0) + int(disk_cnt)
    for disk, _disk_stats in sorted(disk_stats.items()):
        per_disk.append(
            (units.show(disk, "bytes", "auto"), _disk_stats["total"],
             _disk_stats.get("started", 0), _disk_stats.get("stopped", 0),
             _disk_stats.get("error", 0)))
    headers = ("Disk Size", "Total", "Started", "Stopped", "Error")
    pprint_table(stdout,
                 per_disk,
                 headers,
                 separator=" | ",
                 title="Servers Per Disk Size")
    stdout.write("\n")
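
For reference, the per-state structure that pprint_servers() consumes, inferred from the lookups above: "count" is a plain total, "cpu" and "ram" map a value to a server count, and "disk" maps a disk template to a {size: count} dict. All numbers below are invented:

servers = {
    "started": {
        "count": 10,
        "cpu": {"1": 4, "4": 6},                    # CPUs -> server count
        "ram": {"1073741824": 4, "4294967296": 6},  # RAM in bytes -> server count
        "disk": {
            "drbd": {"21474836480": 7},             # template -> {size in bytes: count}
            "ext_archipelago": {"53687091200": 3},
        },
    },
    "stopped": {
        "count": 2,
        "cpu": {"2": 2},
        "ram": {"2147483648": 2},
        "disk": {"drbd": {"21474836480": 2}},
    },
}
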
Example #24
def create(user_id, volume, name, description, metadata, force=False):
    """Create a snapshot from a given volume

    Create a snapshot from a given volume. The snapshot is first created as
    a file in Pithos, with specified metadata to indicate that it is a
    snapshot. Then a job is sent to Ganeti backend to create the actual
    snapshot of the volume.

    Snapshots are only supported for volumes of ext_ disk template. Also,
    the volume must be attached to some server.

    """

    if name is None:
        raise faults.BadRequest("Snapshot 'name' is required")

    # Check that taking a snapshot is feasible
    if volume.machine is None:
        raise faults.BadRequest("Cannot snapshot a detached volume!")
    if volume.status not in ["AVAILABLE", "IN_USE"]:
        raise faults.BadRequest("Cannot create snapshot while volume is in"
                                " '%s' status" % volume.status)

    volume_type = volume.volume_type
    if not volume_type.disk_template.startswith("ext_"):
        msg = ("Cannot take a snapshot from a volume with volume type '%s' and"
               " '%s' disk template" %
               (volume_type.id, volume_type.disk_template))
        raise faults.BadRequest(msg)

    # Increase the snapshot counter of the volume that is used in order to
    # generate unique snapshot names
    volume.snapshot_counter += 1
    volume.save()
    transaction.commit()

    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume.id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are used for images. We set 'EXCLUDE_ALL_TASKS' to bypass image
    # customization. Also, we get some basic metadata for the volume from
    # the server that the volume is attached to.
    metadata.update({"exclude_all_tasks": "yes", "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(
            volume.machine.metadata.filter(
                meta_key__in=["OS", "users"]).values_list(
                    "meta_key", "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from GiB to bytes (1 GiB == 2 ** 30 bytes)
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name,
                                              mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine,
                                               volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If enqueueing the Ganeti job failed, mark the snapshot as ERROR
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot
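The size conversion above ("size = volume.size << 30") treats volume.size as a
GiB count; shifting left by 30 bits multiplies by 2**30. A standalone sanity
check of the arithmetic:

# Shifting a GiB count left by 30 bits is the same as multiplying by 2**30.
size_gib = 10
assert (size_gib << 30) == size_gib * 1024 ** 3 == 10737418240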
Example #25
0
def do_create(user_id, volume_id, name, description, metadata, force=False,
              credentials=None):
    volume = util.get_volume(credentials, volume_id,
                             for_update=True, non_deleted=True,
                             exception=faults.BadRequest)
    _check(volume)
    snapshot_metadata = {
        "name": name,
        "disk_format": "diskdump",
        "container_format": "bare",
        # Snapshot specific
        "description": description,
        "volume_id": volume_id,
    }

    # Snapshots are used as images. We set the most important properties
    # that are used for images. We set 'EXCLUDE_ALL_TASKS' to bypass image
    # customization. Also, we get some basic metadata for the volume from
    # the server that the volume is attached to.
    metadata.update({"exclude_all_tasks": "yes",
                     "description": description})
    if volume.index == 0:
        # Copy the metadata of the VM into the image properties only when the
        # volume is the root volume of the VM.
        vm_metadata = dict(volume.machine.metadata
                                         .filter(meta_key__in=["OS", "users"])
                                         .values_list("meta_key",
                                                      "meta_value"))
        metadata.update(vm_metadata)

    snapshot_properties = PlanktonBackend._prefix_properties(metadata)
    snapshot_metadata.update(snapshot_properties)

    # Generate a name for the Archipelago mapfile.
    mapfile = generate_mapfile_name(volume)

    # Convert size from GiB to bytes (1 GiB == 2 ** 30 bytes)
    size = volume.size << 30

    with PlanktonBackend(user_id) as b:
        try:
            snapshot_id = b.register_snapshot(name=name,
                                              mapfile=mapfile,
                                              size=size,
                                              metadata=snapshot_metadata)
        except faults.OverLimit:
            msg = ("Resource limit exceeded for your account."
                   " Not enough storage space to create snapshot of"
                   " %s size." % units.show(size, "bytes", "gb"))
            raise faults.OverLimit(msg)

        try:
            job_id = backend.snapshot_instance(volume.machine, volume,
                                               snapshot_name=mapfile,
                                               snapshot_id=snapshot_id)
        except:
            # If enqueueing the Ganeti job failed, mark the snapshot as ERROR
            b.update_snapshot_state(snapshot_id, OBJECT_ERROR)
            raise

        # Store the backend and job id as metadata in the snapshot in order
        # to make reconciliation based on the Ganeti job possible.
        backend_info = {
            "ganeti_job_id": job_id,
            "ganeti_backend_id": volume.machine.backend_id
        }
        metadata = {"backend_info": json.dumps(backend_info)}
        b.update_metadata(snapshot_id, metadata)

    snapshot = util.get_snapshot(user_id, snapshot_id)

    return snapshot
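For reference, the reconciliation metadata stored on the snapshot by both
helpers is a single JSON-encoded entry; a minimal sketch with hypothetical
Ganeti IDs:

import json

# Hypothetical job and backend IDs; the real values come from the Ganeti
# backend at runtime.
backend_info = {"ganeti_job_id": 12345, "ganeti_backend_id": 1}
metadata = {"backend_info": json.dumps(backend_info)}
# metadata now maps "backend_info" to the JSON-encoded dict above.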
Example #26
0
def pprint_servers(servers, stdout):
    # Print server stats per state
    per_state = []
    for state, stats in sorted(servers.items()):
        count = stats["count"]
        cpu = reduce(operator.add,
                     [int(k) * int(v) for k, v in stats["cpu"].items()], 0)
        ram = reduce(operator.add,
                     [int(k) * int(v) for k, v in stats["ram"].items()], 0)
        disk = 0
        for disk_template, disk_stats in stats["disk"].items():
            disk = reduce(operator.add,
                          [int(k) * int(v) for k, v in disk_stats.items()],
                          disk)
        per_state.append((state, count, cpu, units.show(ram, "bytes", "auto"),
                          units.show(disk, "bytes", "auto")))
    headers = ("State", "Servers", "CPUs", "RAM", "Disk")
    pprint_table(stdout, per_state, headers, separator=" | ",
                 title="Servers Per Operational State")
    stdout.write("\n")

    # Print server stats per CPU
    per_cpu = []
    cpu_stats = defaultdict(dict)
    for state, stats in servers.items():
        for cpu, cpu_cnt in stats["cpu"].items():
            cpu_stats[cpu][state] = cpu_cnt
            cpu_stats[cpu]["total"] = \
                cpu_stats[cpu].setdefault("total", 0) + int(cpu_cnt)
    for cpu, _cpu_stats in sorted(cpu_stats.items()):
        per_cpu.append((cpu, _cpu_stats["total"],
                        _cpu_stats.get("started", 0),
                        _cpu_stats.get("stopped", 0),
                        _cpu_stats.get("error", 0)))
    headers = ("CPUs", "Total", "Started", "Stopped", "Error")
    pprint_table(stdout, per_cpu, headers, separator=" | ",
                 title="Servers Per CPU")
    stdout.write("\n")

    # Print server stats per RAM
    per_ram = []
    ram_stats = defaultdict(dict)
    for state, stats in servers.items():
        for ram, ram_cnt in stats["ram"].items():
            ram_stats[ram][state] = ram_cnt
            ram_stats[ram]["total"] = \
                ram_stats[ram].setdefault("total", 0) + int(ram_cnt)
    for ram, _ram_stats in sorted(ram_stats.items()):
        per_ram.append((units.show(ram, "bytes", "auto"),
                        _ram_stats["total"],
                        _ram_stats.get("started", 0),
                        _ram_stats.get("stopped", 0),
                        _ram_stats.get("error", 0)))
    headers = ("RAM", "Total", "Started", "Stopped", "Error")
    pprint_table(stdout, per_ram, headers, separator=" | ",
                 title="Servers Per RAM")
    stdout.write("\n")

    # Print server stats per Disk Template
    per_disk_t = []
    disk_t_stats = defaultdict(dict)
    for state, stats in servers.items():
        for disk_t, disk_t_info in stats["disk"].items():
            disk_t_cnt = reduce(operator.add,
                                [v for v in disk_t_info.values()], 0)
            disk_t_stats[disk_t][state] = disk_t_cnt
            disk_t_stats[disk_t]["total"] = \
                disk_t_stats[disk_t].setdefault("total", 0) + int(disk_t_cnt)
    for disk_t, _disk_t_stats in sorted(disk_t_stats.items()):
        per_disk_t.append((disk_t, _disk_t_stats["total"],
                           _disk_t_stats.get("started", 0),
                           _disk_t_stats.get("stopped", 0),
                           _disk_t_stats.get("error", 0)))
    headers = ("Disk Template", "Total", "Started", "Stopped", "Error")
    pprint_table(stdout, per_disk_t, headers, separator=" | ",
                 title="Servers Per Disk Template")
    stdout.write("\n")

    # Print server stats per Disk Template and Disk Size
    per_disk_t_size = []
    disk_template_sizes = defaultdict(dict)
    disk_sizes = set()
    for state, stats in servers.items():
        for disk_t, disk_t_info in stats["disk"].items():
            if disk_t not in disk_template_sizes:
                disk_template_sizes[disk_t] = defaultdict(int)
            for disk_size, vm_count in disk_t_info.items():
                disk_sizes.add(disk_size)
                disk_template_sizes[disk_t][disk_size] += vm_count
    disk_sizes = sorted(list(disk_sizes))

    for disk_t, disk_info in disk_template_sizes.items():
        _line = [disk_t]
        for size in disk_sizes:
            _line.append(disk_info[size])
        per_disk_t_size.append(_line)
    headers = ["Disk Template"] + map(lambda x: units.show(x, "bytes"),
                                      disk_sizes)
    pprint_table(stdout, per_disk_t_size, headers, separator=" | ",
                 title="Servers per Disk Template and Disk Size")
    stdout.write("\n")

    # Print server stats per disk size
    per_disk = []
    disk_stats = defaultdict(dict)
    for state, stats in servers.items():
        for disk_t, disk_info in stats["disk"].items():
            for disk, disk_cnt in disk_info.items():
                if disk not in disk_stats:
                    disk_stats[disk] = defaultdict(dict)
                disk_stats[disk][state] = \
                    disk_stats[disk].setdefault(state, 0) + int(disk_cnt)
                disk_stats[disk]["total"] = \
                    disk_stats[disk].setdefault("total", 0) + int(disk_cnt)
    for disk, _disk_stats in sorted(disk_stats.items()):
        per_disk.append((units.show(disk, "bytes", "auto"),
                         _disk_stats["total"],
                         _disk_stats.get("started", 0),
                         _disk_stats.get("stopped", 0),
                         _disk_stats.get("error", 0)))
    headers = ("Disk Size", "Total", "Started", "Stopped", "Error")
    pprint_table(stdout, per_disk, headers, separator=" | ",
                 title="Servers Per Disk Size")
    stdout.write("\n")
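A hypothetical invocation of pprint_servers, assuming the surrounding module's
imports (pprint_table, units, defaultdict, operator) are available; the sample
counts and sizes are made up:

import sys

# Minimal, hypothetical stats dict: one state with "count", "cpu", "ram" and
# "disk" histograms, matching the keys read by the loops above.
sample = {
    "started": {
        "count": 2,
        "cpu": {2: 2},
        "ram": {2147483648: 2},
        "disk": {"drbd": {21474836480: 2}},
    },
}
pprint_servers(sample, sys.stdout)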
Example #27
0
    def resource_policies(self):
        policies = []
        append = policies.append
        resource_indexes = {}
        include_diffs = False
        is_new = self.instance and self.instance.id is None

        existing_policies = []
        existing_data = {}

        # Normalize the submitted data to a plain dict with a single value
        # per key.
        data = dict()
        for key, value in self.data.iteritems():
            data[key] = value

        if not is_new:
            # The user may have emptied some fields. Empty values are not
            # handled below, so fill in the data as if the user had typed "0"
            # in the field, but only for resources that exist in the
            # application's project and have non-zero capacity (either for
            # the member or for the project).
            include_diffs = True
            existing_policies = self.instance.resource_set
            append_groups = set()
            for policy in existing_policies:
                cap_set = max(policy.project_capacity, policy.member_capacity)

                if not policy.resource.ui_visible:
                    continue

                rname = policy.resource.name
                group = policy.resource.group
                existing_data["%s_p_uplimit" % rname] = "0"
                existing_data["%s_m_uplimit" % rname] = "0"
                append_groups.add(group)

            for key, value in existing_data.iteritems():
                if key not in data or data.get(key, '') == '':
                    data[key] = value
            for group in append_groups:
                data["is_selected_%s" % group] = "1"

        for name, value in data.iteritems():

            if not value:
                continue

            if name.endswith('_uplimit'):
                is_project_limit = name.endswith('_p_uplimit')
                suffix = '_p_uplimit' if is_project_limit else '_m_uplimit'
                if value == 'inf' or value == 'Unlimited':
                    value = units.PRACTICALLY_INFINITE
                uplimit = value
                prefix, _suffix = name.split(suffix)

                try:
                    resource = Resource.objects.get(name=prefix)
                except Resource.DoesNotExist:
                    raise forms.ValidationError("Resource %s does not exist" %
                                                resource.name)

                if is_project_limit:
                    member_limit = data.get(prefix + '_m_uplimit')
                    try:
                        pvalue = int(value)
                        mvalue = int(member_limit)
                    except (TypeError, ValueError):
                        raise forms.ValidationError("Invalid format")
                else:
                    project_limit = data.get(prefix + '_p_uplimit')
                    try:
                        mvalue = int(value)
                        pvalue = int(project_limit)
                    except (TypeError, ValueError):
                        raise forms.ValidationError("Invalid format")

                if mvalue > pvalue:
                    msg = "%s per member limit exceeds total limit"
                    raise forms.ValidationError(msg % resource.name)

                # Keep only resource limits for selected resource groups.
                if data.get('is_selected_%s' % resource.group, "0") == "1":
                    if not resource.ui_visible:
                        raise forms.ValidationError("Invalid resource %s" %
                                                    resource.name)
                    d = model_to_dict(resource)
                    try:
                        uplimit = long(uplimit)
                    except ValueError:
                        m = "Limit should be an integer"
                        raise forms.ValidationError(m)

                    display = units.show(uplimit, resource.unit)
                    if display == "inf":
                        display = "Unlimited"

                    handled = resource_indexes.get(prefix)

                    diff_data = None
                    if include_diffs:
                        try:
                            policy = existing_policies.get(resource=resource)
                            if is_project_limit:
                                pval = policy.project_capacity
                            else:
                                pval = policy.member_capacity

                            if pval != uplimit:
                                diff = pval - uplimit

                                diff_display = units.show(abs(diff),
                                                          resource.unit,
                                                          inf="Unlimited")
                                diff_is_inf = False
                                prev_is_inf = False
                                if uplimit == units.PRACTICALLY_INFINITE:
                                    diff_display = "Unlimited"
                                    diff_is_inf = True
                                if pval == units.PRACTICALLY_INFINITE:
                                    diff_display = "Unlimited"
                                    prev_is_inf = True

                                prev_display = units.show(pval, resource.unit,
                                                          inf="Unlimited")

                                diff_data = {
                                    'prev': pval,
                                    'prev_display': prev_display,
                                    'diff': diff,
                                    'diff_display': diff_display,
                                    'increased': diff < 0,
                                    'diff_is_inf': diff_is_inf,
                                    'prev_is_inf': prev_is_inf,
                                    'operator': '+' if diff < 0 else '-'
                                }

                        except Exception:
                            # Could not compute a diff against an existing
                            # policy for this resource; skip it.
                            pass

                    if is_project_limit:
                        d.update(dict(resource=prefix,
                                      p_uplimit=uplimit,
                                      display_p_uplimit=display))

                        if diff_data:
                            d.update(dict(resource=prefix, p_diff=diff_data))

                        if not handled:
                            d.update(dict(resource=prefix, m_uplimit=0,
                                      display_m_uplimit=units.show(0,
                                           resource.unit)))
                    else:
                        d.update(dict(resource=prefix, m_uplimit=uplimit,
                                      display_m_uplimit=display))

                        if diff_data:
                            d.update(dict(resource=prefix, m_diff=diff_data))

                        if not handled:
                            d.update(dict(resource=prefix, p_uplimit=0,
                                      display_p_uplimit=units.show(0,
                                           resource.unit)))

                    if resource_indexes.get(prefix, None) is not None:
                        # already included in policies
                        handled.update(d)
                    else:
                        # keep track of resource dicts
                        append(d)
                        resource_indexes[prefix] = d

        ordered_keys = presentation.RESOURCES['resources_order']

        def resource_order(r):
            if r['str_repr'] in ordered_keys:
                return ordered_keys.index(r['str_repr'])
            else:
                return -1

        policies = sorted(policies, key=resource_order)
        return policies
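The method above parses flat form data keyed by resource name, using the
'<resource>_p_uplimit', '<resource>_m_uplimit' and 'is_selected_<group>'
conventions; a minimal, hypothetical sample (the resource name and group must
match Resource rows in the database):

# Hypothetical form data accepted by resource_policies(); values are strings
# as they would arrive from an HTML form, with 'inf'/'Unlimited' allowed.
data = {
    "cyclades.vm_p_uplimit": "5",    # project-wide limit for the resource
    "cyclades.vm_m_uplimit": "2",    # per-member limit for the resource
    "is_selected_compute": "1",      # the resource's group is selected
}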