Example #1
def aws_cpu_days_of_the_week_usage_m(accounts):
    """---
    get:
        tags:
            - aws
        produces:
            - application/json
        description: &desc AWS CPU usage by days of the week
        summary: *desc
        responses:
            200:
                description: List of days
                schema:
                    properties:
                        hours:
                            type: array
                            items:
                                properties:
                                    day:
                                        type: string
                                    cpu:
                                        type: number
            403:
                description: Not logged in
            404:
                description: AWS account not registered
    """
    days = AWSMetric.days_of_the_week_cpu_usage(account.key
                                                for account in accounts)
    if not days:
        return jsonify(message=get_next_update_estimation_message_aws(
            accounts, AWS_KEY_PROCESSING_INTERVAL_HOURS))
    return jsonify(dict(hours=days))
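
For reference, a 200 response matching the schema in the docstring would look roughly like the sketch below; the day names and CPU figures are illustrative values, not data taken from the project.

{
    "hours": [
        {"day": "Monday", "cpu": 42.5},
        {"day": "Tuesday", "cpu": 38.1},
        {"day": "Sunday", "cpu": 12.7}
    ]
}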
Example #2
File: stats.py Project: bastienk/trackit
def get_account_data(account):
    # Yield (date, cpu_usage, cost) rows; each data source fills only its own column.
    for date, cpu_usage in dict(
            AWSMetric.daily_cpu_utilization(account.key)).iteritems():
        yield (date, cpu_usage, None)
    for date, cost in dict(
            AWSDetailedLineitem.get_ec2_daily_cost(
                account.get_aws_user_id())).iteritems():
        yield (date, None, cost)
Example #3
def aws_underutilized_resources_reduced_cost(accounts):
    now = datetime.utcnow()
    date_from = now.replace(hour=0, minute=0, second=0, microsecond=0) - relativedelta(months=6)
    date_to = now.replace(hour=23, minute=59, second=59, microsecond=999999)

    resources = AWSMetric.underutilized_resources(account.key for account in accounts)
    resource_ids = set(r['id'] for r in resources['resources'])
    months = AWSDetailedLineitem.get_monthly_cost_by_resource(resource_ids, date_from=date_from, date_to=date_to)
    res = {  # Simply multiply every cost by 20% as all instances usage is
        k: v * 0.2  # less than 20%. TODO: intelligently find the best type
        for k, v in months.iteritems()
    }
    return jsonify(res)
Example #4
def get_daily_cpu_usage_by_tag(session, key):
    tags = defaultdict(lambda: defaultdict(list))
    res = defaultdict(list)
    # Group instances as '<region>/<instance id>' by tag key and tag value,
    # skipping the 'Name' tag and AWS-internal 'aws:*' tags.
    for region, instance in get_all_instances(session):
        if instance.tags:
            for tag in instance.tags:
                if tag['Key'] != 'Name' and not tag['Key'].startswith('aws:'):
                    tags[tag['Key']][tag['Value']].append(region + '/' + instance.id)
    # Fetch day-of-week CPU usage for each group of tagged instances.
    for tag_key, tag_values_dict in tags.iteritems():
        for tag_value, instance_ids in tag_values_dict.iteritems():
            usage = AWSMetric.days_of_the_week_cpu_usage(key, resources=instance_ids)
            if usage:
                res[tag_key].append(dict(tag_value=tag_value, usage=usage, nb_instances=len(instance_ids)))
    # Keep only the ten most-populated tag values per tag key.
    for tag_key, values in res.iteritems():
        res[tag_key] = sorted(values, key=lambda x: x['nb_instances'], reverse=True)[:10]
    return dict(tags=res)
Example #5
def get_s3_space_usage(account):
    res = []
    s3_estimator = S3CostEstimator()
    s3_space_usage = list(AWSMetric.get_s3_space_usage(account.key))
    for name, value in s3_space_usage:
        # Metric names have the form '<region>/<bucket>/<storage class>'.
        tmp = name.split('/', 2)
        if len(tmp) == 3:
            storage_type = 'infrequent_access' if tmp[2] == 'StandardIAStorage' else 'standard'
            storage_standard = 0 if tmp[2] == 'StandardIAStorage' else value
            storage_dra = 0 if tmp[2] == 'StandardStorage' else value
            args = dict(
                storage_standard=storage_standard,
                storage_dra=storage_dra
            )
            prices = []
            args.update(region=tmp[0])
            prices.append(dict(provider='aws', cost=s3_estimator.estimate(**args)['total'] / 1000.0))
            del args['region']
            prices.append(dict(provider='gcloud', cost=google_storage.current_model(**args)['total'] / 1000.0))
            prices.append(dict(provider='azure', cost=azure_estimate_cost(**args)['total'] / 1000.0))
            res.append(dict(location=tmp[0], name=tmp[1], type=storage_type, provider='aws', used_space=value, prices=prices))
    return dict(buckets=res)
Example #6
def get_bucket_metrics_records():
    for metric in get_bucket_metrics(session, since):
        # Build a deterministic document id from the metric time, name and resource.
        id = checksum(str(metric['time']), metric['metric'],
                      metric['resource'])
        yield AWSMetric(meta={'id': id}, key=key.key, **metric)
Example #7
    aws_account = AWSKey.query.get(aws_account.id)
    res = []
    try:
        session = boto3.Session(aws_access_key_id=aws_account.key,
                                aws_secret_access_key=aws_account.secret)
    except Exception, e:
        logging.error("[user={}][key={}] {}".format(aws_account.user.email, aws_account.pretty or aws_account.key, str(e)))
        aws_credentials_error(aws_account, traceback.format_exc())
        aws_account.error_status = u"bad_key"
        db.session.commit()
        return

    try:
        aws_pricing_data = {
            (f['instanceType'], f['location'], f['operatingSystem']): f
            for f in get_pricing_data()
            if 'HostBoxUsage' not in f.get('usagetype', '')
        }

        cpu_usage = {key: value for key, value in AWSMetric.get_cpu_usage(aws_account.key)}
        instance_iops_usage = {
            'read': {key: value for key, value in AWSMetric.get_instance_read_iops_usage(aws_account.key)},
            'write': {key: value for key, value in AWSMetric.get_instance_write_iops_usage(aws_account.key)}
        }
        instance_bytes_usage = {
            'read': {key: value for key, value in AWSMetric.get_instance_read_bytes_usage(aws_account.key)},
            'write': {key: value for key, value in AWSMetric.get_instance_write_bytes_usage(aws_account.key)}
        }
        volume_iops_usage = {
            'read': {key: value for key, value in AWSMetric.get_volume_read_iops_usage(aws_account.key)},
            'write': {key: value for key, value in AWSMetric.get_volume_write_iops_usage(aws_account.key)}
        }
        volume_bytes_usage = {
            'read': {key: value for key, value in AWSMetric.get_volume_read_bytes_usage(aws_account.key)},
            'write': {key: value for key, value in AWSMetric.get_volume_write_bytes_usage(aws_account.key)}
        }
Example #8
File: stats.py Project: bastienk/trackit
def aws_underutilized_resources(accounts):
    return jsonify(
        AWSMetric.underutilized_resources(account.key for account in accounts))
Example #9
def compute_reservation_forecast(keys):
    if isinstance(keys, models.AWSKey):
        keys = [keys]
    elif not isinstance(keys, list):
        keys = list(keys)
    if not all(isinstance(k, models.AWSKey) for k in keys):
        raise TypeError('All keys must be AWSKey.')
    now = datetime.utcnow().replace(hour=0, minute=0, second=0, microsecond=0)
    range_end = now.replace(hour=0, minute=0, second=0, microsecond=0)
    range_end -= timedelta(days=1)
    range_start = range_end - timedelta(days=120)
    range_start = range_start.replace(day=1)
    s = AWSDetailedLineitem.get_instance_type(
        [k.get_aws_user_id() for k in keys],
        date_from=range_start,
        date_to=range_end)
    # Bucket hourly instance counts by (region, instance type) and remember the
    # earliest hour seen in the billing data.
    instance_type_hours = defaultdict(list)
    first_hour = datetime(2099, 1, 1)
    for r in s:
        rhour = datetime.strptime(r['hour'], "%Y-%m-%dT%H:%M:%S")
        if r['region'] != 'unknown':  # Some EC2 instances have no region, sometimes...
            instance_type_hours[(r['region'], r['instance'])].append(
                (rhour, r['ridCount']))
            first_hour = min(first_hour, rhour)
    # Forecast 120 days (120 * 24 hours) past the end of the observed range.
    hours_ahead = 120 * 24
    total_hours = (range_end -
                   first_hour).total_seconds() / 3600 - 1 + hours_ahead

    instance_types = []
    lookup = get_instance_lookup()
    # For every (region, instance type) pair, forecast future usage and compare
    # the on-demand price against each reserved-pricing option.
    for (region, instance_type), hours in instance_type_hours.iteritems():
        hours = count_forecast(hours, range_start, now, hours_ahead)
        prices = lookup[region, instance_type]
        price_results = get_monthly_prices(
            total_hours, hours, [p['amortized'] for p in prices['reserved']],
            prices['ondemand']['amortized'])
        ps = []
        for pricing, (_, count,
                      months) in zip(prices['reserved'] + [prices['ondemand']],
                                     price_results):
            pricing = dict(pricing)
            if count is not None:
                pricing['count'] = count
            pricing['months'] = [
                dict(month=m.strftime('%Y-%m'), cost=c) for m, c in months[:-1]
            ]
            ps.append(pricing)
        instance_types.append(
            dict(region=region, type=instance_type, pricing_options=ps))
    available_volumes = AWSStat.latest_available_volumes([k.key for k in keys])
    now = datetime.utcnow()
    date_from = now.replace(hour=0, minute=0, second=0,
                            microsecond=0) - relativedelta(months=6)
    date_to = now.replace(hour=23, minute=59, second=59, microsecond=999999)
    volume_monthly_costs = AWSDetailedLineitem.get_monthly_cost_by_resource(
        available_volumes['volumes'] if 'volumes' in available_volumes else (),
        date_from=date_from,
        date_to=date_to)
    resources = AWSMetric.underutilized_resources([k.key for k in keys])
    rids = set(r['id'] for r in resources['resources'])
    months = AWSDetailedLineitem.get_monthly_cost_by_resource(
        rids, date_from=date_from, date_to=date_to)
    # Same 20% heuristic as in aws_underutilized_resources_reduced_cost above:
    # underutilized instances are assumed to need at most 20% of their current cost.
    reduced_instance_costs = {k: v * 0.2 for k, v in months.iteritems()}

    return dict(
        instances=instance_types,
        volume_monthly_costs=volume_monthly_costs,
        reduced_instance_costs=reduced_instance_costs,
    )