Example #1
def _ec2_region(region, cred):
    access_key = cryptotool.decrypt_scalr(app.crypto_key, cred['access_key'])
    secret_key = cryptotool.decrypt_scalr(app.crypto_key, cred['secret_key'])
    kwds = {
        'aws_access_key_id': access_key,
        'aws_secret_access_key': secret_key
    }
    if app.scalr_config.get('aws', {}).get('use_proxy',
                                           False) in [True, 'yes']:
        if app.scalr_config['connections'].get('proxy', {}).get(
                'use_on', 'both') in ['both', 'scalr']:
            kwds['proxy'] = app.scalr_config['connections']['proxy']['host']
            kwds['proxy_port'] = app.scalr_config['connections']['proxy'][
                'port']
            kwds['proxy_user'] = app.scalr_config['connections']['proxy'][
                'user']
            kwds['proxy_pass'] = app.scalr_config['connections']['proxy'][
                'pass']
    conn = boto.ec2.connect_to_region(region, **kwds)
    cloud_nodes = _ec2_get_only_instances(conn)
    timestamp = int(time.time())
    nodes = list()
    for cloud_node in cloud_nodes:
        node = {
            'instance_id': cloud_node.id,
            'instance_type': cloud_node.instance_type,
            'os': cloud_node.platform if cloud_node.platform else 'linux'
        }
        nodes.append(node)
    return {
        'region': region,
        'timestamp': timestamp,
        'nodes': nodes
    } if nodes else dict()
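A minimal invocation sketch for the variant above; the shape of the cred mapping and the placeholder values are assumptions (in practice the values are Scalr-encrypted strings that cryptotool.decrypt_scalr can decode):

# Hypothetical usage: cred carries the encrypted AWS keys read above.
cred = {'access_key': '<encrypted access key>',
        'secret_key': '<encrypted secret key>'}
region_data = _ec2_region('us-east-1', cred)
# -> {'region': 'us-east-1', 'timestamp': ..., 'nodes': [...]} when running
# instances are found, otherwise an empty dict.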
Example #2
def _ec2_region(region, cred):
    try:
        access_key = cryptotool.decrypt_scalr(app.crypto_key, cred["access_key"])
        secret_key = cryptotool.decrypt_scalr(app.crypto_key, cred["secret_key"])
        kwds = {"aws_access_key_id": access_key, "aws_secret_access_key": secret_key}
        proxy_settings = app.proxy_settings.get(cred.platform, {})
        kwds["proxy"] = proxy_settings.get("host")
        kwds["proxy_port"] = proxy_settings.get("port")
        kwds["proxy_user"] = proxy_settings.get("user")
        kwds["proxy_pass"] = proxy_settings.get("pass")

        msg = "List nodes for platform: 'ec2', region: '{}', envs_ids: {}"
        msg = msg.format(region, cred.envs_ids)
        LOG.debug(msg)

        conn = boto.ec2.connect_to_region(region, **kwds)
        cloud_nodes = _ec2_get_only_instances(conn)
        timestamp = int(time.time())
        nodes = list()
        for cloud_node in cloud_nodes:
            node = {
                "instance_id": cloud_node.id,
                "instance_type": cloud_node.instance_type,
                "os": cloud_node.platform if cloud_node.platform else "linux",
            }
            nodes.append(node)
        return {"region": region, "timestamp": timestamp, "nodes": nodes} if nodes else dict()
    except:
        e = sys.exc_info()[1]
        msg = "platform: '{platform}', region: '{region}', envs_ids: {envs_ids}. Reason: {error}"
        msg = msg.format(
            platform=cred.platform, region=region, envs_ids=cred.envs_ids, error=helper.exc_info(where=False)
        )
        _handle_exception(e, msg)
Example #3
def _eucalyptus(cred):
    access_key = cryptotool.decrypt_scalr(app.crypto_key, cred['access_key'])
    secret_key = cryptotool.decrypt_scalr(app.crypto_key, cred['secret_key'])
    ec2_url = cryptotool.decrypt_scalr(app.crypto_key, cred['ec2_url'])
    url = urlparse.urlparse(ec2_url)
    splitted_netloc = url.netloc.split(':')
    host = splitted_netloc[0]
    try:
        port = splitted_netloc[1]
    except:
        port = None
    path = url.path
    region = 'eucalyptus'
    region_info = boto.ec2.regioninfo.RegionInfo(name=region, endpoint=host)
    conn = boto.connect_ec2(aws_access_key_id=access_key,
                            aws_secret_access_key=secret_key,
                            is_secure=False,
                            port=port,
                            path=path,
                            region=region_info)
    cloud_nodes = _ec2_get_only_instances(conn)
    timestamp = int(time.time())
    nodes = list()
    for cloud_node in cloud_nodes:
        node = {
            'instance_id': cloud_node.id,
            'instance_type': cloud_node.instance_type,
            'os': cloud_node.platform if cloud_node.platform else 'linux'
        }
        nodes.append(node)
    return {
        'region': cred['group'],
        'timestamp': timestamp,
        'nodes': nodes
    } if nodes else dict()
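The host/port handling above (and in the CloudStack examples below) simply splits the URL netloc on ':'; a standalone sketch of that step, with an illustrative endpoint:

import urlparse  # urllib.parse on Python 3

url = urlparse.urlparse('https://euca.example.com:8773/services/Eucalyptus')
splitted_netloc = url.netloc.split(':')
host = splitted_netloc[0]     # 'euca.example.com'
port = splitted_netloc[1] if len(splitted_netloc) > 1 else None  # '8773'
path = url.path               # '/services/Eucalyptus'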
Example #4
def _ec2_region(region, cred):
    access_key = cryptotool.decrypt_scalr(app.crypto_key, cred['access_key'])
    secret_key = cryptotool.decrypt_scalr(app.crypto_key, cred['secret_key'])
    kwds = {
        'aws_access_key_id': access_key,
        'aws_secret_access_key': secret_key
    }
    if app.scalr_config.get('aws', {}).get('use_proxy', False) in [True, 'yes']:
        if app.scalr_config['connections'].get('proxy', {}).get('use_on', 'both') in ['both', 'scalr']:
            kwds['proxy'] = app.scalr_config['connections']['proxy']['host']
            kwds['proxy_port'] = app.scalr_config['connections']['proxy']['port']
            kwds['proxy_user'] = app.scalr_config['connections']['proxy']['user']
            kwds['proxy_pass'] = app.scalr_config['connections']['proxy']['pass']
    conn = boto.ec2.connect_to_region(region, **kwds)
    cloud_nodes = _ec2_get_only_instances(conn)
    timestamp = int(time.time())
    nodes = list()
    for cloud_node in cloud_nodes:
        node = {
            'instance_id': cloud_node.id,
            'instance_type': cloud_node.instance_type,
            'os': cloud_node.platform if cloud_node.platform else 'linux'
        }
        nodes.append(node)
    return {
        'region': region,
        'timestamp': timestamp,
        'nodes': nodes
    } if nodes else dict()
Example #5
def _cloudstack(cred):
    try:
        result = list()
        api_key = cryptotool.decrypt_scalr(app.crypto_key, cred['api_key'])
        secret_key = cryptotool.decrypt_scalr(app.crypto_key,
                                              cred['secret_key'])
        api_url = cryptotool.decrypt_scalr(app.crypto_key, cred['api_url'])
        url = urlparse.urlparse(api_url)
        splitted_netloc = url.netloc.split(':')
        host = splitted_netloc[0]
        try:
            port = splitted_netloc[1]
        except:
            port = 443 if url.scheme == 'https' else None
        path = url.path
        secure = url.scheme == 'https'

        cls = get_driver(Provider.CLOUDSTACK)
        driver = cls(key=api_key,
                     secret=secret_key,
                     host=host,
                     port=port,
                     path=path,
                     secure=secure)

        proxy_url = app.proxy_settings.get(cred.platform, {}).get('url')
        driver.connection.set_http_proxy(proxy_url=proxy_url)

        locations = driver.list_locations()
        cloud_nodes = _libcloud_list_nodes(driver)
        timestamp = int(time.time())
        for location in locations:
            nodes = list()
            for cloud_node in cloud_nodes:
                if cloud_node.state != NodeState.RUNNING or cloud_node.extra[
                        'zone_id'] != location.id:
                    continue
                node = {
                    'instance_id': cloud_node.id,
                    'instance_type': cloud_node.extra['size_id'],
                    'os': None
                }
                nodes.append(node)
            if nodes:
                result.append({
                    'region': location.name,
                    'timestamp': timestamp,
                    'nodes': nodes
                })
        return result
    except:
        e = sys.exc_info()[1]
        msg = "platform: '{platform}', envs_ids: {envs_ids}. Reason: {error}"
        msg = msg.format(platform=cred.platform,
                         envs_ids=cred.envs_ids,
                         error=helper.exc_info(where=False))
        _handle_exception(e, msg)
Example #6
def _gce_key(cred):
    if 'json_key' in cred:
        key = json.loads(cryptotool.decrypt_scalr(app.crypto_key, cred['json_key']))['private_key']
    else:
        key = cryptotool.decrypt_scalr(app.crypto_key, cred['key'])
        # convert pkcs12 to rsa
        out, err, ret_code = helper.call(
            "openssl pkcs12 -nodes -nocerts -passin pass:notasecret | openssl rsa",
            input=binascii.a2b_base64(key),
            shell=True
        )
        key = out.strip()
    return key
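The PKCS#12 branch above shells out through helper.call; a rough standalone equivalent of that conversion, assuming p12_b64 holds the decrypted, base64-encoded .p12 key and that it is protected with the standard Google 'notasecret' passphrase:

import binascii
import subprocess

p12_der = binascii.a2b_base64(p12_b64)  # p12_b64 is a hypothetical variable
proc = subprocess.Popen(
    "openssl pkcs12 -nodes -nocerts -passin pass:notasecret | openssl rsa",
    shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
pem_key, _ = proc.communicate(p12_der)
pem_key = pem_key.strip()  # RSA private key in PEM form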
Example #7
def _gce_key(cred):
    if cred.get('json_key'):
        key = json.loads(
            cryptotool.decrypt_scalr(app.crypto_key,
                                     cred['json_key']))['private_key']
    else:
        key = cryptotool.decrypt_scalr(app.crypto_key, cred['key'])
        # convert pkcs12 to rsa
        out, err, ret_code = helper.call(
            "openssl pkcs12 -nodes -nocerts -passin pass:notasecret | openssl rsa",
            input=binascii.a2b_base64(key),
            shell=True)
        key = out.strip()
    return key
Example #8
def _gce_zone(zone, key, cred):
    try:
        conn, http = _gce_conn(cred, key=key)
        project_id = cryptotool.decrypt_scalr(app.crypto_key, cred["project_id"])
        request = conn.instances().list(project=project_id, zone=zone, filter="status eq RUNNING")
        resp = request.execute(http=http)
        timestamp = int(time.time())
        cloud_nodes = resp["items"] if "items" in resp else []
        nodes = list()
        for cloud_node in cloud_nodes:
            node = {
                "instance_id": cloud_node["id"],
                "server_name": cloud_node["name"],
                "instance_type": cloud_node["machineType"].split("/")[-1],
                "os": None,
            }
            for item in cloud_node["metadata"].get("items", []):
                meta = dict(tuple(element.split("=", 1)) for element in item["value"].split(";") if "=" in element)
                if "serverid" in meta:
                    node["server_id"] = meta["serverid"]
                if "env_id" in meta:
                    node["env_id"] = int(meta["env_id"])
                    break
            nodes.append(node)
        return {"region": zone, "timestamp": timestamp, "nodes": nodes} if nodes else dict()
    except:
        e = sys.exc_info()[1]
        msg = "platform: '{platform}', zone: '{zone}', envs_ids: {envs_ids}. Reason: {error}"
        msg = msg.format(platform=cred.platform, zone=zone, envs_ids=cred.envs_ids, error=helper.exc_info(where=False))
        _handle_exception(e, msg)
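The metadata loop above folds each ';'-separated 'key=value' item into a dict; a standalone illustration with a made-up value string:

value = 'serverid=0a1b2c3d-0000-0000-0000-000000000000;env_id=42'
meta = dict(tuple(element.split('=', 1)) for element in value.split(';') if '=' in element)
# meta == {'serverid': '0a1b2c3d-...', 'env_id': '42'}; env_id stays a string
# until the int() cast in the function above.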
Example #9
def _openstack_cred(cred):
    username = cryptotool.decrypt_scalr(app.crypto_key, cred["username"])
    if "password" in cred:
        password = cryptotool.decrypt_scalr(app.crypto_key, cred["password"])
        auth_version = "2.0_password"
    else:
        password = cryptotool.decrypt_scalr(app.crypto_key, cred["api_key"])
        auth_version = "2.0_apikey"
    keystone_url = cryptotool.decrypt_scalr(app.crypto_key, cred["keystone_url"])
    if not keystone_url.rstrip("/").endswith("/tokens"):
        keystone_url = os.path.join(keystone_url, "tokens")
    if "tenant_name" in cred:
        tenant_name = cryptotool.decrypt_scalr(app.crypto_key, cred["tenant_name"])
    else:
        tenant_name = None
    return username, password, auth_version, keystone_url, tenant_name
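A quick standalone check of the keystone URL normalization used above (the URL itself is illustrative):

import os

keystone_url = 'https://keystone.example.com:5000/v2.0'
if not keystone_url.rstrip('/').endswith('/tokens'):
    keystone_url = os.path.join(keystone_url, 'tokens')
# keystone_url == 'https://keystone.example.com:5000/v2.0/tokens'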
Example #10
def get_s3_conn(cred):
    access_key = cryptotool.decrypt_scalr(app.crypto_key, cred['access_key'])
    secret_key = cryptotool.decrypt_scalr(app.crypto_key, cred['secret_key'])
    kwds = {
        'aws_access_key_id': access_key,
        'aws_secret_access_key': secret_key
    }
    use_proxy = app.scalr_config.get('aws', {}).get('use_proxy', False)
    use_on = app.scalr_config['connections'].get('proxy', {}).get('use_on', 'both')
    if use_proxy in [True, 'yes'] and use_on in ['both', 'scalr']:
        kwds['proxy'] = app.scalr_config['connections']['proxy']['host']
        kwds['proxy_port'] = app.scalr_config['connections']['proxy']['port']
        kwds['proxy_user'] = app.scalr_config['connections']['proxy']['user']
        kwds['proxy_pass'] = app.scalr_config['connections']['proxy']['pass']
    conn = boto.connect_s3(**kwds)
    return conn
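The proxy kwargs above are attached only when both configuration switches agree; a hypothetical scalr_config fragment that satisfies the check:

scalr_config = {
    'aws': {'use_proxy': 'yes'},
    'connections': {'proxy': {'host': 'proxy.example.com', 'port': 3128,
                              'user': 'scalr', 'pass': 'secret', 'use_on': 'both'}},
}
use_proxy = scalr_config.get('aws', {}).get('use_proxy', False)
use_on = scalr_config['connections'].get('proxy', {}).get('use_on', 'both')
assert use_proxy in [True, 'yes'] and use_on in ['both', 'scalr']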
Example #11
def gce(cred):
    """
    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """

    result = list()

    project_id = cryptotool.decrypt_scalr(app.crypto_key, cred['project_id'])
    key = _gce_key(cred)
    conn, http = _gce_conn(cred, key=key)
    request = conn.zones().list(project=project_id)
    resp = request.execute(http=http)
    zones = [_['name'] for _ in resp['items']] if 'items' in resp else []

    app.pool.wait()
    async_results = dict(
        (zone, app.pool.apply_async(_gce_zone, args=(zone, key, cred,)))
        for zone in zones
    )
    gevent.sleep(0)  # force switch
    for zone, async_result in async_results.iteritems():
        try:
            zone_nodes = async_result.get(timeout=app.config['cloud_connection_timeout'] + 1)
            if zone_nodes:
                result.append(zone_nodes)
        except gevent.timeout.Timeout:
            async_result.kill()
            msg = 'platform: {platform}, zone: {zone}, env_id: {env_id}, reason: timeout'
            msg = msg.format(platform=cred.platform, zone=zone, env_id=cred.env_id)
            LOG.warning(msg)
    return result
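Per the docstring, gce() returns one entry per zone that has running instances; an illustrative (made-up) result, using the node shape built by _gce_zone:

[{'region': 'us-central1-a',
  'timestamp': 1500000000,
  'nodes': [{'instance_id': '1234567890123456789',
             'server_name': 'scalr-node-1',
             'instance_type': 'n1-standard-1',
             'os': None}]}]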
Example #12
def db_update(sorted_data, envs_ids, cred):
    platform = cred.platform

    for env_id in envs_ids:
        for region_data in sorted_data:
            try:
                sid = uuid.uuid4()
                if platform == 'ec2':
                    cloud_account = cred.get('account_id')
                else:
                    cloud_account = None

                if analytics.url_key_map[platform]:
                    url = urlparse.urlparse(
                        cryptotool.decrypt_scalr(
                            app.crypto_key,
                            cred[analytics.url_key_map[platform]]).rstrip('/'))
                    url = '%s%s' % (url.netloc, url.path)
                else:
                    url = ''

                query = ("SELECT client_id "
                         "FROM client_environments "
                         "WHERE id={env_id}").format(env_id=env_id)
                results = app.scalr_db.execute(query, retries=1)
                account_id = results[0]['client_id']

                query = (
                    "INSERT IGNORE INTO poller_sessions "
                    "(sid, account_id, env_id, dtime, platform, url, cloud_location, cloud_account) "
                    "VALUES "
                    "(UNHEX('{sid}'), {account_id}, {env_id}, '{dtime}', '{platform}', '{url}',"
                    "'{cloud_location}', '{cloud_account}')").format(
                        sid=sid.hex,
                        account_id=account_id,
                        env_id=env_id,
                        dtime=time.strftime(
                            "%Y-%m-%d %H:%M:%S",
                            time.gmtime(region_data['timestamp'])),
                        platform=platform,
                        url=url,
                        cloud_location=region_data['region'],
                        cloud_account=cloud_account)
                app.analytics_db.execute(query, retries=1)

                # managed
                for managed in region_data['managed']:
                    if managed['env_id'] != env_id:
                        continue
                    query = (
                        "INSERT IGNORE INTO managed "
                        "(sid, server_id, instance_type, os) VALUES "
                        "(UNHEX('{sid}'), UNHEX('{server_id}'), '{instance_type}', {os})"
                    ).format(sid=sid.hex,
                             server_id=uuid.UUID(managed['server_id']).hex,
                             instance_type=managed['instance_type'],
                             os=managed['os'])
                    app.analytics_db.execute(query, retries=1)
            except:
                helper.handle_error(message='Database update failed')
Example #13
    def download_aws_billing_file(self, cred, bucket_name, date=None):
        if date is None:
            date = datetime.datetime.utcnow().date()

        conn = get_s3_conn(cred)
        bucket = conn.get_bucket(bucket_name)
        account_id = cryptotool.decrypt_scalr(app.crypto_key,
                                              cred['account_id'])
        file_name = get_aws_csv_file_name(account_id, date)
        key = bucket.get_key(file_name)

        if not key:
            msg = "AWS detailed billing CSV file {0} wasn't found in bucket {1}"
            msg = msg.format(file_name, bucket_name)
            if datetime.datetime.utcnow().day == 1:
                LOG.warning(msg)
                return None
            else:
                raise Exception(msg)

        last_modified_dt = datetime.datetime.strptime(
            key.last_modified, self.last_modified_format)
        update_interval = self.config['interval']
        utcnow = datetime.datetime.utcnow()
        delta = datetime.timedelta(seconds=update_interval)
        condition1 = utcnow > last_modified_dt and utcnow < last_modified_dt + delta
        condition2 = ((utcnow - last_modified_dt).seconds / 3600) % 8 == 0
        if condition1 or condition2:
            local_file_path = os.path.join(self.tmp_dir, file_name)
            LOG.debug('Downloading {0}'.format(file_name))
            key.get_contents_to_filename(local_file_path)
            return local_file_path
        else:
            return None
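A small sketch of the two refresh conditions above with made-up timestamps; condition1 catches a file modified within the last update interval, while condition2 fires roughly every 8th hour of the elapsed time (note that timedelta.seconds wraps at one day):

import datetime

last_modified_dt = datetime.datetime(2016, 1, 10, 12, 0, 0)
utcnow = datetime.datetime(2016, 1, 10, 12, 3, 0)
delta = datetime.timedelta(seconds=300)  # assumed 'interval' config value
condition1 = utcnow > last_modified_dt and utcnow < last_modified_dt + delta
condition2 = ((utcnow - last_modified_dt).seconds / 3600) % 8 == 0
# Here both are True: the file changed 3 minutes ago, and the elapsed whole
# hours (0) are a multiple of 8.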
Example #14
def _openstack_cred(cred):
    username = cryptotool.decrypt_scalr(app.crypto_key, cred['username'])
    if 'password' in cred:
        password = cryptotool.decrypt_scalr(app.crypto_key, cred['password'])
        auth_version = '2.0_password'
    else:
        password = cryptotool.decrypt_scalr(app.crypto_key, cred['api_key'])
        auth_version = '2.0_apikey'
    keystone_url = cryptotool.decrypt_scalr(app.crypto_key, cred['keystone_url'])
    if not keystone_url.rstrip('/').endswith('/tokens'):
        keystone_url = os.path.join(keystone_url, 'tokens')
    if 'tenant_name' in cred:
        tenant_name = cryptotool.decrypt_scalr(app.crypto_key, cred['tenant_name'])
    else:
        tenant_name = None
    return username, password, auth_version, keystone_url, tenant_name
Example #15
def _gce_zone(zone, cred):
    conn, http = _gce_conn(cred)
    project_id = cryptotool.decrypt_scalr(CRYPTO_KEY, cred['project_id'])
    request = conn.instances().list(
        project=project_id,
        zone=zone,
        filter='status eq RUNNING'
    )
    resp = request.execute(http=http)
    timestamp = int(time.time())
    cloud_nodes = resp['items'] if 'items' in resp else []
    nodes = list()
    for cloud_node in cloud_nodes:
        node = {
            'instance_id': cloud_node['id'],
            'server_name': cloud_node['name'],
            'instance_type': cloud_node['machineType'].split('/')[-1],
            'os': None
        }
        nodes.append(node)
    return {
        'region': zone,
        'timestamp': timestamp,
        'nodes': nodes
    } if nodes else dict()
Example #16
    def download_aws_billing_file(self, cred, bucket_name, date=None):
        if date is None:
            date = datetime.datetime.utcnow().date()

        conn = get_s3_conn(cred)
        bucket = conn.get_bucket(bucket_name)
        account_id = cryptotool.decrypt_scalr(app.crypto_key, cred['account_id'])
        file_name = get_aws_csv_file_name(account_id, date)
        key = bucket.get_key(file_name)

        if not key:
            msg = "AWS detailed billing CSV file {0} wasn't found in bucket {1}"
            msg = msg.format(file_name, bucket_name)
            if datetime.datetime.utcnow().day == 1:
                LOG.warning(msg)
                return None
            else:
                raise Exception(msg)

        last_modified_dt = datetime.datetime.strptime(key.last_modified, self.last_modified_format)
        update_interval = self.config['interval']
        utcnow = datetime.datetime.utcnow()
        delta = datetime.timedelta(seconds=update_interval)
        condition1 = utcnow > last_modified_dt and utcnow < last_modified_dt + delta
        condition2 = ((utcnow - last_modified_dt).seconds / 3600) % 8 == 0
        if condition1 or condition2:
            local_file_path = os.path.join(self.tmp_dir, file_name)
            LOG.debug('Downloading {0}'.format(file_name))
            key.get_contents_to_filename(local_file_path)
            return local_file_path
        else:
            return None
Example #17
def _cloudstack(cred):
    try:
        result = list()
        api_key = cryptotool.decrypt_scalr(app.crypto_key, cred['api_key'])
        secret_key = cryptotool.decrypt_scalr(app.crypto_key, cred['secret_key'])
        api_url = cryptotool.decrypt_scalr(app.crypto_key, cred['api_url'])
        url = urlparse.urlparse(api_url)
        splitted_netloc = url.netloc.split(':')
        host = splitted_netloc[0]
        try:
            port = splitted_netloc[1]
        except:
            port = 443 if url.scheme == 'https' else None
        path = url.path
        secure = url.scheme == 'https'

        cls = get_driver(Provider.CLOUDSTACK)
        driver = cls(key=api_key, secret=secret_key, host=host, port=port, path=path, secure=secure)
        locations = driver.list_locations()
        cloud_nodes = _libcloud_list_nodes(driver)
        timestamp = int(time.time())
        for location in locations:
            nodes = list()
            for cloud_node in cloud_nodes:
                if cloud_node.state != NodeState.RUNNING or cloud_node.extra['zone_id'] != location.id:
                    continue
                node = {
                    'instance_id': cloud_node.id,
                    'instance_type': cloud_node.extra['size_id'],
                    'os': None
                }
                nodes.append(node)
            if nodes:
                result.append(
                    {
                        'region': location.name,
                        'timestamp': timestamp,
                        'nodes': nodes
                    }
                )
        return result
    except:
        e = sys.exc_info()[1]
        msg = 'platform: {platform}, env_id: {env_id}, reason: {error}'
        msg = msg.format(platform=cred.platform, env_id=cred.env_id,
                         error=helper.exc_info(where=False))
        _handle_exception(e, msg)
Example #18
def get_s3_conn(cred):
    access_key = cryptotool.decrypt_scalr(app.crypto_key, cred['access_key'])
    secret_key = cryptotool.decrypt_scalr(app.crypto_key, cred['secret_key'])
    kwds = {
        'aws_access_key_id': access_key,
        'aws_secret_access_key': secret_key
    }
    use_proxy = app.scalr_config.get('aws', {}).get('use_proxy', False)
    use_on = app.scalr_config['connections'].get('proxy',
                                                 {}).get('use_on', 'both')
    if use_proxy in [True, 'yes'] and use_on in ['both', 'scalr']:
        kwds['proxy'] = app.scalr_config['connections']['proxy']['host']
        kwds['proxy_port'] = app.scalr_config['connections']['proxy']['port']
        kwds['proxy_user'] = app.scalr_config['connections']['proxy']['user']
        kwds['proxy_pass'] = app.scalr_config['connections']['proxy']['pass']
    conn = boto.connect_s3(**kwds)
    return conn
Example #19
def db_update(sorted_data, envs_ids, cred):
    platform = cred.platform

    for env_id in envs_ids:
        for region_data in sorted_data:
            try:
                sid = uuid.uuid4()
                if platform == 'ec2':
                    cloud_account = cred.get('account_id')
                else:
                    cloud_account = None

                if analytics.url_key_map[platform]:
                    url = urlparse.urlparse(cryptotool.decrypt_scalr(
                        app.crypto_key, cred[analytics.url_key_map[platform]]).rstrip('/'))
                    url = '%s%s' % (url.netloc, url.path)
                else:
                    url = ''

                query = (
                    "SELECT client_id "
                    "FROM client_environments "
                    "WHERE id={env_id}"
                ).format(env_id=env_id)
                results = app.scalr_db.execute(query, retries=1)
                account_id = results[0]['client_id']

                query = (
                    "INSERT IGNORE INTO poller_sessions "
                    "(sid, account_id, env_id, dtime, platform, url, cloud_location, cloud_account) "
                    "VALUES "
                    "(UNHEX('{sid}'), {account_id}, {env_id}, '{dtime}', '{platform}', '{url}',"
                    "'{cloud_location}', '{cloud_account}')"
                ).format(
                    sid=sid.hex, account_id=account_id, env_id=env_id,
                    dtime=time.strftime(
                        "%Y-%m-%d %H:%M:%S", time.gmtime(region_data['timestamp'])),
                    platform=platform, url=url, cloud_location=region_data['region'],
                    cloud_account=cloud_account
                )
                app.analytics_db.execute(query, retries=1)

                # managed
                for managed in region_data['managed']:
                    if managed['env_id'] != env_id:
                        continue
                    query = (
                        "INSERT IGNORE INTO managed "
                        "(sid, server_id, instance_type, os) VALUES "
                        "(UNHEX('{sid}'), UNHEX('{server_id}'), '{instance_type}', {os})"
                    ).format(
                        sid=sid.hex,
                        server_id=uuid.UUID(managed['server_id']).hex,
                        instance_type=managed['instance_type'],
                        os=managed['os'])
                    app.analytics_db.execute(query, retries=1)
            except:
                helper.handle_error(message='Database update failed')
Example #20
def _openstack_cred(cred):
    username = cryptotool.decrypt_scalr(app.crypto_key, cred['username'])
    if 'password' in cred:
        password = cryptotool.decrypt_scalr(app.crypto_key, cred['password'])
        auth_version = '2.0_password'
    else:
        password = cryptotool.decrypt_scalr(app.crypto_key, cred['api_key'])
        auth_version = '2.0_apikey'
    keystone_url = cryptotool.decrypt_scalr(app.crypto_key,
                                            cred['keystone_url'])
    if not keystone_url.rstrip('/').endswith('/tokens'):
        keystone_url = os.path.join(keystone_url, 'tokens')
    if 'tenant_name' in cred:
        tenant_name = cryptotool.decrypt_scalr(app.crypto_key,
                                               cred['tenant_name'])
    else:
        tenant_name = None
    return username, password, auth_version, keystone_url, tenant_name
Example #21
def _cloudstack(cred):
    result = list()
    api_key = cryptotool.decrypt_scalr(app.crypto_key, cred['api_key'])
    secret_key = cryptotool.decrypt_scalr(app.crypto_key, cred['secret_key'])
    api_url = cryptotool.decrypt_scalr(app.crypto_key, cred['api_url'])
    url = urlparse.urlparse(api_url)
    splitted_netloc = url.netloc.split(':')
    host = splitted_netloc[0]
    try:
        port = splitted_netloc[1]
    except:
        port = 443 if url.scheme == 'https' else None
    path = url.path
    secure = url.scheme == 'https'

    cls = get_driver(Provider.CLOUDSTACK)
    driver = cls(key=api_key,
                 secret=secret_key,
                 host=host,
                 port=port,
                 path=path,
                 secure=secure)
    locations = driver.list_locations()
    cloud_nodes = _libcloud_list_nodes(driver)
    timestamp = int(time.time())
    for location in locations:
        nodes = list()
        for cloud_node in cloud_nodes:
            if cloud_node.state != NodeState.RUNNING or cloud_node.extra[
                    'zone_id'] != location.id:
                continue
            node = {
                'instance_id': cloud_node.id,
                'instance_type': cloud_node.extra['size_id'],
                'os': None
            }
            nodes.append(node)
        if nodes:
            result.append({
                'region': location.name,
                'timestamp': timestamp,
                'nodes': nodes
            })
    return result
Example #22
def _ec2_region(region, cred):
    try:
        access_key = cryptotool.decrypt_scalr(app.crypto_key,
                                              cred['access_key'])
        secret_key = cryptotool.decrypt_scalr(app.crypto_key,
                                              cred['secret_key'])
        kwds = {
            'aws_access_key_id': access_key,
            'aws_secret_access_key': secret_key
        }
        proxy_settings = app.proxy_settings.get(cred.platform, {})
        kwds['proxy'] = proxy_settings.get('host')
        kwds['proxy_port'] = proxy_settings.get('port')
        kwds['proxy_user'] = proxy_settings.get('user')
        kwds['proxy_pass'] = proxy_settings.get('pass')

        msg = "List nodes for platform: 'ec2', region: '{}', envs_ids: {}"
        msg = msg.format(region, cred.envs_ids)
        LOG.debug(msg)

        conn = boto.ec2.connect_to_region(region, **kwds)
        cloud_nodes = _ec2_get_only_instances(conn)
        timestamp = int(time.time())
        nodes = list()
        for cloud_node in cloud_nodes:
            node = {
                'instance_id': cloud_node.id,
                'instance_type': cloud_node.instance_type,
                'os': cloud_node.platform if cloud_node.platform else 'linux'
            }
            nodes.append(node)
        return {
            'region': region,
            'timestamp': timestamp,
            'nodes': nodes
        } if nodes else dict()
    except:
        e = sys.exc_info()[1]
        msg = "platform: '{platform}', region: '{region}', envs_ids: {envs_ids}. Reason: {error}"
        msg = msg.format(platform=cred.platform,
                         region=region,
                         envs_ids=cred.envs_ids,
                         error=helper.exc_info(where=False))
        _handle_exception(e, msg)
Example #23
def _gce_conn(cred, key=None):
    service_account_name = cryptotool.decrypt_scalr(
        app.crypto_key, cred['service_account_name'])
    if key is None:
        key = _gce_key(cred)

    signed_jwt_assert_cred = SignedJwtAssertionCredentials(
        service_account_name, key, ['https://www.googleapis.com/auth/compute'])
    http = httplib2.Http()
    http = signed_jwt_assert_cred.authorize(http)
    return build('compute', 'v1', http=http), http
Example #24
def _gce_conn(cred, key=None):
    service_account_name = cryptotool.decrypt_scalr(app.crypto_key, cred["service_account_name"])
    if key is None:
        key = _gce_key(cred)

    credentials = ServiceAccountCredentials.from_p12_keyfile_buffer(
        service_account_name, StringIO.StringIO(key), scopes=["https://www.googleapis.com/auth/compute"]
    )
    http = httplib2.Http()
    http = credentials.authorize(http)
    return build("compute", "v1", http=http), http
Example #25
def _gce_conn(cred):
    service_account_name = cryptotool.decrypt_scalr(CRYPTO_KEY, cred['service_account_name'])
    key = cryptotool.decrypt_scalr(CRYPTO_KEY, cred['key'])

    # convert pkcs12 to rsa
    out, err = helper.call(
        "openssl pkcs12 -nodes -nocerts -passin pass:notasecret | openssl rsa",
        input=binascii.a2b_base64(key),
        shell=True
    )
    key = out.strip()

    signed_jwt_assert_cred = SignedJwtAssertionCredentials(
        service_account_name,
        key,
        ['https://www.googleapis.com/auth/compute']
    )
    http = httplib2.Http()
    http = signed_jwt_assert_cred.authorize(http)
    return build('compute', 'v1', http=http), http
Example #26
def _gce_conn(cred):
    service_account_name = cryptotool.decrypt_scalr(
        app.crypto_key, cred['service_account_name'])
    if 'json_key' in cred:
        key = json.loads(
            cryptotool.decrypt_scalr(app.crypto_key,
                                     cred['json_key']))['private_key']
    else:
        key = cryptotool.decrypt_scalr(app.crypto_key, cred['key'])
        # convert pkcs12 to rsa
        out, err, ret_code = helper.call(
            "openssl pkcs12 -nodes -nocerts -passin pass:notasecret | openssl rsa",
            input=binascii.a2b_base64(key),
            shell=True)
        key = out.strip()

    signed_jwt_assert_cred = SignedJwtAssertionCredentials(
        service_account_name, key, ['https://www.googleapis.com/auth/compute'])
    http = httplib2.Http()
    http = signed_jwt_assert_cred.authorize(http)
    return build('compute', 'v1', http=http), http
Example #27
def _cloudstack(cred):
    try:
        result = list()
        api_key = cryptotool.decrypt_scalr(app.crypto_key, cred["api_key"])
        secret_key = cryptotool.decrypt_scalr(app.crypto_key, cred["secret_key"])
        api_url = cryptotool.decrypt_scalr(app.crypto_key, cred["api_url"])
        url = urlparse.urlparse(api_url)
        splitted_netloc = url.netloc.split(":")
        host = splitted_netloc[0]
        try:
            port = splitted_netloc[1]
        except:
            port = 443 if url.scheme == "https" else None
        path = url.path
        secure = url.scheme == "https"

        cls = get_driver(Provider.CLOUDSTACK)
        driver = cls(key=api_key, secret=secret_key, host=host, port=port, path=path, secure=secure)

        proxy_url = app.proxy_settings.get(cred.platform, {}).get("url")
        driver.connection.set_http_proxy(proxy_url=proxy_url)

        locations = driver.list_locations()
        cloud_nodes = _libcloud_list_nodes(driver)
        timestamp = int(time.time())
        for location in locations:
            nodes = list()
            for cloud_node in cloud_nodes:
                if cloud_node.state != NodeState.RUNNING or cloud_node.extra["zone_id"] != location.id:
                    continue
                node = {"instance_id": cloud_node.id, "instance_type": cloud_node.extra["size_id"], "os": None}
                nodes.append(node)
            if nodes:
                result.append({"region": location.name, "timestamp": timestamp, "nodes": nodes})
        return result
    except:
        e = sys.exc_info()[1]
        msg = "platform: '{platform}', envs_ids: {envs_ids}. Reason: {error}"
        msg = msg.format(platform=cred.platform, envs_ids=cred.envs_ids, error=helper.exc_info(where=False))
        _handle_exception(e, msg)
Example #28
def _cloudstack(cred):
    result = list()
    api_key = cryptotool.decrypt_scalr(CRYPTO_KEY, cred['api_key'])
    secret_key = cryptotool.decrypt_scalr(CRYPTO_KEY, cred['secret_key'])
    api_url = cryptotool.decrypt_scalr(CRYPTO_KEY, cred['api_url'])
    url = urlparse.urlparse(api_url)
    splitted_netloc = url.netloc.split(':')
    host = splitted_netloc[0]
    try:
        port = splitted_netloc[1]
    except:
        port = 443 if url.scheme == 'https' else None
    path = url.path
    secure = url.scheme == 'https'

    cls = get_driver(Provider.CLOUDSTACK)
    driver = cls(key=api_key, secret=secret_key, host=host, port=port, path=path, secure=secure)
    locations = driver.list_locations()
    cloud_nodes = _libcloud_list_nodes(driver)
    timestamp = int(time.time())
    for location in locations:
        nodes = list()
        for cloud_node in cloud_nodes:
            if cloud_node.state != NodeState.RUNNING or cloud_node.extra['zone_id'] != location.id:
                continue
            node = {
                'instance_id': cloud_node.id,
                'instance_type': cloud_node.extra['size_id'],
                'os': None
            }
            nodes.append(node)
        if nodes:
            result.append(
                {
                    'region': location.name,
                    'timestamp': timestamp,
                    'nodes': nodes
                }
            )
    return result
Example #29
def _ec2_region(region, cred):
    try:
        access_key = cryptotool.decrypt_scalr(app.crypto_key, cred['access_key'])
        secret_key = cryptotool.decrypt_scalr(app.crypto_key, cred['secret_key'])
        kwds = {
            'aws_access_key_id': access_key,
            'aws_secret_access_key': secret_key
        }
        proxy_settings = app.proxy_settings[cred.platform]
        kwds['proxy'] = proxy_settings.get('host', None)
        kwds['proxy_port'] = proxy_settings.get('port', None)
        kwds['proxy_user'] = proxy_settings.get('user', None)
        kwds['proxy_pass'] = proxy_settings.get('pass', None)

        msg = "List nodes for platform: 'ec2', region: '{}', envs_ids: {}"
        msg = msg.format(region, cred.envs_ids)
        LOG.debug(msg)

        conn = boto.ec2.connect_to_region(region, **kwds)
        cloud_nodes = _ec2_get_only_instances(conn)
        timestamp = int(time.time())
        nodes = list()
        for cloud_node in cloud_nodes:
            node = {
                'instance_id': cloud_node.id,
                'instance_type': cloud_node.instance_type,
                'os': cloud_node.platform if cloud_node.platform else 'linux'
            }
            nodes.append(node)
        return {
            'region': region,
            'timestamp': timestamp,
            'nodes': nodes
        } if nodes else dict()
    except:
        e = sys.exc_info()[1]
        msg = "platform: '{platform}', region: '{region}', envs_ids: {envs_ids}. Reason: {error}"
        msg = msg.format(platform=cred.platform, region=region, envs_ids=cred.envs_ids,
                         error=helper.exc_info(where=False))
        _handle_exception(e, msg)
Example #30
def _gce_conn(cred, key=None):
    service_account_name = cryptotool.decrypt_scalr(
        app.crypto_key, cred['service_account_name'])
    if key is None:
        key = _gce_key(cred)

    credentials = ServiceAccountCredentials.from_p12_keyfile_buffer(
        service_account_name,
        StringIO.StringIO(key),
        scopes=['https://www.googleapis.com/auth/compute'])
    http = httplib2.Http()
    http = credentials.authorize(http)
    return build('compute', 'v1', http=http), http
Example #31
    def load_access_token(self, env):
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        tenant_id = cryptotool.decrypt_scalr(self.config['crypto_key'], env['azure.tenant_name'])
        url = self.token_url.format(tenant_id=tenant_id)
        data = {
            'grant_type': 'client_credentials',
            'client_id': self.config['azure_app_client_id'],
            'resource': 'https://management.azure.com/',
            'client_secret': self.config['azure_app_secret_key'],
        }
        resp = requests.post(url, headers=headers, data=data)
        resp.raise_for_status()
        env['azure.access_token'] = str(resp.json()['access_token'])
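This is a standard OAuth2 client-credentials request against Azure AD; self.token_url is not shown in the snippet, but it is presumably a template along the lines of the hypothetical value below:

# Hypothetical value of self.token_url; {tenant_id} is filled in with the
# decrypted azure.tenant_name.
token_url = 'https://login.microsoftonline.com/{tenant_id}/oauth2/token'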
Example #32
def _gce_conn(cred, key=None):
    service_account_name = cryptotool.decrypt_scalr(app.crypto_key, cred['service_account_name'])
    if key is None:
        key = _gce_key(cred)

    signed_jwt_assert_cred = SignedJwtAssertionCredentials(
        service_account_name,
        key,
        ['https://www.googleapis.com/auth/compute']
    )
    http = httplib2.Http()
    http = signed_jwt_assert_cred.authorize(http)
    return build('compute', 'v1', http=http), http
Example #33
def _ec2_region(region, cred):
    access_key = cryptotool.decrypt_scalr(CRYPTO_KEY, cred['access_key'])
    secret_key = cryptotool.decrypt_scalr(CRYPTO_KEY, cred['secret_key'])
    conn = boto.ec2.connect_to_region(
        region,
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key
    )
    cloud_nodes = _ec2_get_only_instances(conn)
    timestamp = int(time.time())
    nodes = list()
    for cloud_node in cloud_nodes:
        node = {
            'instance_id': cloud_node.id,
            'instance_type': cloud_node.instance_type,
            'os': cloud_node.platform if cloud_node.platform else 'linux'
        }
        nodes.append(node)
    return {
        'region': region,
        'timestamp': timestamp,
        'nodes': nodes
    } if nodes else dict()
Example #34
def _eucalyptus(cred):
    access_key = cryptotool.decrypt_scalr(CRYPTO_KEY, cred['access_key'])
    secret_key = cryptotool.decrypt_scalr(CRYPTO_KEY, cred['secret_key'])
    ec2_url = cryptotool.decrypt_scalr(CRYPTO_KEY, cred['ec2_url'])
    url = urlparse.urlparse(ec2_url)
    splitted_netloc = url.netloc.split(':')
    host = splitted_netloc[0]
    try:
        port = splitted_netloc[1]
    except:
        port = None
    path = url.path
    region = 'eucalyptus'
    region_info = boto.ec2.regioninfo.RegionInfo(name=region, endpoint=host)
    conn = boto.connect_ec2(
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        is_secure=False,
        port=port,
        path=path,
        region=region_info
    )
    cloud_nodes = _ec2_get_only_instances(conn)
    timestamp = int(time.time())
    nodes = list()
    for cloud_node in cloud_nodes:
        node = {
            'instance_id': cloud_node.id,
            'instance_type': cloud_node.instance_type,
            'os': cloud_node.platform if cloud_node.platform else 'linux'
        }
        nodes.append(node)
    return {
        'region': cred['group'],
        'timestamp': timestamp,
        'nodes': nodes
    } if nodes else dict()
Example #35
def gce(cred):
    """
    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """

    result = list()

    project_id = cryptotool.decrypt_scalr(app.crypto_key, cred['project_id'])
    try:
        conn, http = _gce_conn(cred)
        request = conn.zones().list(project=project_id)
        resp = request.execute(http=http)
    except:
        e = sys.exc_info()[1]
        msg = 'platform: {platform}, env_id: {env_id}, reason: {error}'
        msg = msg.format(platform=cred.platform,
                         env_id=cred.env_id,
                         error=helper.exc_info())
        _handle_exception(e, msg)
        return result

    zones = [_['name'] for _ in resp['items']] if 'items' in resp else []

    app.pool.wait()
    async_results = dict(
        (zone, app.pool.apply_async(_gce_zone, args=(
            zone,
            cred,
        ))) for zone in zones)
    gevent.sleep(0)  # force switch
    for zone, async_result in async_results.iteritems():
        try:
            zone_nodes = async_result.get(
                timeout=app.config['cloud_connection_timeout'] + 1)
            if zone_nodes:
                result.append(zone_nodes)
        except:
            async_result.kill()
            e = sys.exc_info()[1]
            msg = 'platform: GCE, zone: {zone}, env_id: {env_id}, reason: {error}'
            msg = msg.format(zone=zone,
                             env_id=cred.env_id,
                             error=helper.exc_info())
            _handle_exception(e, msg)
    return result
Example #36
def process_credential(cred, envs_ids=None):
    if envs_ids is None:
        envs_ids = [cred.env_id]

    try:
        for k, v in cred.iteritems():
            if k in cred.scheme[cred.platform]:
                cred[k] = cryptotool.decrypt_scalr(app.crypto_key, v)
        cloud_data = eval(cred.platform)(cred)
        if cloud_data:
            sorted_data = sort_nodes(cloud_data, cred, envs_ids)
            sorted_data_update(sorted_data)
            db_update(sorted_data, envs_ids, cred)
    except:
        msg = 'platform: {platform}, environments: {envs}, reason: {error}'
        msg = msg.format(platform=cred.platform,
                         envs=envs_ids,
                         error=helper.exc_info())
        LOG.error(msg)
Пример #37
0
def _gce_zone(zone, key, cred):
    try:
        conn, http = _gce_conn(cred, key=key)
        project_id = cryptotool.decrypt_scalr(app.crypto_key,
                                              cred['project_id'])
        request = conn.instances().list(project=project_id,
                                        zone=zone,
                                        filter='status eq RUNNING')
        resp = request.execute(http=http)
        timestamp = int(time.time())
        cloud_nodes = resp['items'] if 'items' in resp else []
        nodes = list()
        for cloud_node in cloud_nodes:
            node = {
                'instance_id': cloud_node['id'],
                'server_name': cloud_node['name'],
                'instance_type': cloud_node['machineType'].split('/')[-1],
                'os': None,
            }
            for item in cloud_node['metadata'].get('items', []):
                meta = dict(
                    tuple(element.split('=', 1))
                    for element in item['value'].split(';') if '=' in element)
                if 'serverid' in meta:
                    node['server_id'] = meta['serverid']
                if 'env_id' in meta:
                    node['env_id'] = int(meta['env_id'])
                    break
            nodes.append(node)
        return {
            'region': zone,
            'timestamp': timestamp,
            'nodes': nodes
        } if nodes else dict()
    except:
        e = sys.exc_info()[1]
        msg = 'platform: {platform}, zone: {zone}, env_id: {env_id}, reason: {error}'
        msg = msg.format(platform=cred.platform,
                         zone=zone,
                         env_id=cred.env_id,
                         error=helper.exc_info(where=False))
        _handle_exception(e, msg)
Example #38
def _gce_zone(zone, key, cred):
    try:
        conn, http = _gce_conn(cred, key=key)
        project_id = cryptotool.decrypt_scalr(app.crypto_key, cred['project_id'])
        request = conn.instances().list(
            project=project_id,
            zone=zone,
            filter='status eq RUNNING'
        )
        resp = request.execute(http=http)
        timestamp = int(time.time())
        cloud_nodes = resp['items'] if 'items' in resp else []
        nodes = list()
        for cloud_node in cloud_nodes:
            node = {
                'instance_id': cloud_node['id'],
                'server_name': cloud_node['name'],
                'instance_type': cloud_node['machineType'].split('/')[-1],
                'os': None,
            }
            for item in cloud_node['metadata'].get('items', []):
                meta = dict(tuple(element.split('=', 1))
                            for element in item['value'].split(';') if '=' in element)
                if 'serverid' in meta:
                    node['server_id'] = meta['serverid']
                if 'env_id' in meta:
                    node['env_id'] = int(meta['env_id'])
                    break
            nodes.append(node)
        return {
            'region': zone,
            'timestamp': timestamp,
            'nodes': nodes
        } if nodes else dict()
    except:
        e = sys.exc_info()[1]
        msg = 'platform: {platform}, zone: {zone}, env_id: {env_id}, reason: {error}'
        msg = msg.format(platform=cred.platform, zone=zone, env_id=cred.env_id,
                         error=helper.exc_info(where=False))
        _handle_exception(e, msg)
Example #39
def gce(cred):
    """
    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """

    result = list()

    project_id = cryptotool.decrypt_scalr(CRYPTO_KEY, cred['project_id'])
    try:
        conn, http = _gce_conn(cred)
        request = conn.zones().list(project=project_id)
        resp = request.execute(http=http)
    except:
        e = sys.exc_info()[1]
        msg = 'platform: {platform}, env_id: {env_id}, reason: {error}'
        msg = msg.format(platform=cred.platform, env_id=cred.env_id, error=helper.exc_info())
        _handle_exception(e, msg)
        return result

    zones = [_['name'] for _ in resp['items']] if 'items' in resp else []

    wait_pool()
    async_results = dict(
        (zone, POOL.apply_async(_gce_zone, args=(zone, cred,)))
        for zone in zones
    )
    gevent.sleep(0) # force switch
    for zone, async_result in async_results.iteritems():
        try:
            zone_nodes = async_result.get(timeout=CONFIG['cloud_connection_timeout']+10)
            if zone_nodes:
                result.append(zone_nodes)
        except:
            async_result.kill()
            e = sys.exc_info()[1]
            msg = 'platform: GCE, zone: {zone}, env_id: {env_id}, reason: {error}'
            msg = msg.format(zone=zone, env_id=cred.env_id, error=helper.exc_info())
            _handle_exception(e, msg)
    return result
Example #40
def _gce_zone(zone, cred):
    conn, http = _gce_conn(cred)
    project_id = cryptotool.decrypt_scalr(app.crypto_key, cred['project_id'])
    request = conn.instances().list(project=project_id,
                                    zone=zone,
                                    filter='status eq RUNNING')
    resp = request.execute(http=http)
    timestamp = int(time.time())
    cloud_nodes = resp['items'] if 'items' in resp else []
    nodes = list()
    for cloud_node in cloud_nodes:
        node = {
            'instance_id': cloud_node['id'],
            'server_name': cloud_node['name'],
            'instance_type': cloud_node['machineType'].split('/')[-1],
            'os': None
        }
        nodes.append(node)
    return {
        'region': zone,
        'timestamp': timestamp,
        'nodes': nodes
    } if nodes else dict()
Example #41
def gce(cred):
    """
    :returns: list
        [{'region': str, 'timestamp': int, 'nodes': list}]
    """
    result = list()

    project_id = cryptotool.decrypt_scalr(app.crypto_key, cred['project_id'])
    key = _gce_key(cred)
    conn, http = _gce_conn(cred, key=key)
    request = conn.zones().list(project=project_id)
    resp = request.execute(http=http)
    zones = [_['name'] for _ in resp['items']] if 'items' in resp else []

    app.pool.wait()
    async_results = dict(
        (zone, app.pool.apply_async(_gce_zone, args=(
            zone,
            key,
            cred,
        ))) for zone in zones)
    gevent.sleep(0)  # force switch
    for zone, async_result in async_results.iteritems():
        try:
            zone_nodes = async_result.get(
                timeout=app.config['cloud_connection_timeout'] + 1)
            if zone_nodes:
                result.append(zone_nodes)
        except gevent.timeout.Timeout:
            async_result.kill()
            msg = 'platform: {platform}, zone: {zone}, env_id: {env_id}. Reason: timeout'
            msg = msg.format(platform=cred.platform,
                             zone=zone,
                             env_id=cred.env_id)
            LOG.warning(msg)
    return result
Example #42
    def download_billing_file(self, env, date=None, force=False):
        date = date or datetime.datetime.utcnow().date()
        bucket_name = env['ec2.detailed_billing.bucket']
        if env.get('ec2.detailed_billing.payer_account'):
            envs = self.analytics.load_aws_accounts_ids_envs([env['ec2.detailed_billing.payer_account']])
            self.analytics.load_env_credentials(envs, platform='ec2')
            for e in envs:
                if e['client_id'] == env['client_id']:
                    credentials_env = e
                    break
            else:
                msg = 'Cannot find AWS credentials for PayerAccount {}'
                msg = msg.format(env['ec2.detailed_billing.payer_account'])
                raise Exception(msg)
        else:
            credentials_env = env.copy()
        kwds = {
            'aws_access_key_id': cryptotool.decrypt_scalr(self.config['crypto_key'],
                                                          credentials_env['ec2.access_key']),
            'aws_secret_access_key': cryptotool.decrypt_scalr(self.config['crypto_key'],
                                                              credentials_env['ec2.secret_key']),
            'proxy': self.config['aws_proxy'].get('host'),
            'proxy_port': self.config['aws_proxy'].get('port'),
            'proxy_user': self.config['aws_proxy'].get('user'),
            'proxy_pass': self.config['aws_proxy'].get('pass'),
        }
        default_region_map = {
            'regular': 'us-east-1',
            'gov-cloud': 'us-gov-west-1',
            'cn-cloud': 'cn-north-1',
        }
        default_region = default_region_map[env.get('account_type', 'regular')]
        region = env.get('ec2.detailed_billing.region', default_region)
        conn = boto.s3.connect_to_region(region, **kwds)
        bucket = conn.get_bucket(bucket_name)
        default_file_name = get_default_aws_csv_file_name(credentials_env['ec2.account_id'], date)
        file_name_tmplate = get_aws_csv_file_name_tmplate(credentials_env['ec2.account_id'], date)
        files_in_bucket = [key.name for key in bucket.list()
                           if re.match(file_name_tmplate, key.name)]
        if not files_in_bucket:
            utcnow = datetime.datetime.utcnow()
            if date.month == utcnow.month and utcnow.day < 2:
                return None
            else:
                msg = "Not found any valid files({}, {}) in bucket '{}'"
                msg = msg.format(default_file_name, file_name_tmplate, bucket_name)
                raise exceptions.FileNotFoundError(msg)
        if default_file_name not in files_in_bucket:
            file_name = files_in_bucket[0]  # use first valid file
            msg = "Default AWS detailed billing statistics file '{}' not found in bucket '{}', available {}, use '{}'"
            msg = msg.format(default_file_name, bucket_name, files_in_bucket, file_name)
            LOG.warning(msg)
        else:
            file_name = default_file_name
        try:
            key = bucket.get_key(file_name)
            last_modified_dt = datetime.datetime.strptime(key.last_modified, self.last_modified_format)
            utcnow = datetime.datetime.utcnow()

            seconds_from_last_modified_dt = (utcnow - last_modified_dt).seconds
            if hasattr(self, 'task_info') and self.task_info.get('period') > 300:
                delta = datetime.timedelta(seconds=self.task_info['period'] - 90)
            else:
                delta = datetime.timedelta(seconds=210)
            condition1 = utcnow > last_modified_dt and utcnow < last_modified_dt + delta
            condition2 = seconds_from_last_modified_dt > 3600
            condition3 = (seconds_from_last_modified_dt / 3600) % 24 == 0

            if force or condition1 or (condition2 and condition3):
                with self._downloading_lock:
                    self.downloading_locks.setdefault(file_name, gevent.lock.RLock())
                with self.downloading_locks[file_name]:
                    csv_zip_file = os.path.join(self.cache_dir, file_name)
                    csv_file = csv_zip_file.rstrip('.zip')
                    if os.path.exists(csv_file):
                        msg = "'{}' already exists in cache directory, use it"
                        msg = msg.format(os.path.basename(csv_file))
                        LOG.debug(msg)
                        return csv_file
                    while key.size * 3 > helper.get_free_space(self.cache_dir):
                        LOG.error('Disk is full, waiting 60 sec')
                        gevent.sleep(60)
                    LOG.debug("Downloading '{}' for environment {}".format(file_name, env['id']))
                    attempts = 2
                    downloading_start_time = time.time()
                    while True:
                        try:
                            key.get_contents_to_filename(csv_zip_file)
                            assert os.path.isfile(csv_zip_file), os.listdir(self.cache_dir)
                            downloading_end_time = time.time()
                            break
                        except:
                            attempts -= 1
                            if not attempts:
                                raise
                    downloading_time = downloading_end_time - downloading_start_time
                    msg = "Downloading '{0}' done in {1:.1f} seconds".format(file_name, downloading_time)
                    LOG.info(msg)
                    LOG.debug('Unzipping to {}'.format(csv_file))
                    while os.path.getsize(csv_zip_file) * 1 > helper.get_free_space(self.cache_dir):
                        LOG.error('Disk is full, waiting 60 sec')
                        gevent.sleep(60)
                    with zipfile.ZipFile(csv_zip_file, 'r') as f:
                        f.extract(f.infolist()[0], self.cache_dir)
                    os.remove(csv_zip_file)
                    return csv_file
            else:
                msg = "Skipping AWS billing file '{}' for environment {}"
                msg = msg.format(file_name, env['id'])
                LOG.debug(msg)
                return None
        except:
            msg = "File '{}', bucket '{}', reason: {}"
            msg = msg.format(file_name, bucket_name, helper.exc_info())
            raise Exception, Exception(msg), sys.exc_info()[2]
Example #43
0
def db_update(sorted_data, envs_ids, cred):
    platform = cred.platform

    for env_id in envs_ids:
        for region_data in sorted_data:
            try:
                sid = uuid.uuid4()
                if platform == 'ec2' and 'account_id' in cred:
                    cloud_account = cryptotool.decrypt_scalr(app.crypto_key, cred['account_id'])
                else:
                    cloud_account = None

                if analytics.url_key_map[platform]:
                    url = urlparse.urlparse(cryptotool.decrypt_scalr(
                        app.crypto_key, cred[analytics.url_key_map[platform]]).rstrip('/'))
                    url = '%s%s' % (url.netloc, url.path)
                else:
                    url = ''

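                # Resolve the Scalr account that owns this environment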
                query = (
                    "SELECT client_id "
                    "FROM client_environments "
                    "WHERE id={env_id}"
                ).format(env_id=env_id)
                results = app.scalr_db.execute(query, retries=1)
                account_id = results[0]['client_id']

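                # Record a poller session row for this environment/region snapshot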
                query = (
                    "INSERT IGNORE INTO poller_sessions "
                    "(sid, account_id, env_id, dtime, platform, url, cloud_location, cloud_account) "
                    "VALUES "
                    "(UNHEX('{sid}'), {account_id}, {env_id}, '{dtime}', '{platform}', '{url}',"
                    "'{cloud_location}', '{cloud_account}')"
                ).format(
                    sid=sid.hex, account_id=account_id, env_id=env_id,
                    dtime=time.strftime(
                        "%Y-%m-%d %H:%M:%S", time.gmtime(region_data['timestamp'])),
                    platform=platform, url=url, cloud_location=region_data['region'],
                    cloud_account=cloud_account
                )
                app.analytics_db.execute(query, retries=1)

                # managed
                for managed in region_data['managed']:
                    if managed['env_id'] != env_id:
                        continue
                    query = (
                        "INSERT IGNORE INTO managed "
                        "(sid, server_id, instance_type, os) VALUES "
                        "(UNHEX('{sid}'), UNHEX('{server_id}'), '{instance_type}', {os})"
                    ).format(
                        sid=sid.hex,
                        server_id=uuid.UUID(managed['server_id']).hex,
                        instance_type=managed['instance_type'],
                        os=managed['os'])
                    LOG.debug(query)
                    app.analytics_db.execute(query, retries=1)

                ## not_managed
                #if region_data['not_managed']:
                #    base_query = (
                #        "INSERT IGNORE INTO notmanaged "
                #        "(sid, instance_id, instance_type, os) VALUES %s")
                #    values_template = "(UNHEX('{sid}'), '{instance_id}', '{instance_type}', {os})"
                #    i, chunk_size = 0, 20
                #    while True:
                #        chunk_not_managed = region_data['not_managed'][
                #            i * chunk_size:(i + 1) * chunk_size]
                #        if not chunk_not_managed:
                #            break
                #        query = base_query % ','.join(
                #            [
                #                values_template.format(
                #                    sid=sid.hex,
                #                    instance_id=not_managed['instance_id'],
                #                    instance_type=not_managed['instance_type'],
                #                    os=not_managed['os']
                #                )
                #                for not_managed in chunk_not_managed
                #            ]
                #        )
                #        app.analytics_db.execute(query, retries=1)
                #        i += 1
            except:
                msg = 'Database update failed, reason: {0}'
                msg = msg.format(helper.exc_info())
                LOG.exception(msg)
Example #45
0
def db_update(sorted_data, envs_ids, cred):
    platform = cred.platform

    for env_id in envs_ids:

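        # The account lookup depends only on the environment, so run it once per env_id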
        query = ("SELECT client_id " "FROM client_environments " "WHERE id={env_id}").format(env_id=env_id)
        results = app.scalr_db.execute(query, retries=1)
        account_id = results[0]["client_id"]

        for region_data in sorted_data:
            try:
                # skip regions with no managed servers
                if not region_data["managed"]:
                    continue

                sid = uuid.uuid4()
                if platform == "ec2":
                    cloud_account = cred.get("account_id")
                else:
                    cloud_account = None

                if analytics.url_key_map[platform]:
                    url = urlparse.urlparse(
                        cryptotool.decrypt_scalr(app.crypto_key, cred[analytics.url_key_map[platform]]).rstrip("/")
                    )
                    url = "%s%s" % (url.netloc, url.path)
                else:
                    url = ""

                query = (
                    "INSERT IGNORE INTO poller_sessions "
                    "(sid, account_id, env_id, dtime, platform, url, cloud_location, cloud_account) "
                    "VALUES "
                    "(UNHEX('{sid}'), {account_id}, {env_id}, '{dtime}', '{platform}', '{url}',"
                    "'{cloud_location}', '{cloud_account}')"
                ).format(
                    sid=sid.hex,
                    account_id=account_id,
                    env_id=env_id,
                    dtime=time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(region_data["timestamp"])),
                    platform=platform,
                    url=url,
                    cloud_location=region_data["region"],
                    cloud_account=cloud_account,
                )
                app.analytics_db.execute(query, retries=1)

                # managed
                for managed in region_data["managed"]:
                    if managed["env_id"] != env_id:
                        continue
                    query = (
                        "INSERT IGNORE INTO managed "
                        "(sid, server_id, instance_type, os) VALUES "
                        "(UNHEX('{sid}'), UNHEX('{server_id}'), '{instance_type}', {os})"
                    ).format(
                        sid=sid.hex,
                        server_id=uuid.UUID(managed["server_id"]).hex,
                        instance_type=managed["instance_type"],
                        os=managed["os"],
                    )
                    app.analytics_db.execute(query, retries=1)
            except:
                helper.handle_error(message="Database update failed")
Example #46
0
def db_update(sorted_data, envs_ids, cred):
    platform = cred.platform
    for env_id in envs_ids:
        for region_data in sorted_data:
            try:
                sid = uuid.uuid4()
                if platform == 'ec2':
                    cloud_account = cred['account_id'] if 'account_id' in cred else None
                else:
                    cloud_account = None

                if URL_MAP[platform]:
                    url = cryptotool.decrypt_scalr(CRYPTO_KEY, cred[URL_MAP[platform]])
                    url = urlparse.urlparse(url.rstrip('/'))
                    url = '%s%s' % (url.netloc, url.path)
                else:
                    url = ''

                query = (
                        "SELECT client_id "
                        "FROM client_environments "
                        "WHERE id={env_id}"
                ).format(env_id=env_id)
                results = SCALR_DB.execute(query, retries=1)
                account_id = results[0]['client_id']

                query = (
                        "INSERT IGNORE INTO poller_sessions "
                        "(sid,account_id,env_id,dtime,platform,url,cloud_location,cloud_account) "
                        "VALUES "
                        "(UNHEX('{sid}'),{account_id},{env_id},'{dtime}','{platform}','{url}',"
                        "'{cloud_location}','{cloud_account}')"
                ).format(
                    sid=sid.hex, account_id=account_id, env_id=env_id,
                    dtime=time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(region_data['timestamp'])),
                    platform=platform, url=url, cloud_location=region_data['region'],
                    cloud_account=cloud_account
                )
                ANALYTICS_DB.execute(query, retries=1)

                # managed
                for managed in region_data['managed']:
                    if managed['env_id'] != env_id:
                        continue
                    query = (
                            "INSERT IGNORE INTO managed "
                            "(sid,server_id,instance_type,os) VALUES "
                            "(UNHEX('{sid}'),UNHEX('{server_id}'),'{instance_type}',{os})"
                    ).format(
                            sid=sid.hex,
                            server_id=uuid.UUID(managed['server_id']).hex,
                            instance_type=managed['instance_type'],
                            os=OS_MAP[managed['os']])
                    ANALYTICS_DB.execute(query, retries=1)

                # not_managed
                if region_data['not_managed']:
                    base_query = (
                            "INSERT IGNORE INTO notmanaged "
                            "(sid,instance_id,instance_type,os) VALUES %s")
                    values_template = "(UNHEX('{sid}'),'{instance_id}','{instance_type}',{os})"
                    i, chunk_size = 0, 20
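                    # Insert not-managed instances in batches of 20 rows per statement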
                    while True:
                        chunk_not_managed = region_data['not_managed'][i*chunk_size:(i+1)*chunk_size]
                        if not chunk_not_managed:
                            break
                        query = base_query % ','.join(
                            [
                                values_template.format(
                                    sid=sid.hex,
                                    instance_id=_['instance_id'],
                                    instance_type=_['instance_type'],
                                    os=OS_MAP[_['os']]
                                )
                                for _ in chunk_not_managed
                            ]
                        )
                        ANALYTICS_DB.execute(query, retries=1)
                        i += 1
            except:
                LOG.warning(helper.exc_info())