    def call_func(self, func, **kwargs):
        """General method for calling any Monasca API function."""
        @tenacity.retry(
            wait=tenacity.wait_fixed(self._retry_interval),
            stop=tenacity.stop_after_attempt(self._max_retries),
            retry=(tenacity.retry_if_exception_type(MonascaServiceException) |
                   tenacity.retry_if_exception_type(MonascaException)))
        def _inner():
            try:
                return func(**kwargs)
            except (exc.http.InternalServerError,
                    exc.http.ServiceUnavailable,
                    exc.http.BadGateway,
                    exc.connection.ConnectionError) as e:
                LOG.exception(e)
                msg = '%s: %s' % (e.__class__.__name__, e)
                raise MonascaServiceException(msg)
            except exc.http.HttpError as e:
                LOG.exception(e)
                msg = '%s: %s' % (e.__class__.__name__, e)
                status_code = e.http_status
                if not isinstance(status_code, int):
                    status_code = 500
                if 400 <= status_code < 500:
                    raise MonascaInvalidParametersException(msg)
                else:
                    raise MonascaException(msg)
            except Exception as e:
                LOG.exception(e)
                msg = '%s: %s' % (e.__class__.__name__, e)
                raise MonascaException(msg)

        return _inner()
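Tenacity retry predicates can be combined with the `|` operator, as above: the call is retried when either exception type is raised. A minimal, self-contained sketch of the same pattern (the exception classes and timings below are placeholders, not part of the snippet above):

import tenacity


class ServiceError(Exception):
    pass


class ClientError(Exception):
    pass


@tenacity.retry(
    wait=tenacity.wait_fixed(0.1),
    stop=tenacity.stop_after_attempt(3),
    retry=(tenacity.retry_if_exception_type(ServiceError) |
           tenacity.retry_if_exception_type(ClientError)),
    reraise=True)
def flaky_call():
    # Raise ServiceError or ClientError here to trigger a retry; any other
    # exception propagates immediately, and after 3 attempts the last
    # exception is re-raised because of reraise=True.
    ...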
Example #2
    def send_request(self, socket, request):
        @tenacity.retry(retry=tenacity.retry_if_exception_type(zmq.Again),
                        stop=tenacity.stop_after_delay(
                            self.conf.rpc_response_timeout))
        def send_retrying():
            self._do_send(socket, request)
        return send_retrying()
Example #3
def upgrade():
    conf = cfg.ConfigOpts()
    conf.register_cli_opts([
        cfg.BoolOpt('skip-gnocchi-resource-types',
                    help='Skip gnocchi resource-types upgrade.',
                    default=False),
        cfg.IntOpt('retry',
                   min=0,
                   help='Number of times to retry on failure. '
                   'Default is to retry forever.'),
    ])

    service.prepare_service(conf=conf)
    if conf.skip_gnocchi_resource_types:
        LOG.info("Skipping Gnocchi resource types upgrade")
    else:
        LOG.debug("Upgrading Gnocchi resource types")
        from ceilometer import gnocchi_client
        from gnocchiclient import exceptions
        if conf.retry is None:
            stop = tenacity.stop_never
        else:
            stop = tenacity.stop_after_attempt(conf.retry)
        tenacity.Retrying(
            stop=stop,
            retry=tenacity.retry_if_exception_type((
                exceptions.ConnectionFailure,
                exceptions.UnknownConnectionError,
                exceptions.ConnectionTimeout,
                exceptions.SSLError,
            ))
        )(gnocchi_client.upgrade_resource_types, conf)
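A tenacity.Retrying object is itself callable: `Retrying(...)(fn, *args, **kwargs)` runs `fn` under the retry policy without decorating it, which is what the upgrade step above relies on. A standalone sketch of the same calling convention (the failing function is invented for illustration):

import tenacity

attempts = {"count": 0}


def sometimes_fails(name):
    # Fails twice, then succeeds, to exercise the retry loop.
    attempts["count"] += 1
    if attempts["count"] < 3:
        raise ConnectionError("not ready yet")
    return "hello %s" % name


result = tenacity.Retrying(
    stop=tenacity.stop_after_attempt(5),
    retry=tenacity.retry_if_exception_type(ConnectionError),
)(sometimes_fails, "world")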
Example #4
def retry_upon_exception(exc, delay, max_delay, max_attempts):
    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_exception_type(exc),
                          wait=tenacity.wait_exponential(
                                multiplier=delay, max=max_delay),
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)
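A hedged sketch of how a factory like this is applied; the decorated function, exception class, and timings are placeholders, and `_log_before_retry`/`_log_after_retry` are assumed to be defined next to the factory:

@retry_upon_exception(TimeoutError, delay=0.5, max_delay=5, max_attempts=4)
def get_port_status(port_id):
    # Placeholder body: a backend call that may time out and is safe to
    # repeat; after max_attempts the TimeoutError is re-raised (reraise=True).
    ...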
Example #5
    def join_group(self, group_id):
        if (not self._coordinator or not self._coordinator.is_started
                or not group_id):
            return

        @tenacity.retry(
            wait=tenacity.wait_exponential(
                multiplier=self.conf.coordination.retry_backoff,
                max=self.conf.coordination.max_retry_interval),
            retry=tenacity.retry_if_exception_type(
                ErrorJoiningPartitioningGroup))
        def _inner():
            try:
                join_req = self._coordinator.join_group(group_id)
                join_req.get()
                LOG.info(_LI('Joined partitioning group %s'), group_id)
            except tooz.coordination.MemberAlreadyExist:
                return
            except tooz.coordination.GroupNotCreated:
                create_grp_req = self._coordinator.create_group(group_id)
                try:
                    create_grp_req.get()
                except tooz.coordination.GroupAlreadyExist:
                    pass
                raise ErrorJoiningPartitioningGroup()
            except tooz.coordination.ToozError:
                LOG.exception(_LE('Error joining partitioning group %s,'
                                  ' re-trying'), group_id)
                raise ErrorJoiningPartitioningGroup()
            self._groups.add(group_id)

        return _inner()
Example #6
def retry_on_db_error(func, retry=None):
    """Decorates the given function so that it retries on DB errors.

    Note that the decorator retries the function/method only on some
    of the DB errors that are considered to be worth retrying, like
    deadlocks and disconnections.

    :param func: Function to decorate.
    :param retry: a Retrying object
    :return: Decorated function.
    """
    if not retry:
        retry = tenacity.Retrying(
            retry=tenacity.retry_if_exception_type(_RETRY_ERRORS),
            stop=tenacity.stop_after_attempt(50),
            wait=tenacity.wait_incrementing(start=0, increment=0.1, max=2)
        )

    # The `assigned` arg should be empty as some of the default values are not
    # supported by simply initialized MagicMocks. The consequence may
    # be that the representation will contain the wrapper and not the
    # wrapped function.
    @functools.wraps(func, assigned=[])
    def decorate(*args, **kw):
        # Retrying library decorator might potentially run a decorated
        # function within a new thread so it's safer not to apply the
        # decorator directly to a target method/function because we can
        # lose an authentication context.
        # The solution is to create one more function and explicitly set
        # auth context before calling it (potentially in a new thread).
        auth_ctx = context.ctx() if context.has_ctx() else None

        return retry.call(_with_auth_context, auth_ctx, func, *args, **kw)

    return decorate
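A minimal sketch of applying the decorator above; the DB-layer function is hypothetical, and `_RETRY_ERRORS` is assumed to contain the driver's deadlock/disconnection exception types:

@retry_on_db_error
def get_task_execution(task_id):
    # Placeholder DB read: if it raises one of _RETRY_ERRORS the call is
    # repeated (up to 50 attempts with an incrementing wait), with the
    # caller's auth context restored first.
    ...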
Example #7
def retry_random_upon_exception(exc, delay=0.5, max_delay=5,
                                max_attempts=DEFAULT_MAX_ATTEMPTS):
    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_exception_type(exc),
                          wait=tenacity.wait_random_exponential(
                              multiplier=delay, max=max_delay),
                          stop=tenacity.stop_after_attempt(max_attempts),
                          before=_log_before_retry, after=_log_after_retry)
Example #8
def _safe_mongo_call(max_retries, retry_interval):
    return tenacity.retry(
        retry=tenacity.retry_if_exception_type(
            pymongo.errors.AutoReconnect),
        wait=tenacity.wait_fixed(retry_interval),
        stop=(tenacity.stop_after_attempt(max_retries) if max_retries >= 0
              else tenacity.stop_never)
    )
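A sketch of the intended use of the factory above; the collection and arguments are placeholders:

@_safe_mongo_call(max_retries=3, retry_interval=2)
def upsert_resource(collection, resource_id, document):
    # Repeated on pymongo.errors.AutoReconnect; passing a negative
    # max_retries would make the decorated call retry forever.
    collection.replace_one({"_id": resource_id}, document, upsert=True)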
Example #9
def retry_on_conflict(func):
    wrapper = tenacity.retry(
        stop=tenacity.stop_after_attempt(11),
        wait=tenacity.wait_random(max=0.002),
        retry=tenacity.retry_if_exception_type(exception.ConcurrentTransaction),
        reraise=True,
    )
    return wrapper(func)
Example #10
    def send_request(self, socket, request):
        if hasattr(request, 'timeout'):
            _stop = tenacity.stop_after_delay(request.timeout)
        elif request.retry is not None and request.retry > 0:
            # no rpc_response_timeout option if notification
            _stop = tenacity.stop_after_attempt(request.retry)
        else:
            # well, now what?
            _stop = tenacity.stop_after_delay(60)

        @tenacity.retry(retry=tenacity.retry_if_exception_type(zmq.Again),
                        stop=_stop)
        def send_retrying():
            if request.msg_type in zmq_names.MULTISEND_TYPES:
                for _ in range(socket.connections_count()):
                    self.sender.send(socket, request)
            else:
                self.sender.send(socket, request)
        return send_retrying()
Example #11
def assert_wait(func, exc_type=AssertionError):
  """Waits for function to succeed (not raise `exc_type`)."""
  return tenacity.Retrying(
      stop=tenacity.stop_after_delay(constants.ux.MAX_USER_WAIT_SECONDS),
      retry=tenacity.retry_if_exception_type((exc_type, tenacity.TryAgain)))(
      func)
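A hedged usage sketch; the UI check below is illustrative (any callable that raises AssertionError, or tenacity.TryAgain, until the condition holds will do):

def _dashboard_is_ready():
  # Placeholder check; assert_wait keeps calling it until it stops raising
  # AssertionError or MAX_USER_WAIT_SECONDS elapses.
  assert dashboard.header_is_visible()

assert_wait(_dashboard_is_ready)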
Example #12
        "w",
        compression=zipfile.ZIP_DEFLATED)


async def get_nintendont_releases(session: aiohttp.ClientSession):
    async with session.get(_NINTENDONT_RELEASES_URL) as response:
        try:
            response.raise_for_status()
            return await response.json()
        except aiohttp.ClientResponseError as e:
            raise RuntimeError("Unable to get Nintendont releases") from e


@tenacity.retry(
    stop=tenacity.stop_after_attempt(5),
    retry=tenacity.retry_if_exception_type(aiohttp.ClientConnectorError),
    wait=tenacity.wait_exponential(multiplier=1, min=4, max=30),
)
async def download_nintendont():
    headers = None
    if "GITHUB_TOKEN" in os.environ:
        headers = {"Authorization": f"Bearer {os.environ['GITHUB_TOKEN']}"}

    async with aiohttp.ClientSession(headers=headers) as session:
        print("Fetching list of Nintendont releases.")
        releases = await get_nintendont_releases(session)
        latest_release = releases[0]

        download_urls = [
            asset["browser_download_url"] for asset in latest_release["assets"]
            if asset["name"] == "boot.dol"
Example #13
            plural=plural,
        )
    except ApiException as err:
        logging.error(
            "Failed to create %s %r: %s",
            custom_object.body["kind"],
            custom_object.name,
            err.reason,
        )
        raise


@retry(
    stop=stop_after_delay(KUBE_API_DELETE_TIMEOUT),
    wait=wait_fixed(KUBE_API_WAIT),
    retry=retry_if_exception_type(ResourceStillThereError),
)
def await_no_resources_found(list_resources: Callable, **kwargs):
    try:
        found = list_resources(**kwargs)
    except ApiException as err:
        if err.status == STATUS_NOT_FOUND:
            return
        raise
    if hasattr(found, "items"):
        found = found.items
    if found:
        raise ResourceStillThereError(
            f"Resource(s): {found} still found; retrying.")

Example #14
    else:
        expect = 'Projects - OpenStack Dashboard'

    if expect not in response.text:
        msg = 'FAILURE code={} text="{}"'.format(response, response.text)
        logging.info("Something went wrong: {}".format(msg))
        raise FailedAuth(msg)
    logging.info("Logged in okay")
    return client, response


# NOTE(ajkavanagh): it seems that apache2 doesn't start quickly enough
# for the test, and so it gets reset errors; repeat until either that
# stops or there is a failure
@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, min=5, max=10),
                retry=tenacity.retry_if_exception_type(
                    http.client.RemoteDisconnected),
                reraise=True)
def _do_request(request, cafile=None):
    """Open a webpage via urlopen.

    :param request: A urllib request object.
    :type request: object
    :returns: HTTPResponse object
    :rtype: object
    :raises: URLError on protocol errors
    """
    return urllib.request.urlopen(request, cafile=cafile)
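# Hedged usage sketch for the helper above (the URL and CA bundle path are
# placeholders):
def _example_fetch_login_page():
    request = urllib.request.Request(
        'https://dashboard.example.com/horizon/auth/login/')
    # Retries on http.client.RemoteDisconnected while apache2 is still
    # starting, then re-raises the final error (reraise=True).
    return _do_request(request, cafile='/etc/ssl/certs/ca-certificates.crt')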


class OpenStackDashboardBase():
    """Mixin for interacting with Horizon."""
Example #15
def cleanup(bucket_cleanup):
    """Use this fixture to delete all unit testing resources
    regardless of the failure or success of the test."""
    yield None
    iam = ck.aws.clients['iam']
    ec2 = ck.aws.clients['ec2']
    batch = ck.aws.clients['batch']
    ecs = ck.aws.clients['ecs']
    config_file = ck.config.get_config_file()
    section_suffix = ck.get_profile() + ' ' + ck.get_region()
    jq_section_name = 'job-queues ' + section_suffix
    ce_section_name = 'compute-environments ' + section_suffix
    jd_section_name = 'job-definitions ' + section_suffix
    roles_section_name = 'roles ' + ck.get_profile() + ' global'
    vpc_section_name = 'vpc ' + section_suffix
    sg_section_name = 'security-groups ' + section_suffix

    retry = tenacity.Retrying(wait=tenacity.wait_exponential(max=16),
                              stop=tenacity.stop_after_delay(120),
                              retry=tenacity.retry_if_exception_type(
                                  batch.exceptions.ClientException))

    # Clean up job queues from AWS
    # ----------------------------
    # Find all unit testing job queues
    response = batch.describe_job_queues()

    job_queues = [{
        'name': d['jobQueueName'],
        'arn': d['jobQueueArn'],
        'state': d['state'],
        'status': d['status']
    } for d in response.get('jobQueues')]

    while response.get('nextToken'):
        response = batch.describe_job_queues(
            nextToken=response.get('nextToken'))

        job_queues = job_queues + [{
            'name': d['jobQueueName'],
            'arn': d['jobQueueArn'],
            'state': d['state'],
            'status': d['status']
        } for d in response.get('jobQueues')]

    unit_test_JQs = list(
        filter(lambda d: UNIT_TEST_PREFIX in d['name'], job_queues))

    enabled = list(filter(lambda d: d['state'] == 'ENABLED', unit_test_JQs))

    for jq in enabled:
        ck.aws.wait_for_job_queue(name=jq['name'], max_wait_time=180)
        retry.call(batch.update_job_queue,
                   jobQueue=jq['arn'],
                   state='DISABLED')

    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)

    requires_deletion = list(
        filter(lambda d: d['status'] not in ['DELETED', 'DELETING'],
               unit_test_JQs))

    for jq in requires_deletion:
        ck.aws.wait_for_job_queue(name=jq['name'], max_wait_time=180)

        # Finally, delete the job queue
        retry.call(batch.delete_job_queue, jobQueue=jq['arn'])

        # Clean up config file
        try:
            config.remove_option(jq_section_name, jq['name'])
        except configparser.NoSectionError:
            pass

    with open(config_file, 'w') as f:
        config.write(f)

    # Clean up compute environments from AWS
    # --------------------------------------
    # Find all unit testing compute environments
    response = batch.describe_compute_environments()

    comp_envs = [{
        'name': d['computeEnvironmentName'],
        'arn': d['computeEnvironmentArn'],
        'state': d['state'],
        'status': d['status']
    } for d in response.get('computeEnvironments')]

    while response.get('nextToken'):
        response = batch.describe_compute_environments(
            nextToken=response.get('nextToken'))

        comp_envs = comp_envs + [{
            'name': d['computeEnvironmentName'],
            'arn': d['computeEnvironmentArn'],
            'state': d['state'],
            'status': d['status']
        } for d in response.get('computeEnvironments')]

    unit_test_CEs = list(
        filter(lambda d: UNIT_TEST_PREFIX in d['name'], comp_envs))

    enabled = list(filter(lambda d: d['state'] == 'ENABLED', unit_test_CEs))

    for ce in enabled:
        ck.aws.wait_for_compute_environment(arn=ce['arn'],
                                            name=ce['name'],
                                            log=False)

        # Set the compute environment state to 'DISABLED'
        retry.call(batch.update_compute_environment,
                   computeEnvironment=ce['arn'],
                   state='DISABLED')

    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)

    for ce in unit_test_CEs:
        # Then disassociate from any job queues
        response = batch.describe_job_queues()
        associated_queues = list(
            filter(
                lambda q: ce['arn'] in [
                    c['computeEnvironment']
                    for c in q['computeEnvironmentOrder']
                ], response.get('jobQueues')))

        for queue in associated_queues:
            arn = queue['jobQueueArn']
            name = queue['jobQueueName']

            # Disable submissions to the queue
            if queue['state'] == 'ENABLED':
                ck.aws.wait_for_job_queue(name=name,
                                          log=True,
                                          max_wait_time=180)
                retry.call(batch.update_job_queue,
                           jobQueue=arn,
                           state='DISABLED')

            # Delete the job queue
            if queue['status'] not in ['DELETED', 'DELETING']:
                ck.aws.wait_for_job_queue(name=name,
                                          log=True,
                                          max_wait_time=180)
                retry.call(batch.delete_job_queue, jobQueue=arn)

            # Clean up config file
            try:
                config.remove_option(jq_section_name, name)
            except configparser.NoSectionError:
                pass

    requires_deletion = list(
        filter(lambda d: d['status'] not in ['DELETED', 'DELETING'],
               unit_test_CEs))

    for ce in requires_deletion:
        # Now get the associated ECS cluster
        response = batch.describe_compute_environments(
            computeEnvironments=[ce['arn']])
        cluster_arn = response.get('computeEnvironments')[0]['ecsClusterArn']

        # Get container instances
        response = ecs.list_container_instances(cluster=cluster_arn)
        instances = response.get('containerInstanceArns')

        for i in instances:
            ecs.deregister_container_instance(cluster=cluster_arn,
                                              containerInstance=i,
                                              force=True)

        retry_if_exception = tenacity.Retrying(
            wait=tenacity.wait_exponential(max=16),
            stop=tenacity.stop_after_delay(120),
            retry=tenacity.retry_if_exception_type())
        retry_if_exception.call(ecs.delete_cluster, cluster=cluster_arn)

        ck.aws.wait_for_compute_environment(arn=ce['arn'],
                                            name=ce['name'],
                                            log=False)

        retry.call(batch.delete_compute_environment,
                   computeEnvironment=ce['arn'])

        # Clean up config file
        try:
            config.remove_option(ce_section_name, ce['name'])
        except configparser.NoSectionError:
            pass

    with open(config_file, 'w') as f:
        config.write(f)

    # Clean up job definitions from AWS
    # ---------------------------------
    # Find all unit testing job definitions
    response = batch.describe_job_definitions(status='ACTIVE')

    jds = [{
        'name': d['jobDefinitionName'],
        'arn': d['jobDefinitionArn']
    } for d in response.get('jobDefinitions')]

    unit_test_jds = list(filter(lambda d: UNIT_TEST_PREFIX in d['name'], jds))

    while response.get('nextToken'):
        response = batch.describe_job_definitions(
            status='ACTIVE', nextToken=response.get('nextToken'))

        jds = [{
            'name': d['jobDefinitionName'],
            'arn': d['jobDefinitionArn']
        } for d in response.get('jobDefinitions')]

        unit_test_jds = unit_test_jds + list(
            filter(lambda d: UNIT_TEST_PREFIX in d['name'], jds))

    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)

        for jd in unit_test_jds:
            # Deregister the job definition
            retry.call(batch.deregister_job_definition,
                       jobDefinition=jd['arn'])

            # Clean up config file
            try:
                config.remove_option(jd_section_name, jd['name'])
            except configparser.NoSectionError:
                pass

        with open(config_file, 'w') as f:
            config.write(f)

    # Clean up security_groups from AWS
    # ---------------------------------
    # Find all unit test security groups
    ec2_retry = tenacity.Retrying(wait=tenacity.wait_exponential(max=16),
                                  stop=tenacity.stop_after_delay(60),
                                  retry=tenacity.retry_if_exception_type(
                                      ec2.exceptions.ClientError))

    response = ec2.describe_security_groups()
    sgs = [{
        'name': d['GroupName'],
        'id': d['GroupId']
    } for d in response.get('SecurityGroups')]
    unit_test_sgs = filter(lambda d: UNIT_TEST_PREFIX in d['name'], sgs)

    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)

        for sg in unit_test_sgs:
            # Delete role
            ec2_retry.call(ec2.delete_security_group, GroupId=sg['id'])

            # Clean up config file
            try:
                config.remove_option(sg_section_name, sg['id'])
            except configparser.NoSectionError:
                pass

        with open(config_file, 'w') as f:
            config.write(f)

    # Clean up VPCs from AWS
    # ----------------------
    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)

        # Find all VPCs with a Name tag key
        response = ec2.describe_vpcs(Filters=[{
            'Name': 'tag-key',
            'Values': ['Name']
        }])

        for vpc in response.get('Vpcs'):
            # Test if the unit-test prefix is in the name
            if UNIT_TEST_PREFIX in [
                    d for d in vpc['Tags'] if d['Key'] == 'Name'
            ][0]['Value']:
                # Retrieve and delete subnets
                response = ec2.describe_subnets(
                    Filters=[{
                        'Name': 'vpc-id',
                        'Values': [vpc['VpcId']]
                    }])

                subnets = [d['SubnetId'] for d in response.get('Subnets')]

                for subnet_id in subnets:
                    ec2_retry.call(ec2.delete_subnet, SubnetId=subnet_id)

                response = ec2.describe_network_acls(
                    Filters=[{
                        'Name': 'vpc-id',
                        'Values': [vpc['VpcId']]
                    }, {
                        'Name': 'default',
                        'Values': ['false']
                    }])

                network_acl_ids = [
                    n['NetworkAclId'] for n in response.get('NetworkAcls')
                ]

                # Delete the network ACL
                for net_id in network_acl_ids:
                    ec2_retry.call(ec2.delete_network_acl, NetworkAclId=net_id)

                response = ec2.describe_route_tables(
                    Filters=[{
                        'Name': 'vpc-id',
                        'Values': [vpc['VpcId']]
                    }, {
                        'Name': 'association.main',
                        'Values': ['false']
                    }])

                route_table_ids = [
                    rt['RouteTableId'] for rt in response.get('RouteTables')
                ]

                # Delete the route table
                for rt_id in route_table_ids:
                    ec2_retry.call(ec2.delete_route_table, RouteTableId=rt_id)

                # Detach and delete the internet gateway
                response = ec2.describe_internet_gateways(
                    Filters=[{
                        'Name': 'attachment.vpc-id',
                        'Values': [vpc['VpcId']]
                    }])

                gateway_ids = [
                    g['InternetGatewayId']
                    for g in response.get('InternetGateways')
                ]

                for gid in gateway_ids:
                    ec2_retry.call(ec2.detach_internet_gateway,
                                   InternetGatewayId=gid,
                                   VpcId=vpc['VpcId'])
                    ec2_retry.call(ec2.delete_internet_gateway,
                                   InternetGatewayId=gid)

                # delete the VPC
                ec2_retry.call(ec2.delete_vpc, VpcId=vpc['VpcId'])

                # Clean up config file
                try:
                    config.remove_option(vpc_section_name, vpc['VpcId'])
                except configparser.NoSectionError:
                    pass

        with open(config_file, 'w') as f:
            config.write(f)

    # Clean up roles from AWS
    # -----------------------
    # Find all unit test roles
    response = iam.list_roles()
    role_names = [d['RoleName'] for d in response.get('Roles')]
    unit_test_roles = filter(lambda n: UNIT_TEST_PREFIX in n, role_names)

    for role_name in unit_test_roles:
        # Remove instance profiles
        response = iam.list_instance_profiles_for_role(RoleName=role_name)
        for ip in response.get('InstanceProfiles'):
            iam.remove_role_from_instance_profile(
                InstanceProfileName=ip['InstanceProfileName'],
                RoleName=role_name)
            iam.delete_instance_profile(
                InstanceProfileName=ip['InstanceProfileName'])

        # Detach policies from role
        response = iam.list_attached_role_policies(RoleName=role_name)
        for policy in response.get('AttachedPolicies'):
            iam.detach_role_policy(RoleName=role_name,
                                   PolicyArn=policy['PolicyArn'])

        # Delete role
        iam.delete_role(RoleName=role_name)

    # Clean up config file
    config = configparser.ConfigParser()
    with ck.config.rlock:
        config.read(config_file)
        for role_name in config.options(roles_section_name):
            if UNIT_TEST_PREFIX in role_name:
                config.remove_option(roles_section_name, role_name)
        with open(config_file, 'w') as f:
            config.write(f)
Example #16
class LBAASv2Test(test_utils.OpenStackBaseTest):
    """LBaaSv2 service tests."""
    @classmethod
    def setUpClass(cls):
        """Run class setup for running LBaaSv2 service tests."""
        super(LBAASv2Test, cls).setUpClass()
        cls.keystone_client = openstack_utils.get_keystone_session_client(
            cls.keystone_session)
        cls.neutron_client = openstack_utils.get_neutron_session_client(
            cls.keystone_session)
        cls.octavia_client = openstack_utils.get_octavia_session_client(
            cls.keystone_session)
        cls.RESOURCE_PREFIX = 'zaza-octavia'

        # NOTE(fnordahl): in the event of a test failure we do not want to run
        # tear down code as it will make debugging a problem virtually
        # impossible.  To alleviate this, each test method will set the
        # `run_resource_cleanup` instance variable at the end, which lets us
        # run tear down only when there was no failure.
        cls.run_resource_cleanup = False
        # List of load balancers created by this test
        cls.loadbalancers = []
        # List of floating IPs created by this test
        cls.fips = []

    def resource_cleanup(self):
        """Remove resources created during test execution."""
        for lb in self.loadbalancers:
            self.octavia_client.load_balancer_delete(lb['id'], cascade=True)
            try:
                self.wait_for_lb_resource(
                    self.octavia_client.load_balancer_show,
                    lb['id'],
                    provisioning_status='DELETED')
            except osc_lib.exceptions.NotFound:
                pass
        for fip in self.fips:
            self.neutron_client.delete_floatingip(fip)
        # we run the parent resource_cleanup last as it will remove instances
        # referenced as members in the above cleaned up load balancers
        super(LBAASv2Test, self).resource_cleanup()

    @staticmethod
    @tenacity.retry(retry=tenacity.retry_if_exception_type(AssertionError),
                    wait=tenacity.wait_fixed(1),
                    reraise=True,
                    stop=tenacity.stop_after_delay(900))
    def wait_for_lb_resource(octavia_show_func,
                             resource_id,
                             provisioning_status=None,
                             operating_status=None):
        """Wait for loadbalancer resource to reach expected status."""
        provisioning_status = provisioning_status or 'ACTIVE'
        resp = octavia_show_func(resource_id)
        logging.info(resp['provisioning_status'])
        assert resp['provisioning_status'] == provisioning_status, (
            'load balancer resource has not reached '
            'expected provisioning status: {}'.format(resp))
        if operating_status:
            logging.info(resp['operating_status'])
            assert resp['operating_status'] == operating_status, (
                'load balancer resource has not reached '
                'expected operating status: {}'.format(resp))

        return resp

    @staticmethod
    def get_lb_providers(octavia_client):
        """Retrieve loadbalancer providers.

        :param octavia_client: Octavia client object
        :type octavia_client: OctaviaAPI
        :returns: Dictionary with provider information, name as keys
        :rtype: Dict[str,Dict[str,str]]
        """
        providers = {
            provider['name']: provider
            for provider in octavia_client.provider_list().get(
                'providers', [])
            if provider['name'] != 'octavia'  # alias for `amphora`, skip
        }
        return providers

    def _create_lb_resources(self, octavia_client, provider, vip_subnet_id,
                             member_subnet_id, payload_ips):
        # The `amphora` provider is required for load balancing based on
        # higher layer protocols
        if provider == 'amphora':
            protocol = 'HTTP'
            algorithm = 'ROUND_ROBIN'
            monitor = True
        else:
            protocol = 'TCP'
            algorithm = 'SOURCE_IP_PORT'
            monitor = False

        result = octavia_client.load_balancer_create(
            json={
                'loadbalancer': {
                    'description': 'Created by Zaza',
                    'admin_state_up': True,
                    'vip_subnet_id': vip_subnet_id,
                    'name': 'zaza-{}-0'.format(provider),
                    'provider': provider,
                }
            })
        lb = result['loadbalancer']
        lb_id = lb['id']

        logging.info('Awaiting loadbalancer to reach provisioning_status '
                     '"ACTIVE"')
        resp = self.wait_for_lb_resource(octavia_client.load_balancer_show,
                                         lb_id)
        logging.info(resp)

        result = octavia_client.listener_create(
            json={
                'listener': {
                    'loadbalancer_id': lb_id,
                    'name': 'listener1',
                    'protocol': protocol,
                    'protocol_port': 80
                },
            })
        listener_id = result['listener']['id']
        logging.info('Awaiting listener to reach provisioning_status '
                     '"ACTIVE"')
        resp = self.wait_for_lb_resource(octavia_client.listener_show,
                                         listener_id)
        logging.info(resp)

        result = octavia_client.pool_create(
            json={
                'pool': {
                    'listener_id': listener_id,
                    'name': 'pool1',
                    'lb_algorithm': algorithm,
                    'protocol': protocol,
                },
            })
        pool_id = result['pool']['id']
        logging.info('Awaiting pool to reach provisioning_status ' '"ACTIVE"')
        resp = self.wait_for_lb_resource(octavia_client.pool_show, pool_id)
        logging.info(resp)

        if monitor:
            result = octavia_client.health_monitor_create(
                json={
                    'healthmonitor': {
                        'pool_id': pool_id,
                        'delay': 5,
                        'max_retries': 4,
                        'timeout': 10,
                        'type': 'HTTP',
                        'url_path': '/',
                    },
                })
            healthmonitor_id = result['healthmonitor']['id']
            logging.info('Awaiting healthmonitor to reach provisioning_status '
                         '"ACTIVE"')
            resp = self.wait_for_lb_resource(
                octavia_client.health_monitor_show, healthmonitor_id)
            logging.info(resp)

        for ip in payload_ips:
            result = octavia_client.member_create(pool_id=pool_id,
                                                  json={
                                                      'member': {
                                                          'subnet_id':
                                                          member_subnet_id,
                                                          'address': ip,
                                                          'protocol_port': 80,
                                                      },
                                                  })
            member_id = result['member']['id']
            logging.info('Awaiting member to reach provisioning_status '
                         '"ACTIVE"')
            resp = self.wait_for_lb_resource(
                lambda x: octavia_client.member_show(pool_id=pool_id,
                                                     member_id=x),
                member_id,
                operating_status='ONLINE' if monitor else '')
            logging.info(resp)
        return lb

    @staticmethod
    @tenacity.retry(wait=tenacity.wait_fixed(1),
                    reraise=True,
                    stop=tenacity.stop_after_delay(900))
    def _get_payload(ip):
        return subprocess.check_output(
            ['wget', '-O', '-', 'http://{}/'.format(ip)],
            universal_newlines=True)

    def test_create_loadbalancer(self):
        """Create load balancer."""
        # Prepare payload instances
        # First we allow communication to port 80 by adding a security group
        # rule
        project_id = openstack_utils.get_project_id(self.keystone_client,
                                                    'admin',
                                                    domain_name='admin_domain')
        openstack_utils.add_neutron_secgroup_rules(self.neutron_client,
                                                   project_id,
                                                   [{
                                                       'protocol': 'tcp',
                                                       'port_range_min': '80',
                                                       'port_range_max': '80',
                                                       'direction': 'ingress'
                                                   }])

        # Then we request two Ubuntu instances with the Apache web server
        # installed
        instance_1, instance_2 = self.launch_guests(
            userdata='#cloud-config\npackages:\n - apache2\n')

        # Get IP of the prepared payload instances
        payload_ips = []
        for server in (instance_1, instance_2):
            payload_ips.append(server.networks['private'][0])
        self.assertTrue(len(payload_ips) > 0)

        resp = self.neutron_client.list_networks(name='private')
        subnet_id = resp['networks'][0]['subnets'][0]
        if openstack_utils.dvr_enabled():
            resp = self.neutron_client.list_networks(
                name='private_lb_fip_network')
            vip_subnet_id = resp['networks'][0]['subnets'][0]
        else:
            vip_subnet_id = subnet_id
        for provider in self.get_lb_providers(self.octavia_client).keys():
            logging.info(
                'Creating loadbalancer with provider {}'.format(provider))
            lb = self._create_lb_resources(self.octavia_client, provider,
                                           vip_subnet_id, subnet_id,
                                           payload_ips)
            self.loadbalancers.append(lb)

            lb_fp = openstack_utils.create_floating_ip(
                self.neutron_client, 'ext_net', port={'id': lb['vip_port_id']})

            snippet = 'This is the default welcome page'
            assert snippet in self._get_payload(lb_fp['floating_ip_address'])
            logging.info('Found "{}" in page retrieved through load balancer '
                         ' (provider="{}") at "http://{}/"'.format(
                             snippet, provider, lb_fp['floating_ip_address']))

        # If we get here, it means the tests passed
        self.run_resource_cleanup = True
Example #17
    """
    return hookenv.leader_get(CHARM_ACCESS_ROLE_ID)


def get_client(url=None):
    """Provide a client for talking to the vault api

    :returns: vault client
    :rtype: hvac.Client
    """
    return hvac.Client(url=url or get_api_url())


@tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60),
                stop=tenacity.stop_after_attempt(8),
                retry=tenacity.retry_if_exception_type(
                    hvac.exceptions.InternalServerError),
                reraise=True)
def get_local_client():
    """Provide a client for talking to the vault api

    :returns: vault client
    :rtype: hvac.Client
    """
    client = get_client(url=VAULT_LOCALHOST_URL)
    app_role_id = get_local_charm_access_role_id()
    if not app_role_id:
        hookenv.log('Could not retrieve app_role_id', level=hookenv.DEBUG)
        raise VaultNotReady("Cannot initialise local client")
    client = hvac.Client(url=VAULT_LOCALHOST_URL)
    client.auth_approle(app_role_id)
    return client
Example #18
class EntityManager(object):
    __metaclass__ = ABCMeta

    @abstractmethod
    def __init__(self, client=None, project_id=None):
        """Mother Class to Initiate
        Args:
            self : Authorized BigQuery API service instance.
            client (:obj:`int`, optional):
            project_id (str):
        Returns:
        """
        self.__client = client
        self.__project_id = project_id
        self._time_out_seconds = 3600

    @staticmethod
    def _execute_methods(request):
        """Method using to execute Build the calling API's Methods in Google Cloud Discovery
        Args:
            self : Authorized BigQuery API service instance.
            request:
        Returns:
        """
        return request.execute()

    @tn.retry(wait=tn.wait_random_exponential(multiplier=1, max=60),
              retry=tn.retry_if_exception_type(),
              stop=tn.stop_after_attempt(5))
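    # NOTE: tn.retry_if_exception_type() with no argument defaults to
    # Exception, so any exception raised by the wrapped call triggers a retry
    # (up to 5 attempts here).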
    def _call_methods(self, http_verb, url, **kwargs):
        """Method using to Call  API's Methods in Google Cloud Discovery
        Args:
            self : Authorized Cloud Storage API service instance.
            http_verb:
            url:
            **kwargs:
        Returns:
        """
        return self.__client.api_request(http_verb,
                                         url,
                                         api_version="v2",
                                         **kwargs)

    def _check_results(self, job_id, job_type, dataset_id, table_id):
        """check_job_results : methods to check if execution jobs is good gone
        Args:
            self : Authorized BigQuery API service instance.
            job_id (str):
            job_type (object): Type of Job : Dataset Jobs, Table Jobs or General Jobs
            dataset_id (str):
            table_id (str):
        Returns:
        """
        request = self.__client.jobs().get(projectId=self.__project_id,
                                           jobId=job_id)
        response = self._execute_methods(request)
        start_time = time.time()
        duration = 0
        while response["status"]["state"] != "DONE" and (
                duration <= self._time_out_seconds):
            time.sleep(1)
            response = self.__client.jobs().get(projectId=self.__project_id,
                                                jobId=job_id)
            response = self._execute_methods(response)
            duration = time.time() - start_time

        if duration > self._time_out_seconds and response["status"][
                "state"] != "DONE":
            raise Exception("Time out %dsec exceeded for this current job" %
                            self._time_out_seconds)

        try:
            print(response["status"]["errorResult"])
            print("\tERROR %s: %s.%s " % (job_type, dataset_id, table_id))
            return response["status"]["errorResult"]
        except KeyError:
            print("\tSUCCESS %s: %s.%s " % (job_type, dataset_id, table_id))
            return "\t {statistics}".format(statistics=response["statistics"])
Example #19
class Novel:
    id: int
    title: str
    author: str
    library: str
    cover: bytes
    status: str
    statusCode: int
    totalWords: str
    briefIntroduction: str
    copyright: bool
    volumeList: List[dict]

    @retry(stop=stop_after_attempt(3),
           retry=retry_if_exception_type(ConnectionError))
    @retry(stop=stop_after_attempt(3),
           retry=retry_if_exception_type(ProtocolError))
    @retry(stop=stop_after_attempt(3), retry=retry_if_exception_type(CE))
    @retry(stop=stop_after_attempt(3),
           retry=retry_if_exception_type(TimeoutError))
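    # NOTE: stacking several @retry decorators multiplies the attempts (each
    # outer retry re-runs the already-decorated callable); a single @retry
    # with retry_if_exception_type((ConnectionError, ProtocolError, CE,
    # TimeoutError)) would usually be clearer.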
    def __init__(self, articleid: int):
        main_page_request = requests.get(
            f"http://www.wenku8.net/book/{articleid}.htm",
            headers=headers,
            cookies=SelfUser.cookies)
        main_page_request.encoding = "gbk"
        main_page = BeautifulSoup(main_page_request.text,
                                  features="html.parser")
        main_web_content = main_page.text
        self.id = articleid
        self.title = fast_regex(r"槽([\s\S]*)\[推", main_web_content).lstrip()
        self.statusCode = int(str(articleid)[0:1]) if len(str(
            articleid)) >= 4 else 0  # statusCode is just the novel id bucketed per 1000 titles (how the site shards novel directories)
        assert bool(self.title)
        self.author = fast_regex(r"小说作者:(.*)", main_web_content)
        self.library = fast_regex(r"文库分类:(.*)", main_web_content)
        self.status = fast_regex(r"文章状态:(.*)", main_web_content)
        self.totalWords = fast_regex(r"全文长度:(.*)字", main_web_content)
        self.copyright = main_web_content.find("版权问题") == -1
        self.briefIntroduction = (
            fast_regex(r"内容简介:([\s\S]*)阅读\n小说目录", main_web_content)
            .strip().replace(' ', '').replace("\n\n", "").replace("\t", ""))
        self.cover = request(
            f"https://img.wenku8.com/image/{self.statusCode}/{self.id}/{self.id}s.jpg",
            SelfUser.cookies).content
        read_page_request = requests.get(
            f"http://www.wenku8.net/novel/{self.statusCode}/{articleid}/index.htm",
            cookies=SelfUser.cookies,
            headers=headers)
        read_page_request.encoding = "gbk"
        read_page = BeautifulSoup(read_page_request.text,
                                  features="html.parser")
        tags = read_page.find_all("td")
        volumeList = []
        for i in tags:
            if i["class"][0] == "vcss":
                volumeList.append({"name": str(i.string), "chapters": []})
            elif i["class"][0] == "ccss" and i.string != "\xa0":
                volumeList[len(volumeList) - 1]["chapters"].append({
                    "name":
                    str(i.string),
                    "cid":
                    int(i.a["href"].replace(".htm", ""))
                })
        self.volumeList = volumeList

    def to_dict(self) -> dict:
        return {
            "id": self.id,
            "title": self.title,
            "author": self.author,
            "library": self.library,
            "cover": self.cover,
            "status": self.status,
            "statusCode": self.statusCode,
            "totalWords": self.totalWords,
            "briefIntroduction": self.briefIntroduction,
            "copyright": self.copyright,
            "volumeList": self.volumeList
        }
Example #20
class NuAPI:

    def __init__(self, apiKey, verbose):

        self.__apiKey = apiKey
        self.__apiHeaders = {"X-NUID-API-KEY": self.__apiKey}  # set our API key in the header as required.
        self.__v = verbose
        self.__api = "https://nebulous.nuid.io"
        self.__secrets = None
        self.__evtLog = WindowsEventWriter()
        # API route: https://nebulous.nuid.io/api/search/hash/NTLMSHA2/<sha256(ntlm)>
        self.__route_url = "{}/api/search/hash/NTLMSHA2".format(self.__api)
        # API route: https://nebulous.nuid.io/api/search/kanon/<query>
        self.__anon_url = "{}/api/search/kanon/NTLMSHA2".format(self.__api)

    @retry(retry=retry_if_exception_type(APIRetryException), stop=stop_after_attempt(10), wait=wait_random(10, 30))
    def anon_api_helper(self, user):
        """
        Helper function for check_hashes(). Pass over an iterable of the users' keys in __secrets and feed it to this.
        :param user: Pass in the username of the user to look up. This should be a key of __secrets we can use to lookup
        the users information.
        :return: None.
        :raises: APIRetryException @ if the request fails for whatever reason.
        """
        # Boilerplate the data for the Windows Event Log. Need to do this to redact the NT hashes from the log file.
        audit_data = {
            "accoutName": user,
            "Status": self.__secrets[user].get("Status"),
            "Check": {
                "LastCheck": None,
                "Which": [],
                "Compromised": None
            }
        }
        h = sha256()

        # Set the timestamps
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.__secrets[user]['Check']['LastCheck'] = now
        audit_data['Check']['LastCheck'] = now

        if self.__v:
            logging.info(logger.Fore.LIGHTBLACK_EX +
                         "Checking hash for account: {}{}".format(
                             user, logger.Fore.RESET))
        try:
            # First we check the accounts active NTLM hash.
            nthash = self.__secrets[user]['NTLM_Hash']
            h.update(nthash)  # Calculate SHA256(NTLM)
            digest = h.hexdigest() # Save this digest for comparison later.
            url = "{}/{}".format(self.__anon_url, digest[0:5])
            resp = requests.get(url, headers=self.__apiHeaders, timeout=60)

        except (requests.HTTPError, requests.ConnectTimeout, requests.ConnectionError, ReadTimeout) as err:
            logging.warning("Had trouble connecting to the API. Will retry this request. ERR: {}".format(err))
            raise APIRetryException
        except UnicodeDecodeError as unierr:
            logging.error("Malformed request URL. Did the hash decode correctly? Exiting. {}".format(unierr))
            exit(1)

        if resp.status_code == 200:

            for hit in resp.json()['data'].get('matches'):

                if hit == digest:
                    logging.warning(
                        logger.Fore.LIGHTRED_EX + "CURRENT PASSWORD for account: {} is COMPROMISED!".format(user) +
                        logger.Fore.RESET)
                    self.__secrets[user]["Check"]["Compromised"] = True
                    self.__secrets[user]["Check"]["Which"].append("NTLM_Hash")
                    break
            if self.__secrets[user]["Check"]["Compromised"] is None:
                self.__secrets[user]["Check"]["Compromised"] = False
                if self.__v:
                    logging.info(logger.Fore.LIGHTBLACK_EX + "Hash for account:"
                                                             " {} is OK.{}".format(user, logger.Fore.RESET))

        elif resp.status_code == 404:
            logging.error("API error. Retrying the request. Code: {}".format(resp.status_code))
            raise APIRetryException
        elif resp.status_code == 429:
            logging.warning("We are being rate limited. Retrying request and throttling connection.")
            raise APIRetryException
        elif resp.status_code == 500:
            logging.error("API error. Retrying the request. Code: {}".format(resp.status_code))
            raise APIRetryException

        if self.__secrets[user].get("History"):
            # Need to split this one up so we can retry each individual request. Should prevent log spam from retrying.
            self.check_anon_history(user)
            audit_data['Check']['Which'] = self.__secrets[user]['Check']['Which']

        # Setup the data to be submitted to the audit log.
        audit_data['Check']['Compromised'] = self.__secrets[user]['Check']['Compromised']
        audit_data['Check']['Which'] = self.__secrets[user]['Check']['Which']
        # Write our results and redacted data to the Windows event log.
        self.__evtLog.write_event(user, audit_data['Check']['Compromised'], audit_data)

        return 0

    @retry(retry=retry_if_exception_type(APIRetryException), stop=stop_after_attempt(10), wait=wait_random(10, 30))
    def check_anon_history(self, user):
        """
        We split up check_history to its own function. This way if we get a failure when checking an old hash, it does
        not restart the entire process. However, if we get a failure, it will restart all of the history over again.
        :param user: Pass in the username of the user to look up. This should be a key of __secrets we can use to lookup
        the users information.
        :return: None.
        :raises: APIRetryException @ if the request fails for whatever reason.
        """
        # TODO: Get better exception handling so we don't retry all histories if one fails or gets rate limited.
        for nthistory in self.__secrets[user]["History"]:

            h = sha256()
            ntlm_hist = self.__secrets[user]["History"][nthistory]
            h.update(ntlm_hist)
            digest = h.hexdigest()

            if self.__v:

                logging.info(logger.Fore.LIGHTBLACK_EX + "Checking {} for user: {}...".format(nthistory, user) +
                             logger.Fore.RESET)

            try:
                url = "{}/{}".format(self.__anon_url, digest[0:5])
                resp = requests.get(url, headers=self.__apiHeaders, timeout=60)
            except (requests.HTTPError, requests.ConnectTimeout, requests.ConnectionError, ReadTimeout) as err:
                logging.warning("Had trouble connecting to the API. Will retry this request. ERR: {}".format(err))
                raise APIRetryException

            if resp.status_code == 200:
                for hit in resp.json()['data'].get('matches'):

                    if hit == digest:
                        logging.warning(logger.Fore.RED + "Historic hash {} for account: {} is INACTIVE and "
                                                          "COMPROMISED!".format(nthistory, user) + logger.Fore.RESET)
                        self.__secrets[user]["Check"]["Compromised"] = True
                        self.__secrets[user]["Check"]["Which"].append(nthistory)
                        break

                if self.__secrets[user]["Check"]["Compromised"] is None:
                    self.__secrets[user]["Check"]["Compromised"] = False
                    if self.__v:
                        logging.info(logger.Fore.LIGHTBLACK_EX + "Historic hash for account:"
                                                                 " {} is OK.{}".format(user, logger.Fore.RESET))

            elif resp.status_code == 404:
                logging.error("API error. Retrying the request. Code: {}".format(resp.status_code))
                raise APIRetryException
            elif resp.status_code == 429:
                logging.warning("We are being rate limited. Retrying request and throttling connection.")
                raise APIRetryException
            elif resp.status_code == 500:
                logging.error("API error. Retrying the request. Code: {}".format(resp.status_code))
                raise APIRetryException

    @retry(retry=retry_if_exception_type(APIRetryException), stop=stop_after_attempt(10), wait=wait_random(10, 30))
    def api_helper(self, user):
        """
        Helper function for check_hashes(). Pass over an iterable of the users' keys in __secrets and feed it to this.
        :param user: Pass in the username of the user to look up. This should be a key of __secrets we can use to lookup
        the users information.
        :return: None.
        :raises: APIRetryException @ if the request fails for whatever reason.
        """
        # Boilerplate the data for the Windows Event Log. Need to do this to redact the NT hashes from the log file.
        audit_data = {
            "accoutName": user,
            "Status": self.__secrets[user].get("Status"),
            "Check": {
                "LastCheck": None,
                "Which": [],
                "Compromised": None
            }
        }
        h = sha256()

        # Set the timestamps
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.__secrets[user]['Check']['LastCheck'] = now
        audit_data['Check']['LastCheck'] = now

        if self.__v:
            logging.info(logger.Fore.LIGHTBLACK_EX +
                         "Checking hash for account: {}{}".format(
                             user, logger.Fore.RESET))
        try:
            # First we check the accounts active NTLM hash.
            nthash = self.__secrets[user]['NTLM_Hash']
            h.update(nthash)  # Calculate SHA256(NTLM)
            url = "{}/{}".format(self.__route_url, h.hexdigest())
            resp = requests.get(url, headers=self.__apiHeaders, timeout=60)

        except (requests.HTTPError, requests.ConnectTimeout, requests.ConnectionError, ReadTimeout) as err:
            logging.warning("Had trouble connecting to the API. Will retry this request. ERR: {}".format(err))
            raise APIRetryException
        except UnicodeDecodeError as unierr:
            logging.error("Malformed request URL. Did the hash decode correctly? Exiting. {}".format(unierr))
            exit(1)

        if resp.status_code == 200:
            logging.warning(logger.Fore.LIGHTRED_EX + "CURRENT PASSWORD for account: {} is COMPROMISED!".format(user) +
                            logger.Fore.RESET)

            self.__secrets[user]["Check"]["Compromised"] = True
            self.__secrets[user]["Check"]["Which"].append("NTLM_Hash")

        elif resp.status_code == 404:
            if self.__v:
                logging.info(logger.Fore.LIGHTBLACK_EX + "Hash for account:"
                                                         " {} is OK.{}".format(user,  logger.Fore.RESET))
            self.__secrets[user]["Check"]["Compromised"] = False
        elif resp.status_code == 429:
            logging.warning("We are being rate limited. Retrying request and throttling connection.")
            raise APIRetryException
        elif resp.status_code == 500:
            logging.error("API error. Retrying the request. Code: {}".format(resp.status_code))
            raise APIRetryException

        if self.__secrets[user].get("History"):
            # Need to split this one up so we can retry each individual request. Should prevent log spam from retrying.
            self.check_history(user)
            audit_data['Check']['Which'] = self.__secrets[user]['Check']['Which']

        # Setup the data to be submitted to the audit log.
        audit_data['Check']['Compromised'] = self.__secrets[user]['Check']['Compromised']
        audit_data['Check']['Which'] = self.__secrets[user]['Check']['Which']
        # Write our results and redacted data to the Windows event log.
        self.__evtLog.write_event(user, audit_data['Check']['Compromised'], audit_data)

        return 0

    @retry(retry=retry_if_exception_type(APIRetryException), stop=stop_after_attempt(10), wait=wait_random(10, 30))
    def check_history(self, user):
        """
        check_history is split into its own function so that a failure while checking an old hash does not restart
        the entire account check. A failure does, however, restart the whole history loop for that account.
        :param user: The username of the user to look up. This must be a key of __secrets used to look up the
        user's information.
        :return: None.
        :raises: APIRetryException: if the request fails for any reason.
        """
        # TODO: Get better exception handling so we don't retry all histories if one fails or gets rate limited.
        # (A per-hash retry sketch follows this method.)
        for nthistory in self.__secrets[user]["History"]:

            h = sha256()
            ntlm_hist = self.__secrets[user]["History"][nthistory]
            h.update(ntlm_hist)

            if self.__v:

                logging.info(logger.Fore.LIGHTBLACK_EX + "Checking {} for user: {}...".format(nthistory, user) +
                             logger.Fore.RESET)

            try:
                url = "{}/{}".format(self.__route_url, h.hexdigest())
                resp = requests.get(url, headers=self.__apiHeaders, timeout=60)
            except (requests.HTTPError, requests.ConnectTimeout, requests.ConnectionError, ReadTimeout) as err:
                logging.warning("Had trouble connecting to the API. Will retry this request. ERR: {}".format(err))
                raise APIRetryException
            if resp.status_code == 200:
                logging.warning(logger.Fore.RED + "Historic Hash {} for account: {} is INACTIVE and "
                                                  "COMPROMISED!".format(nthistory, user) + logger.Fore.RESET)
                self.__secrets[user]["Check"]["Compromised"] = True
                self.__secrets[user]["Check"]["Which"].append(nthistory)

            elif resp.status_code == 404:
                if self.__v:
                    logging.info(
                        logger.Fore.LIGHTBLACK_EX + "Historic hash {} for account:"
                                                    " {} is OK. {}".format(nthistory, user, logger.Fore.RESET))

            elif resp.status_code == 429:
                logging.warning("We are being rate limited. Retrying request and throttling connection.")
                raise APIRetryException
            elif resp.status_code == 500:
                logging.error("API error. Retrying the request. Code: {}".format(resp.status_code))
                raise APIRetryException
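
    # A minimal per-hash retry sketch (an assumption, not part of the original
    # tool) for the TODO in check_history above: wrapping a single lookup in
    # its own retried helper means a 429/500 on one historic hash would not
    # restart the whole history loop. The name _check_single_hash is
    # hypothetical; it reuses __route_url and __apiHeaders exactly as the
    # methods above do.
    @retry(retry=retry_if_exception_type(APIRetryException), stop=stop_after_attempt(10), wait=wait_random(10, 30))
    def _check_single_hash(self, ntlm_hash):
        """Return True if SHA256(ntlm_hash) is known to the API, False otherwise."""
        h = sha256()
        h.update(ntlm_hash)
        url = "{}/{}".format(self.__route_url, h.hexdigest())
        try:
            resp = requests.get(url, headers=self.__apiHeaders, timeout=60)
        except (requests.HTTPError, requests.ConnectTimeout, requests.ConnectionError, ReadTimeout) as err:
            logging.warning("Had trouble connecting to the API. Will retry this request. ERR: {}".format(err))
            raise APIRetryException
        if resp.status_code in (429, 500):
            raise APIRetryException
        return resp.status_code == 200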

    def check_hashes(self, secrets, kanon):
        """
        Feed this a secrets_dict from secretsdump.py to check each hash against NuID's API.
        :param secrets: secrets_dict object from secretsdump.NTDSHashes().
        :param kanon: If True, use the k-anonymity helper for the lookups.
        :return: The updated secrets_dict, after all checks have completed.
        """
        self.__secrets = secrets
        # Use a minimum of 16 worker processes for network-bound I/O; more if the host has more cores.
        if cpu_count() < 16:
            procs = 16
        else:
            procs = cpu_count()
        p = Pool(processes=procs)

        if kanon:
            logging.info("Checking a total of {} accounts for compromised credentials with k-Anon".format(len(self.__secrets)))
            p.imap_unordered(self.anon_api_helper, self.__secrets)
        else:
            logging.info("Checking a total of {} accounts for compromised credentials.".format(len(self.__secrets)))
            p.imap_unordered(self.api_helper, self.__secrets)

        p.close()
        p.join()
        logging.info("Done checking hashes.")
        return self.__secrets
Beispiel #21
    def render_text(self, output: str) -> str:
        """Exports the info to string"""
        console = AirflowConsole(record=True)
        with console.capture():
            self.show(output=output, console=console)
        return console.export_text()


class FileIoException(Exception):
    """Raises when error happens in FileIo.io integration"""


@tenacity.retry(
    stop=tenacity.stop_after_attempt(5),
    wait=tenacity.wait_exponential(multiplier=1, max=10),
    retry=tenacity.retry_if_exception_type(FileIoException),
    before=tenacity.before_log(log, logging.DEBUG),
    after=tenacity.after_log(log, logging.DEBUG),
)
def _upload_text_to_fileio(content):
    """Upload text file to File.io service and return lnk"""
    resp = requests.post("https://file.io", data={"text": content})
    if not resp.ok:
        print(resp.json())
        raise FileIoException("Failed to send report to file.io service.")
    try:
        return resp.json()["link"]
    except ValueError as e:
        log.debug(e)
        raise FileIoException("Failed to send report to file.io service.")
Beispiel #22
import logging
import re
from typing import Any, Iterable

import psycopg2
import psycopg2.extensions
from tenacity import before_log, retry, retry_if_exception_type, stop_after_delay, wait_random_exponential

from connstr import ConnectionString

log = logging.getLogger(__name__)

PGConnection = psycopg2.extensions.connection


@retry(
    retry=retry_if_exception_type(psycopg2.OperationalError),
    stop=stop_after_delay(300),
    wait=wait_random_exponential(multiplier=1, max=15),
    reraise=True,
    before=before_log(log, logging.DEBUG),
)
def connect(conn_str: ConnectionString) -> PGConnection:
    con = psycopg2.connect(str(conn_str))
    con.autocommit = True
    return con
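
# Hypothetical usage of connect() (the DSN below is made up): the retry policy
# above keeps attempting for up to five minutes with jittered exponential
# backoff, which covers a database server that is still starting up.
#
#   con = connect(ConnectionString("postgresql://postgres:secret@localhost:5432/postgres"))
#   with con.cursor() as cur:
#       cur.execute("SELECT 1")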


def ensure_user(con: PGConnection,
                username: str,
                password: str,
                superuser: bool = False,
Beispiel #23
    return size, count


@app.task(bind=True, event_type=50710)
def DeleteFiles(self, path):
    path, = self.parse_params(path)
    delete_path(path)

    msg = "Deleted %s" % path
    self.create_success_event(msg)


@app.task(bind=True)
@retry(reraise=True,
       retry=retry_if_exception_type(NoSpaceLeftError),
       wait=wait_exponential(max=60),
       stop=stop_after_delay(600))
def CopyDir(self,
            src,
            dst,
            remote_credentials=None,
            block_size=DEFAULT_BLOCK_SIZE):
    src, dst = self.parse_params(src, dst)
    requests_session = None
    if remote_credentials:
        user, passw = decrypt_remote_credentials(remote_credentials)
        requests_session = requests.Session()
        requests_session.verify = settings.REQUESTS_VERIFY
        requests_session.auth = (user, passw)
Beispiel #24
    body: str,
) -> SHA:
    url = make_file_content_url(doc)
    data = {
        "message": "Interactive edit",
        "content": base64.b64encode(body.encode()).decode(),
        "sha": old_file_sha,
        "branch": new_branch,
    }
    response = session.put(url, json=data)
    response.raise_for_status()
    return response.json()["commit"]["sha"]


@tenacity.retry(
    retry=tenacity.retry_if_exception_type(requests.exceptions.HTTPError),
    wait=tenacity.wait_fixed(10),
    stop=tenacity.stop_after_attempt(3),
)
def _create_branch(session, doc, name, parent_sha):
    url = doc.api_url.child("git").child("refs")
    r = session.post(url,
                     json={
                         "ref": f"refs/heads/{name}",
                         "sha": parent_sha
                     })
    r.raise_for_status()


def create_branch(session: requests.Session,
                  doc: interedit.app.RenderedDocument, parent_sha: SHA) -> str:
            failures.add('collector %s failed: %s', name, exc)

    resp = call_inspector(data, failures)

    # Now raise everything we were delaying
    failures.raise_if_needed()

    if resp is None:
        raise errors.InspectionError('stopping inspection, as inspector '
                                     'returned an error')

    LOG.info('inspection finished successfully')
    return resp.get('uuid')


@tenacity.retry(retry=tenacity.retry_if_exception_type(
    requests.exceptions.ConnectionError),
                stop=tenacity.stop_after_attempt(5),
                wait=tenacity.wait_fixed(5),
                reraise=True)
def _post_to_inspector(url, data, verify, cert):
    return requests.post(CONF.inspection_callback_url,
                         data=data,
                         verify=verify,
                         cert=cert)


def call_inspector(data, failures):
    """Post data to inspector."""
    data['error'] = failures.get_error()

    LOG.info('posting collected data to %s', CONF.inspection_callback_url)
Beispiel #26
class NetworkExplorer(object):
    def __init__(self, this, region_name=None):
        session = helpers.get_execution_session()
        self._project_id = session.project_id
        self._settings = CONF.networking
        self._available_cidrs = self._generate_possible_cidrs()
        self._region = this.find_owner('io.murano.CloudRegion')
        self._region_name = region_name

    @staticmethod
    @session_local_storage.execution_session_memoize
    def _get_client(region_name):
        return nclient.Client(**auth_utils.get_session_client_parameters(
            service_type='network', region=region_name, conf='neutron'))

    @property
    def _client(self):
        region = self._region_name or (None if self._region is None else
                                       self._region['name'])
        return self._get_client(region)

    # NOTE(starodubcevna): to avoid simultaneous router requests we use a retry
    # decorator with a random delay of 1-10 seconds between attempts and a
    # maximum total delay of 30 seconds.
    @tenacity.retry(retry=tenacity.retry_if_exception_type(
        exc.RouterInfoException),
                    stop=tenacity.stop_after_delay(30),
                    wait=tenacity.wait_random(min=1, max=10),
                    reraise=True)
    def get_default_router(self):
        router_name = self._settings.router_name

        routers = self._client.list_routers(tenant_id=self._project_id,
                                            name=router_name).get('routers')
        if len(routers) == 0:
            LOG.debug('Router {name} not found'.format(name=router_name))
            if self._settings.create_router:
                LOG.debug('Attempting to create Router {router}'.format(
                    router=router_name))
                external_network = self._settings.external_network
                kwargs = {'id': external_network} \
                    if uuidutils.is_uuid_like(external_network) \
                    else {'name': external_network}
                networks = self._client.list_networks(**kwargs).get('networks')
                ext_nets = list(
                    filter(lambda n: n['router:external'], networks))
                if len(ext_nets) == 0:
                    raise KeyError('Router %s could not be created, '
                                   'no external network found' % router_name)
                nid = ext_nets[0]['id']

                body_data = {
                    'router': {
                        'name': router_name,
                        'external_gateway_info': {
                            'network_id': nid
                        },
                        'admin_state_up': True,
                    }
                }
                router = self._client.create_router(
                    body=body_data).get('router')
                LOG.info('Created router: {id}'.format(id=router['id']))
                return router['id']
            else:
                raise KeyError('Router %s was not found' % router_name)
        else:
            if routers[0]['external_gateway_info'] is None:
                raise exc.RouterInfoException('Please set external gateway for'
                                              ' the router %s ' % router_name)
            router_id = routers[0]['id']
        return router_id

    def get_available_cidr(self, router_id, net_id, ip_version=4):
        """Uses hash of network IDs to minimize the collisions

        Different nets will attempt to pick different cidrs out of available
        range.
        If the cidr is taken will pick another one.
        """
        taken_cidrs = self._get_cidrs_taken_by_router(router_id)
        id_hash = hash(net_id)
        num_fails = 0
        available_ipv6_cidrs = []
        if ip_version == 6:
            for cidr in self._available_cidrs:
                available_ipv6_cidrs.append(cidr.ipv6())
            self._available_cidrs = available_ipv6_cidrs
        while num_fails < len(self._available_cidrs):
            cidr = self._available_cidrs[(id_hash + num_fails) %
                                         len(self._available_cidrs)]
            if any(
                    self._cidrs_overlap(cidr, taken_cidr)
                    for taken_cidr in taken_cidrs):
                num_fails += 1
            else:
                return str(cidr)
        return None

    def get_default_dns(self, ip_version=4):
        dns_list = self._settings.default_dns
        valid_dns = []
        for ip in dns_list:
            if ip_version == 6 and netutils.is_valid_ipv6(ip):
                valid_dns.append(ip)
            elif ip_version == 4 and netutils.is_valid_ipv4(ip):
                valid_dns.append(ip)
            else:
                LOG.warning('{0} is not a valid IPv{1} address, '
                            'ignoring...'.format(ip, ip_version))
        return valid_dns

    def get_external_network_id_for_router(self, router_id):
        router = self._client.show_router(router_id).get('router')
        if not router or 'external_gateway_info' not in router:
            return None
        return router['external_gateway_info'].get('network_id')

    def get_external_network_id_for_network(self, network_id):
        network = self._client.show_network(network_id).get('network')
        if network.get('router:external', False):
            return network_id

        # Get router interfaces of the network
        router_ports = self._client.list_ports(**{
            'device_owner': 'network:router_interface',
            'network_id': network_id
        }).get('ports')

        # For each router this network is connected to
        # check if the router has external_gateway set
        for router_port in router_ports:
            ext_net_id = self.get_external_network_id_for_router(
                router_port.get('device_id'))
            if ext_net_id:
                return ext_net_id
        return None

    def _get_cidrs_taken_by_router(self, router_id):
        if not router_id:
            return []
        ports = self._client.list_ports(device_id=router_id)['ports']
        subnet_ids = []
        for port in ports:
            for fixed_ip in port['fixed_ips']:
                subnet_ids.append(fixed_ip['subnet_id'])

        all_subnets = self._client.list_subnets()['subnets']
        filtered_cidrs = [
            netaddr.IPNetwork(subnet['cidr']) for subnet in all_subnets
            if subnet['id'] in subnet_ids
        ]

        return filtered_cidrs

    @staticmethod
    def _cidrs_overlap(cidr1, cidr2):
        return (cidr1 in cidr2) or (cidr2 in cidr1)

    def _generate_possible_cidrs(self):
        bits_for_envs = int(
            math.ceil(math.log(self._settings.max_environments, 2)))
        bits_for_hosts = int(math.ceil(math.log(self._settings.max_hosts, 2)))
        width = ipv4.width
        mask_width = width - bits_for_hosts - bits_for_envs
        net = netaddr.IPNetwork('{0}/{1}'.format(
            self._settings.env_ip_template, mask_width))
        return list(net.subnet(width - bits_for_hosts))
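
    # Worked example of the arithmetic above (the numbers are assumptions, not
    # configuration defaults): with max_environments=100 and max_hosts=250 on
    # IPv4 (width=32), bits_for_envs = ceil(log2(100)) = 7 and
    # bits_for_hosts = ceil(log2(250)) = 8, so mask_width = 32 - 8 - 7 = 17.
    # With env_ip_template '10.0.0.0' this yields
    # netaddr.IPNetwork('10.0.0.0/17').subnet(24), i.e. 128 candidate /24
    # CIDRs, enough for 100 environments of up to 254 hosts each.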

    def list_networks(self):
        return self._client.list_networks()['networks']

    def list_subnetworks(self):
        return self._client.list_subnets()['subnets']

    def list_ports(self):
        return self._client.list_ports()['ports']

    def list_neutron_extensions(self):
        return self._client.list_extensions()['extensions']
Beispiel #27
def my_before_sleep(retry_state):
    if retry_state.outcome.failed:
        verb, value = 'raised', retry_state.outcome.exception()
    else:
        verb, value = 'returned', retry_state.outcome.result()

    logger.debug(
        "Retrying %s in %sms (attempt: %s) as it %s %s[%s]." %
        (get_callback_name(retry_state.fn),
         int(getattr(retry_state.next_action, 'sleep') * 1000),
         retry_state.attempt_number, verb, value.__class__.__name__, value))


@retry(reraise=True,
       stop=stop_after_attempt(2),
       wait=wait_random(min=0.01, max=0.04),
       retry=retry_if_exception_type(PermissionError),
       before_sleep=my_before_sleep)
def get_bash_line_retry(*args):
    return get_bash_line(*args)


def get_tmux_bash_pid(session):
    # out = subprocess.check_output(['tmux', 'list-panes', '-s', '-t', session, '-F', "#{pane_active} #{pane_pid}"])
    out = subprocess.check_output([
        'tmux', 'list-window', '-t', session, '-F',
        "#{window_active} #{pane_pid}"
    ])
    # print('out', out)
    out2 = [l.split(' ') for l in out.decode().splitlines()]
    # print('out2', out2)
    for active, pid in out2:
Beispiel #28
class ControllerWorker(object):
    def __init__(self):

        self._amphora_repo = repo.AmphoraRepository()
        self._amphora_health_repo = repo.AmphoraHealthRepository()
        self._health_mon_repo = repo.HealthMonitorRepository()
        self._lb_repo = repo.LoadBalancerRepository()
        self._listener_repo = repo.ListenerRepository()
        self._member_repo = repo.MemberRepository()
        self._pool_repo = repo.PoolRepository()
        self._l7policy_repo = repo.L7PolicyRepository()
        self._l7rule_repo = repo.L7RuleRepository()
        self._flavor_repo = repo.FlavorRepository()
        self._az_repo = repo.AvailabilityZoneRepository()

        if CONF.task_flow.jobboard_enabled:
            persistence = tsk_driver.MysqlPersistenceDriver()

            self.jobboard_driver = stevedore_driver.DriverManager(
                namespace='octavia.worker.jobboard_driver',
                name=CONF.task_flow.jobboard_backend_driver,
                invoke_args=(persistence, ),
                invoke_on_load=True).driver
        else:
            self.tf_engine = base_taskflow.BaseTaskFlowEngine()

    @tenacity.retry(
        retry=(tenacity.retry_if_result(_is_provisioning_status_pending_update)
               | tenacity.retry_if_exception_type()),
        wait=tenacity.wait_incrementing(
            CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
            CONF.haproxy_amphora.api_db_commit_retry_backoff,
            CONF.haproxy_amphora.api_db_commit_retry_max),
        stop=tenacity.stop_after_attempt(
            CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def _get_db_obj_until_pending_update(self, repo, id):

        return repo.get(db_apis.get_session(), id=id)
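
    # The predicate handed to retry_if_result above is defined elsewhere in
    # the project. A minimal sketch consistent with the warnings logged in
    # update_health_monitor and update_pool below (an assumption, not the
    # verbatim Octavia helper) would be:
    #
    #   def _is_provisioning_status_pending_update(db_obj):
    #       # Keep retrying until the object has moved into PENDING_UPDATE.
    #       return db_obj.provisioning_status != constants.PENDING_UPDATE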

    @property
    def services_controller(self):
        return base_taskflow.TaskFlowServiceController(self.jobboard_driver)

    def run_flow(self, func, *args, **kwargs):
        if CONF.task_flow.jobboard_enabled:
            self.services_controller.run_poster(func, *args, **kwargs)
        else:
            tf = self.tf_engine.taskflow_load(func(*args), **kwargs)
            with tf_logging.DynamicLoggingListener(tf, log=LOG):
                tf.run()

    def create_amphora(self, availability_zone=None):
        """Creates an Amphora.

        This is used to create spare amphora.

        :returns: uuid
        """
        try:
            store = {
                constants.BUILD_TYPE_PRIORITY:
                constants.LB_CREATE_SPARES_POOL_PRIORITY,
                constants.FLAVOR: None,
                constants.SERVER_GROUP_ID: None,
                constants.AVAILABILITY_ZONE: None
            }
            if availability_zone:
                store[constants.AVAILABILITY_ZONE] = (
                    self._az_repo.get_availability_zone_metadata_dict(
                        db_apis.get_session(), availability_zone))
            self.run_flow(flow_utils.get_create_amphora_flow,
                          store=store,
                          wait=True)
        except Exception as e:
            LOG.error('Failed to create an amphora due to: %s', str(e))

    def delete_amphora(self, amphora_id):
        """Deletes an existing Amphora.

        :param amphora_id: ID of the amphora to delete
        :returns: None
        :raises AmphoraNotFound: The referenced Amphora was not found
        """
        try:
            amphora = self._amphora_repo.get(db_apis.get_session(),
                                             id=amphora_id)
            store = {constants.AMPHORA: amphora.to_dict()}
            self.run_flow(flow_utils.get_delete_amphora_flow, store=store)
        except Exception as e:
            LOG.error('Failed to delete an amphora %s due to: %s', amphora_id,
                      str(e))
            return
        LOG.info('Finished deleting amphora %s.', amphora_id)

    @tenacity.retry(retry=tenacity.retry_if_exception_type(
        db_exceptions.NoResultFound),
                    wait=tenacity.wait_incrementing(
                        CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
                        CONF.haproxy_amphora.api_db_commit_retry_backoff,
                        CONF.haproxy_amphora.api_db_commit_retry_max),
                    stop=tenacity.stop_after_attempt(
                        CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def create_health_monitor(self, health_monitor):
        """Creates a health monitor.

        :param health_monitor: Provider health monitor dict
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_health_monitor = self._health_mon_repo.get(
            db_apis.get_session(),
            id=health_monitor[constants.HEALTHMONITOR_ID])

        if not db_health_monitor:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'healthmonitor',
                health_monitor[constants.HEALTHMONITOR_ID])
            raise db_exceptions.NoResultFound

        pool = db_health_monitor.pool
        pool.health_monitor = db_health_monitor
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        store = {
            constants.HEALTH_MON: health_monitor,
            constants.POOL_ID: pool.id,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb
        }
        self.run_flow(flow_utils.get_create_health_monitor_flow, store=store)

    def delete_health_monitor(self, health_monitor):
        """Deletes a health monitor.

        :param health_monitor: Provider health monitor dict
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        db_health_monitor = self._health_mon_repo.get(
            db_apis.get_session(),
            id=health_monitor[constants.HEALTHMONITOR_ID])

        pool = db_health_monitor.pool
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        store = {
            constants.HEALTH_MON: health_monitor,
            constants.POOL_ID: pool.id,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.PROJECT_ID: load_balancer.project_id
        }
        self.run_flow(flow_utils.get_delete_health_monitor_flow, store=store)

    def update_health_monitor(self, original_health_monitor,
                              health_monitor_updates):
        """Updates a health monitor.

        :param original_health_monitor: Provider health monitor dict
        :param health_monitor_updates: Dict containing updated health monitor
        :returns: None
        :raises HMNotFound: The referenced health monitor was not found
        """
        try:
            db_health_monitor = self._get_db_obj_until_pending_update(
                self._health_mon_repo,
                original_health_monitor[constants.HEALTHMONITOR_ID])
        except tenacity.RetryError as e:
            LOG.warning(
                'Health monitor did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            db_health_monitor = e.last_attempt.result()

        pool = db_health_monitor.pool

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        store = {
            constants.HEALTH_MON: original_health_monitor,
            constants.POOL_ID: pool.id,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.UPDATE_DICT: health_monitor_updates
        }
        self.run_flow(flow_utils.get_update_health_monitor_flow, store=store)

    @tenacity.retry(retry=tenacity.retry_if_exception_type(
        db_exceptions.NoResultFound),
                    wait=tenacity.wait_incrementing(
                        CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
                        CONF.haproxy_amphora.api_db_commit_retry_backoff,
                        CONF.haproxy_amphora.api_db_commit_retry_max),
                    stop=tenacity.stop_after_attempt(
                        CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def create_listener(self, listener):
        """Creates a listener.

        :param listener: A listener provider dictionary.
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=listener[constants.LISTENER_ID])
        if not db_listener:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'listener', listener[constants.LISTENER_ID])
            raise db_exceptions.NoResultFound

        load_balancer = db_listener.load_balancer
        listeners = load_balancer.listeners
        dict_listeners = []
        for li in listeners:
            dict_listeners.append(
                provider_utils.db_listener_to_provider_listener(li).to_dict())
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        store = {
            constants.LISTENERS: dict_listeners,
            constants.LOADBALANCER: provider_lb,
            constants.LOADBALANCER_ID: load_balancer.id
        }

        self.run_flow(flow_utils.get_create_listener_flow, store=store)

    def delete_listener(self, listener):
        """Deletes a listener.

        :param listener: A listener provider dictionary to delete
        :returns: None
        :raises ListenerNotFound: The referenced listener was not found
        """
        store = {
            constants.LISTENER: listener,
            constants.LOADBALANCER_ID: listener[constants.LOADBALANCER_ID],
            constants.PROJECT_ID: listener[constants.PROJECT_ID]
        }
        self.run_flow(flow_utils.get_delete_listener_flow, store=store)

    def update_listener(self, listener, listener_updates):
        """Updates a listener.

        :param listener: A listener provider dictionary to update
        :param listener_updates: Dict containing updated listener attributes
        :returns: None
        :raises ListenerNotFound: The referenced listener was not found
        """
        db_lb = self._lb_repo.get(db_apis.get_session(),
                                  id=listener[constants.LOADBALANCER_ID])
        store = {
            constants.LISTENER: listener,
            constants.UPDATE_DICT: listener_updates,
            constants.LOADBALANCER_ID: db_lb.id,
            constants.LISTENERS: [listener]
        }
        self.run_flow(flow_utils.get_update_listener_flow, store=store)

    @tenacity.retry(retry=tenacity.retry_if_exception_type(
        db_exceptions.NoResultFound),
                    wait=tenacity.wait_incrementing(
                        CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
                        CONF.haproxy_amphora.api_db_commit_retry_backoff,
                        CONF.haproxy_amphora.api_db_commit_retry_max),
                    stop=tenacity.stop_after_attempt(
                        CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def create_load_balancer(self,
                             loadbalancer,
                             flavor=None,
                             availability_zone=None):
        """Creates a load balancer by allocating Amphorae.

        First tries to allocate an existing Amphora in READY state.
        If none are available it will attempt to build one specifically
        for this load balancer.

        :param loadbalancer: The dict of load balancer to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        lb = self._lb_repo.get(db_apis.get_session(),
                               id=loadbalancer[constants.LOADBALANCER_ID])
        if not lb:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'load_balancer',
                loadbalancer[constants.LOADBALANCER_ID])
            raise db_exceptions.NoResultFound

        store = {
            lib_consts.LOADBALANCER_ID:
            loadbalancer[lib_consts.LOADBALANCER_ID],
            constants.BUILD_TYPE_PRIORITY: constants.LB_CREATE_NORMAL_PRIORITY,
            lib_consts.FLAVOR: flavor,
            lib_consts.AVAILABILITY_ZONE: availability_zone
        }

        topology = lb.topology
        if (not CONF.nova.enable_anti_affinity
                or topology == constants.TOPOLOGY_SINGLE):
            store[constants.SERVER_GROUP_ID] = None

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                lb.listeners))

        store[constants.UPDATE_DICT] = {constants.TOPOLOGY: topology}
        self.run_flow(flow_utils.get_create_load_balancer_flow,
                      topology,
                      listeners=listeners_dicts,
                      store=store)

    def delete_load_balancer(self, load_balancer, cascade=False):
        """Deletes a load balancer by de-allocating Amphorae.

        :param load_balancer: Dict of the load balancer to delete
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        loadbalancer_id = load_balancer[constants.LOADBALANCER_ID]
        db_lb = self._lb_repo.get(db_apis.get_session(), id=loadbalancer_id)
        store = {
            constants.LOADBALANCER: load_balancer,
            constants.LOADBALANCER_ID: loadbalancer_id,
            constants.SERVER_GROUP_ID: db_lb.server_group_id,
            constants.PROJECT_ID: db_lb.project_id
        }
        if cascade:
            listeners = flow_utils.get_listeners_on_lb(db_lb)
            pools = flow_utils.get_pools_on_lb(db_lb)

            self.run_flow(flow_utils.get_cascade_delete_load_balancer_flow,
                          load_balancer,
                          listeners,
                          pools,
                          store=store)
        else:
            self.run_flow(flow_utils.get_delete_load_balancer_flow,
                          load_balancer,
                          store=store)

    def update_load_balancer(self, original_load_balancer,
                             load_balancer_updates):
        """Updates a load balancer.

        :param original_load_balancer: Dict of the load balancer to update
        :param load_balancer_updates: Dict containing updated load balancer
        :returns: None
        :raises LBNotFound: The referenced load balancer was not found
        """
        store = {
            constants.LOADBALANCER:
            original_load_balancer,
            constants.LOADBALANCER_ID:
            original_load_balancer[constants.LOADBALANCER_ID],
            constants.UPDATE_DICT:
            load_balancer_updates
        }

        self.run_flow(flow_utils.get_update_load_balancer_flow, store=store)

    @tenacity.retry(retry=tenacity.retry_if_exception_type(
        db_exceptions.NoResultFound),
                    wait=tenacity.wait_incrementing(
                        CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
                        CONF.haproxy_amphora.api_db_commit_retry_backoff,
                        CONF.haproxy_amphora.api_db_commit_retry_max),
                    stop=tenacity.stop_after_attempt(
                        CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def create_member(self, member):
        """Creates a pool member.

        :param member: A member provider dictionary to create
        :returns: None
        :raises NoSuitablePool: Unable to find the node pool
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        store = {
            constants.MEMBER: member,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.POOL_ID: pool.id
        }
        if load_balancer.availability_zone:
            store[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), load_balancer.availability_zone))
        else:
            store[constants.AVAILABILITY_ZONE] = {}

        self.run_flow(flow_utils.get_create_member_flow, store=store)

    def delete_member(self, member):
        """Deletes a pool member.

        :param member: A member provider dictionary to delete
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])

        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))

        store = {
            constants.MEMBER: member,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.POOL_ID: pool.id,
            constants.PROJECT_ID: load_balancer.project_id
        }
        if load_balancer.availability_zone:
            store[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), load_balancer.availability_zone))
        else:
            store[constants.AVAILABILITY_ZONE] = {}

        self.run_flow(flow_utils.get_delete_member_flow, store=store)

    def batch_update_members(self, old_members, new_members, updated_members):
        updated_members = [(provider_utils.db_member_to_provider_member(
            self._member_repo.get(db_apis.get_session(),
                                  id=m.get(constants.ID))).to_dict(), m)
                           for m in updated_members]
        provider_old_members = [
            provider_utils.db_member_to_provider_member(
                self._member_repo.get(db_apis.get_session(),
                                      id=m.get(constants.ID))).to_dict()
            for m in old_members
        ]
        if old_members:
            pool = self._pool_repo.get(db_apis.get_session(),
                                       id=old_members[0][constants.POOL_ID])
        elif new_members:
            pool = self._pool_repo.get(db_apis.get_session(),
                                       id=new_members[0][constants.POOL_ID])
        else:
            pool = self._pool_repo.get(
                db_apis.get_session(),
                id=updated_members[0][0][constants.POOL_ID])
        load_balancer = pool.load_balancer

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        store = {
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.POOL_ID: pool.id,
            constants.PROJECT_ID: load_balancer.project_id
        }
        if load_balancer.availability_zone:
            store[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), load_balancer.availability_zone))
        else:
            store[constants.AVAILABILITY_ZONE] = {}

        self.run_flow(flow_utils.get_batch_update_members_flow,
                      provider_old_members,
                      new_members,
                      updated_members,
                      store=store)

    def update_member(self, member, member_updates):
        """Updates a pool member.

        :param member: A member provider dictionary to update
        :param member_updates: Dict containing updated member attributes
        :returns: None
        :raises MemberNotFound: The referenced member was not found
        """
        # TODO(ataraday) when other flows will use dicts - revisit this
        pool = self._pool_repo.get(db_apis.get_session(),
                                   id=member[constants.POOL_ID])
        load_balancer = pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                pool.listeners))
        store = {
            constants.MEMBER: member,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb,
            constants.POOL_ID: pool.id,
            constants.UPDATE_DICT: member_updates
        }
        if load_balancer.availability_zone:
            store[constants.AVAILABILITY_ZONE] = (
                self._az_repo.get_availability_zone_metadata_dict(
                    db_apis.get_session(), load_balancer.availability_zone))
        else:
            store[constants.AVAILABILITY_ZONE] = {}

        self.run_flow(flow_utils.get_update_member_flow, store=store)

    @tenacity.retry(retry=tenacity.retry_if_exception_type(
        db_exceptions.NoResultFound),
                    wait=tenacity.wait_incrementing(
                        CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
                        CONF.haproxy_amphora.api_db_commit_retry_backoff,
                        CONF.haproxy_amphora.api_db_commit_retry_max),
                    stop=tenacity.stop_after_attempt(
                        CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def create_pool(self, pool):
        """Creates a node pool.

        :param pool: Provider pool dict to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """

        # TODO(ataraday) It seems we need to get db pool here anyway to get
        # proper listeners
        db_pool = self._pool_repo.get(db_apis.get_session(),
                                      id=pool[constants.POOL_ID])
        if not db_pool:
            LOG.warning(
                'Failed to fetch %s %s from DB. Retrying for up to '
                '60 seconds.', 'pool', pool[constants.POOL_ID])
            raise db_exceptions.NoResultFound

        load_balancer = db_pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))

        store = {
            constants.POOL_ID: pool[constants.POOL_ID],
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.LOADBALANCER: provider_lb
        }
        self.run_flow(flow_utils.get_create_pool_flow, store=store)

    def delete_pool(self, pool):
        """Deletes a node pool.

        :param pool: Provider pool dict to delete
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        db_pool = self._pool_repo.get(db_apis.get_session(),
                                      id=pool[constants.POOL_ID])

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))
        load_balancer = db_pool.load_balancer

        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        store = {
            constants.POOL_ID: pool[constants.POOL_ID],
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER: provider_lb,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.PROJECT_ID: db_pool.project_id
        }
        self.run_flow(flow_utils.get_delete_pool_flow, store=store)

    def update_pool(self, origin_pool, pool_updates):
        """Updates a node pool.

        :param origin_pool: Provider pool dict to update
        :param pool_updates: Dict containing updated pool attributes
        :returns: None
        :raises PoolNotFound: The referenced pool was not found
        """
        try:
            db_pool = self._get_db_obj_until_pending_update(
                self._pool_repo, origin_pool[constants.POOL_ID])
        except tenacity.RetryError as e:
            LOG.warning(
                'Pool did not go into %s in 60 seconds. '
                'This is either due to an in-progress Octavia upgrade '
                'or an overloaded and failing database. Assuming '
                'an upgrade is in progress and continuing.',
                constants.PENDING_UPDATE)
            db_pool = e.last_attempt.result()

        load_balancer = db_pool.load_balancer
        provider_lb = provider_utils.db_loadbalancer_to_provider_loadbalancer(
            load_balancer).to_dict()

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                db_pool.listeners))

        store = {
            constants.POOL_ID: db_pool.id,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER: provider_lb,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.UPDATE_DICT: pool_updates
        }
        self.run_flow(flow_utils.get_update_pool_flow, store=store)

    @tenacity.retry(retry=tenacity.retry_if_exception_type(
        db_exceptions.NoResultFound),
                    wait=tenacity.wait_incrementing(
                        CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
                        CONF.haproxy_amphora.api_db_commit_retry_backoff,
                        CONF.haproxy_amphora.api_db_commit_retry_max),
                    stop=tenacity.stop_after_attempt(
                        CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def create_l7policy(self, l7policy):
        """Creates an L7 Policy.

        :param l7policy: Provider dict of the l7policy to create
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=l7policy[constants.LISTENER_ID])

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_listener]))

        store = {
            constants.L7POLICY: l7policy,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: db_listener.load_balancer.id
        }
        self.run_flow(flow_utils.get_create_l7policy_flow, store=store)

    def delete_l7policy(self, l7policy):
        """Deletes an L7 policy.

        :param l7policy: Provider dict of the l7policy to delete
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=l7policy[constants.LISTENER_ID])
        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_listener]))

        store = {
            constants.L7POLICY: l7policy,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: db_listener.load_balancer.id
        }
        self.run_flow(flow_utils.get_delete_l7policy_flow, store=store)

    def update_l7policy(self, original_l7policy, l7policy_updates):
        """Updates an L7 policy.

        :param original_l7policy: Provider dict of the l7policy to update
        :param l7policy_updates: Dict containing updated l7policy attributes
        :returns: None
        :raises L7PolicyNotFound: The referenced l7policy was not found
        """
        db_listener = self._listener_repo.get(
            db_apis.get_session(), id=original_l7policy[constants.LISTENER_ID])

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_listener]))

        store = {
            constants.L7POLICY: original_l7policy,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: db_listener.load_balancer.id,
            constants.UPDATE_DICT: l7policy_updates
        }
        self.run_flow(flow_utils.get_update_l7policy_flow, store=store)

    @tenacity.retry(retry=tenacity.retry_if_exception_type(
        db_exceptions.NoResultFound),
                    wait=tenacity.wait_incrementing(
                        CONF.haproxy_amphora.api_db_commit_retry_initial_delay,
                        CONF.haproxy_amphora.api_db_commit_retry_backoff,
                        CONF.haproxy_amphora.api_db_commit_retry_max),
                    stop=tenacity.stop_after_attempt(
                        CONF.haproxy_amphora.api_db_commit_retry_attempts))
    def create_l7rule(self, l7rule):
        """Creates an L7 Rule.

        :param l7rule: Provider dict l7rule
        :returns: None
        :raises NoResultFound: Unable to find the object
        """
        db_l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                              id=l7rule[constants.L7POLICY_ID])

        load_balancer = db_l7policy.listener.load_balancer

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_l7policy.listener]))
        l7policy_dict = provider_utils.db_l7policy_to_provider_l7policy(
            db_l7policy)

        store = {
            constants.L7RULE: l7rule,
            constants.L7POLICY: l7policy_dict.to_dict(),
            constants.L7POLICY_ID: db_l7policy.id,
            constants.LISTENERS: listeners_dicts,
            constants.LOADBALANCER_ID: load_balancer.id
        }
        self.run_flow(flow_utils.get_create_l7rule_flow, store=store)

    def delete_l7rule(self, l7rule):
        """Deletes an L7 rule.

        :param l7rule: Provider dict of the l7rule to delete
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        db_l7policy = self._l7policy_repo.get(db_apis.get_session(),
                                              id=l7rule[constants.L7POLICY_ID])
        l7policy = provider_utils.db_l7policy_to_provider_l7policy(db_l7policy)
        load_balancer = db_l7policy.listener.load_balancer

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_l7policy.listener]))

        store = {
            constants.L7RULE: l7rule,
            constants.L7POLICY: l7policy.to_dict(),
            constants.LISTENERS: listeners_dicts,
            constants.L7POLICY_ID: db_l7policy.id,
            constants.LOADBALANCER_ID: load_balancer.id
        }
        self.run_flow(flow_utils.get_delete_l7rule_flow, store=store)

    def update_l7rule(self, original_l7rule, l7rule_updates):
        """Updates an L7 rule.

        :param original_l7rule: Original provider dict of the l7rule to update
        :param l7rule_updates: Dict containing updated l7rule attributes
        :returns: None
        :raises L7RuleNotFound: The referenced l7rule was not found
        """
        db_l7policy = self._l7policy_repo.get(
            db_apis.get_session(), id=original_l7rule[constants.L7POLICY_ID])
        load_balancer = db_l7policy.listener.load_balancer

        listeners_dicts = (
            provider_utils.db_listeners_to_provider_dicts_list_of_dicts(
                [db_l7policy.listener]))
        l7policy_dict = provider_utils.db_l7policy_to_provider_l7policy(
            db_l7policy)

        store = {
            constants.L7RULE: original_l7rule,
            constants.L7POLICY: l7policy_dict.to_dict(),
            constants.LISTENERS: listeners_dicts,
            constants.L7POLICY_ID: db_l7policy.id,
            constants.LOADBALANCER_ID: load_balancer.id,
            constants.UPDATE_DICT: l7rule_updates
        }
        self.run_flow(flow_utils.get_update_l7rule_flow, store=store)

    def failover_amphora(self, amphora_id):
        """Perform failover operations for an amphora.

        Note: This expects the load balancer to already be in
        provisioning_status=PENDING_UPDATE state.

        :param amphora_id: ID for amphora to failover
        :returns: None
        :raises octavia.common.exceptions.NotFound: The referenced amphora was
                                                    not found
        """
        amphora = None
        try:
            amphora = self._amphora_repo.get(db_apis.get_session(),
                                             id=amphora_id)
            if amphora is None:
                LOG.error(
                    'Amphora failover for amphora %s failed because '
                    'there is no record of this amphora in the '
                    'database. Check that the [house_keeping] '
                    'amphora_expiry_age configuration setting is not '
                    'too short. Skipping failover.', amphora_id)
                raise exceptions.NotFound(resource=constants.AMPHORA,
                                          id=amphora_id)

            if amphora.status == constants.DELETED:
                LOG.warning(
                    'Amphora %s is marked DELETED in the database but '
                    'was submitted for failover. Deleting it from the '
                    'amphora health table to exclude it from health '
                    'checks and skipping the failover.', amphora.id)
                self._amphora_health_repo.delete(db_apis.get_session(),
                                                 amphora_id=amphora.id)
                return

            loadbalancer = None
            if amphora.load_balancer_id:
                loadbalancer = self._lb_repo.get(db_apis.get_session(),
                                                 id=amphora.load_balancer_id)
            lb_amp_count = None
            if loadbalancer:
                if loadbalancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
                    lb_amp_count = 2
                elif loadbalancer.topology == constants.TOPOLOGY_SINGLE:
                    lb_amp_count = 1

            az_metadata = {}
            flavor_dict = {}
            lb_id = None
            vip_dict = {}
            server_group_id = None
            if loadbalancer:
                lb_id = loadbalancer.id
                # Even if the LB doesn't have a flavor, create one and
                # pass through the topology.
                if loadbalancer.flavor_id:
                    flavor_dict = self._flavor_repo.get_flavor_metadata_dict(
                        db_apis.get_session(), loadbalancer.flavor_id)
                    flavor_dict[constants.LOADBALANCER_TOPOLOGY] = (
                        loadbalancer.topology)
                else:
                    flavor_dict = {
                        constants.LOADBALANCER_TOPOLOGY: loadbalancer.topology
                    }
                if loadbalancer.availability_zone:
                    az_metadata = (
                        self._az_repo.get_availability_zone_metadata_dict(
                            db_apis.get_session(),
                            loadbalancer.availability_zone))
                vip_dict = loadbalancer.vip.to_dict()
                server_group_id = loadbalancer.server_group_id
            provider_lb_dict = (
                provider_utils.db_loadbalancer_to_provider_loadbalancer
            )(loadbalancer).to_dict() if loadbalancer else loadbalancer

            stored_params = {
                constants.AVAILABILITY_ZONE: az_metadata,
                constants.BUILD_TYPE_PRIORITY:
                constants.LB_CREATE_FAILOVER_PRIORITY,
                constants.FLAVOR: flavor_dict,
                constants.LOADBALANCER: provider_lb_dict,
                constants.SERVER_GROUP_ID: server_group_id,
                constants.LOADBALANCER_ID: lb_id,
                constants.VIP: vip_dict,
                constants.AMPHORA_ID: amphora_id
            }

            self.run_flow(flow_utils.get_failover_amphora_flow,
                          amphora.to_dict(),
                          lb_amp_count,
                          store=stored_params,
                          wait=True)

            LOG.info(
                "Successfully completed the failover for an amphora: %s", {
                    "id": amphora_id,
                    "load_balancer_id": lb_id,
                    "lb_network_ip": amphora.lb_network_ip,
                    "compute_id": amphora.compute_id,
                    "role": amphora.role
                })

        except Exception as e:
            with excutils.save_and_reraise_exception(reraise=False):
                LOG.exception("Amphora %s failover exception: %s", amphora_id,
                              str(e))
                self._amphora_repo.update(db_apis.get_session(),
                                          amphora_id,
                                          status=constants.ERROR)
                if amphora and amphora.load_balancer_id:
                    self._lb_repo.update(db_apis.get_session(),
                                         amphora.load_balancer_id,
                                         provisioning_status=constants.ERROR)

    @staticmethod
    def _get_amphorae_for_failover(load_balancer):
        """Returns an ordered list of amphora to failover.

        :param load_balancer: The load balancer being failed over.
        :returns: An ordered list of amphora to failover,
                  first amp to failover is last in the list
        :raises octavia.common.exceptions.InvalidTopology: LB has an unknown
                                                           topology.
        """
        if load_balancer.topology == constants.TOPOLOGY_SINGLE:
            # In SINGLE topology, amp failover order does not matter
            return [
                a.to_dict() for a in load_balancer.amphorae
                if a.status != constants.DELETED
            ]

        if load_balancer.topology == constants.TOPOLOGY_ACTIVE_STANDBY:
            # In Active/Standby we should prefer failing over the standby amp
            # first, in case the active amp is still able to pass
            # traffic.
            # Note: The active amp can switch at any time and in less than a
            #       second, so this is "best effort".
            amphora_driver = utils.get_amphora_driver()
            timeout_dict = {
                constants.CONN_MAX_RETRIES:
                CONF.haproxy_amphora.failover_connection_max_retries,
                constants.CONN_RETRY_INTERVAL:
                CONF.haproxy_amphora.failover_connection_retry_interval
            }
            amps = []
            selected_amp = None
            for amp in load_balancer.amphorae:
                if amp.status == constants.DELETED:
                    continue
                if selected_amp is None:
                    try:
                        if amphora_driver.get_interface_from_ip(
                                amp, load_balancer.vip.ip_address,
                                timeout_dict):
                            # This is a potential ACTIVE, add it to the list
                            amps.append(amp.to_dict())
                        else:
                            # This one doesn't have the VIP IP, so start
                            # failovers here.
                            selected_amp = amp
                            LOG.debug(
                                "Selected amphora %s as the initial "
                                "failover amphora.", amp.id)
                    except Exception:
                        # This amphora is broken, so start failovers here.
                        selected_amp = amp
                else:
                    # We have already found a STANDBY, so add the rest to the
                    # list without querying them.
                    amps.append(amp.to_dict())
            # Put the selected amphora at the end of the list so it is
            # first to failover.
            if selected_amp:
                amps.append(selected_amp.to_dict())
            return amps

        LOG.error(
            'Unknown load balancer topology found: %s, aborting '
            'failover.', load_balancer.topology)
        raise exceptions.InvalidTopology(topology=load_balancer.topology)

    def failover_loadbalancer(self, load_balancer_id):
        """Perform failover operations for a load balancer.

        Note: This expects the load balancer to already be in
        provisioning_status=PENDING_UPDATE state.

        :param load_balancer_id: ID for load balancer to failover
        :returns: None
        :raises octavia.common.exceptions.NotFound: The load balancer was not
                                                    found.
        """
        try:
            lb = self._lb_repo.get(db_apis.get_session(), id=load_balancer_id)
            if lb is None:
                raise exceptions.NotFound(resource=constants.LOADBALANCER,
                                          id=load_balancer_id)

            # Get the ordered list of amphorae to failover for this LB.
            amps = self._get_amphorae_for_failover(lb)

            if lb.topology == constants.TOPOLOGY_SINGLE:
                if len(amps) != 1:
                    LOG.warning(
                        '%d amphorae found on load balancer %s where '
                        'one should exist. Repairing.', len(amps),
                        load_balancer_id)
            elif lb.topology == constants.TOPOLOGY_ACTIVE_STANDBY:

                if len(amps) != 2:
                    LOG.warning(
                        '%d amphorae found on load balancer %s where '
                        'two should exist. Repairing.', len(amps),
                        load_balancer_id)
            else:
                LOG.error(
                    'Unknown load balancer topology found: %s, aborting '
                    'failover!', lb.topology)
                raise exceptions.InvalidTopology(topology=lb.topology)

            # We must provide a topology in the flavor definition
            # here for the amphora to be created with the correct
            # configuration.
            if lb.flavor_id:
                flavor = self._flavor_repo.get_flavor_metadata_dict(
                    db_apis.get_session(), lb.flavor_id)
                flavor[constants.LOADBALANCER_TOPOLOGY] = lb.topology
            else:
                flavor = {constants.LOADBALANCER_TOPOLOGY: lb.topology}

            provider_lb_dict = (
                provider_utils.db_loadbalancer_to_provider_loadbalancer(
                    lb).to_dict() if lb else lb)

            provider_lb_dict[constants.FLAVOR] = flavor

            stored_params = {
                constants.LOADBALANCER: provider_lb_dict,
                constants.BUILD_TYPE_PRIORITY:
                constants.LB_CREATE_FAILOVER_PRIORITY,
                constants.SERVER_GROUP_ID: lb.server_group_id,
                constants.LOADBALANCER_ID: lb.id,
                constants.FLAVOR: flavor
            }

            if lb.availability_zone:
                stored_params[constants.AVAILABILITY_ZONE] = (
                    self._az_repo.get_availability_zone_metadata_dict(
                        db_apis.get_session(), lb.availability_zone))
            else:
                stored_params[constants.AVAILABILITY_ZONE] = {}

            self.run_flow(flow_utils.get_failover_LB_flow,
                          amps,
                          provider_lb_dict,
                          store=stored_params,
                          wait=True)

            LOG.info('Failover of load balancer %s completed successfully.',
                     lb.id)

        except Exception as e:
            with excutils.save_and_reraise_exception(reraise=False):
                LOG.exception("LB %(lbid)s failover exception: %(exc)s", {
                    'lbid': load_balancer_id,
                    'exc': str(e)
                })
                self._lb_repo.update(db_apis.get_session(),
                                     load_balancer_id,
                                     provisioning_status=constants.ERROR)

    def amphora_cert_rotation(self, amphora_id):
        """Perform cert rotation for an amphora.

        :param amphora_id: ID for amphora to rotate
        :returns: None
        :raises AmphoraNotFound: The referenced amphora was not found
        """

        amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
        LOG.info("Start amphora cert rotation, amphora's id is: %s",
                 amphora_id)

        store = {
            constants.AMPHORA: amp.to_dict(),
            constants.AMPHORA_ID: amphora_id
        }

        self.run_flow(flow_utils.cert_rotate_amphora_flow, store=store)
        LOG.info("Finished amphora cert rotation, amphora's id was: %s",
                 amphora_id)

    def update_amphora_agent_config(self, amphora_id):
        """Update the amphora agent configuration.

        Note: This will update the amphora agent configuration file and
              update the running configuration for mutatable configuration
              items.

        :param amphora_id: ID of the amphora to update.
        :returns: None
        """
        LOG.info(
            "Start amphora agent configuration update, amphora's id "
            "is: %s", amphora_id)
        amp = self._amphora_repo.get(db_apis.get_session(), id=amphora_id)
        lb = self._amphora_repo.get_lb_for_amphora(db_apis.get_session(),
                                                   amphora_id)
        flavor = {}
        if lb.flavor_id:
            flavor = self._flavor_repo.get_flavor_metadata_dict(
                db_apis.get_session(), lb.flavor_id)

        store = {constants.AMPHORA: amp.to_dict(), constants.FLAVOR: flavor}

        self.run_flow(flow_utils.update_amphora_config_flow, store=store)
        LOG.info(
            "Finished amphora agent configuration update, amphora's id "
            "was: %s", amphora_id)
Example #29
        schema_file (str): Path to a file holding a JSONschema document
        classname (str, optional): Name of the instantiated class to return.
            Optional if only one class is defined by the schema document.

    Returns:
        class: A Python class named after and defined by the schema
    """
    with open(schema_file, 'r') as schema_fp:
        schema_dict = json.load(schema_fp)
    actual_classname = classname_from_long_title(
        title_from_schema(schema_dict))
    return get_class_object_from_dict(schema_dict,
                                      classname=actual_classname,
                                      use_cache=use_cache)


@retry(retry=retry_if_exception_type(RefResolutionError),
       stop=(stop_after_delay(15) | stop_after_attempt(5)),
       wait=wait_random(min=1, max=3),
       reraise=True)
def get_class_object_from_uri(schema_uri, classname=None, use_cache=True):
    """Instantiate a Python class object from a JSONschema URI

    Args:
        schema_uri (str): URI for a JSONschema document
        classname (str, optional): Name of the instantiated class to return.
            Optional if only one class is defined by the schema document.

    Returns:
        class: A Python class named after and defined by the schema
    """
    resp = requests.get(schema_uri, allow_redirects=True, timeout=1)
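
The decorator above combines two stop conditions with the | operator, so retries end after 15 seconds or 5 attempts, whichever trips first, with a random 1-3 second pause between tries. A standalone sketch of the same combination (flaky_fetch is a made-up stand-in for the schema download):

import random

from tenacity import (retry, retry_if_exception_type, stop_after_attempt,
                      stop_after_delay, wait_random)


@retry(retry=retry_if_exception_type(ConnectionError),
       stop=(stop_after_delay(15) | stop_after_attempt(5)),
       wait=wait_random(min=1, max=3),
       reraise=True)
def flaky_fetch():
    # Fails roughly half the time; tenacity keeps retrying until either
    # stop condition (15 seconds elapsed or 5 attempts) is reached.
    if random.random() < 0.5:
        raise ConnectionError("transient failure")
    return "ok"
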
Example #30
    return decorator


# NOTE I don't like this but slack client is annoying (kglisson)
SLACK_GET_ENDPOINTS = [
    "conversations.history",
    "conversations.info",
    "users.conversations",
    "users.info",
    "users.lookupByEmail",
    "users.profile.get",
]


@retry(stop=stop_after_attempt(5), retry=retry_if_exception_type(TryAgain))
def make_call(client: Any, endpoint: str, **kwargs):
    """Make an slack client api call."""

    try:
        if endpoint in SLACK_GET_ENDPOINTS:
            response = client.api_call(endpoint, http_verb="GET", params=kwargs)
        else:
            response = client.api_call(endpoint, json=kwargs)
    except slack.errors.SlackApiError as e:
        log.error(f"SlackError. Response: {e.response} Endpoint: {endpoint} kwargs: {kwargs}")

        # NOTE we've seen some eventual consistency problems with channel creation
        if e.response["error"] == "channel_not_found":
            raise TryAgain
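
The channel_not_found branch leans on tenacity's built-in TryAgain exception: raising it inside a function decorated with retry_if_exception_type(TryAgain) simply schedules another attempt, which papers over Slack's eventual consistency on freshly created channels. The same pattern in isolation (the in-memory _channels dict stands in for Slack's view of the workspace):

from tenacity import TryAgain, retry, retry_if_exception_type, stop_after_attempt

_channels = {}


@retry(stop=stop_after_attempt(5), retry=retry_if_exception_type(TryAgain))
def get_channel(name):
    # A just-created channel may not be visible yet; ask tenacity to retry.
    if name not in _channels:
        raise TryAgain
    return _channels[name]
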
Example #31
    def __init__(
        self,
        logger_name: str,
        max_meter_queue_size: int,
        current_dir: pathlib.Path,
        must_exit_after_24h: bool,
        messaging_thread_provider: typing.Callable[[queue.Queue],
                                                   MessagingThreadProtocol],
        meter_values_generator: typing.Callable[[], typing.Generator[int, None,
                                                                     None]],
        default_action_handler: typing.Optional[
            typing.Callable[[], None]] = None,
        tests_modules_names_provider: typing.Callable[
            [], typing.List[str]] = lambda: []):
        """

        Parameters
        ----------
        logger_name : str
            Name of the logger to be used
        max_meter_queue_size : int
            Size of the `queue.Queue` that will hold the Meter's generated values.
        current_dir : pathlib.Path
            Refers to the parent directory
        must_exit_after_24h : bool
            If True, the app will abort its execution after running for 24 hours
        messaging_thread_provider : callable
            Method to be executed to acquire an instance of a `MessagingThread`.
        meter_values_generator : generator
            Generates a value in the range [0, 9000] every time it is iterated over.
        default_action_handler: callable, optional
            When provided, specifies the default handler to be used when the
            requested action is not recognized.
        tests_modules_names_provider : callable, optional
            Provides the list of tests modules to run.
        """

        # Maps Command Line Interface allowed actions to actions handlers.
        self._action_handlers: typing.Dict[str, typing.Callable] = {
            "help": self._usage_handler,
            "test": self._run_tests_handler,
            "start": self._run_meter_simulator_handler
        }

        self._log: logging.Logger = logging.getLogger(logger_name)
        self._log.debug(f"{self.__class__.__name__}.__init__()")

        self._max_meter_queue_size = max_meter_queue_size
        self._meter_values_queue: typing.Optional[queue.Queue] = None
        # Establish how to retry adding a meter value to the queue when it is
        # full, and when to stop retrying. Retries are aborted if the app was
        # configured to exit after 24h and that time has already elapsed:
        self._retry_policy = tenacity.Retrying(
            wait=tenacity.wait_fixed(
                0.5
            ),  # Wait 0.5sec before try again to add the generated meter value to the queue
            retry=tenacity.retry_if_exception_type(queue.Full),
            stop=tenacity.stop_any(self._must_exit),
            after=lambda _, __, ___: self._log.warning(
                "Meter's values queue is Full!!!"))

        self._current_dir = current_dir

        self._must_exit_after_24h: bool = must_exit_after_24h
        self._start_datetime: datetime = datetime.datetime.now()
        self._stop_datetime: datetime = self._start_datetime + datetime.timedelta(
            hours=24)
        # self._stop_datetime: datetime = self._start_datetime + datetime.timedelta(minutes=2)

        self._messaging_thread_provider: typing.Callable[[queue.Queue], MessagingThreadProtocol] = \
            messaging_thread_provider

        self._meter_values_generator: typing.Callable[[], typing.Generator[int, None, None]] = \
            meter_values_generator
        self._default_action_handler: typing.Callable[[], None] = \
            default_action_handler or self._usage_handler

        self._tests_modules_names_provider: typing.Callable[[], typing.List[str]] = \
            tests_modules_names_provider

        self._messaging_thread: typing.Optional[MessagingThreadProtocol] = None

        self._sys_argv: typing.Optional[typing.List[str]] = None
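
The retry policy built in __init__ is a tenacity.Retrying instance, meant to be invoked as a callable around queue.Queue.put so a full queue is retried every 0.5 seconds until the stop condition fires. A simplified sketch of that usage outside the class (a fixed attempt budget replaces the class's custom _must_exit stop, and the names are illustrative):

import queue

import tenacity

values = queue.Queue(maxsize=100)

retry_policy = tenacity.Retrying(
    wait=tenacity.wait_fixed(0.5),                       # pause between attempts
    retry=tenacity.retry_if_exception_type(queue.Full),  # only retry when the queue is full
    stop=tenacity.stop_after_attempt(10))                # give up eventually


def enqueue(value):
    # put_nowait raises queue.Full immediately instead of blocking, which
    # lets the Retrying object own the waiting and stopping behaviour.
    retry_policy(values.put_nowait, value)
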
Example #32
class NeutronNetworkingBase(test_utils.OpenStackBaseTest):
    """Base for checking openstack instances have valid networking."""

    RESOURCE_PREFIX = 'zaza-neutrontests'

    @classmethod
    def setUpClass(cls):
        """Run class setup for running Neutron API Networking tests."""
        super(NeutronNetworkingBase, cls).setUpClass(
            application_name='neutron-api')
        cls.neutron_client = (
            openstack_utils.get_neutron_session_client(cls.keystone_session))

    @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60),
                    reraise=True, stop=tenacity.stop_after_attempt(8))
    def validate_instance_can_reach_other(self,
                                          instance_1,
                                          instance_2,
                                          verify,
                                          mtu=None):
        """
        Validate that an instance can reach another's fixed and floating IPs.

        :param instance_1: The instance to check networking from
        :type instance_1: nova_client.Server

        :param instance_2: The instance to check networking to
        :type instance_2: nova_client.Server

        :param verify: callback to verify result
        :type verify: callable

        :param mtu: Check that we can send non-fragmented packets of given size
        :type mtu: Optional[int]
        """
        floating_1 = floating_ips_from_instance(instance_1)[0]
        floating_2 = floating_ips_from_instance(instance_2)[0]
        address_2 = fixed_ips_from_instance(instance_2)[0]

        username = guest.boot_tests['bionic']['username']
        password = guest.boot_tests['bionic'].get('password')
        privkey = openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME)

        cmds = [
            'ping -c 1',
        ]
        if mtu:
            # the on-wire packet will be 28 bytes larger than the value
            # provided to ping(8) -s parameter
            packetsize = mtu - 28
            cmds.append(
                'ping -M do -s {} -c 1'.format(packetsize))

        for cmd in cmds:
            openstack_utils.ssh_command(
                username, floating_1, 'instance-1',
                '{} {}'.format(cmd, address_2),
                password=password, privkey=privkey, verify=verify)

            openstack_utils.ssh_command(
                username, floating_1, 'instance-1',
                '{} {}'.format(cmd, floating_2),
                password=password, privkey=privkey, verify=verify)

    @tenacity.retry(wait=tenacity.wait_exponential(multiplier=1, max=60),
                    reraise=True, stop=tenacity.stop_after_attempt(8))
    def validate_instance_can_reach_router(self, instance, verify, mtu=None):
        """
        Validate that an instance can reach its primary gateway.

        We make the assumption that the router's IP is 192.168.0.1
        as that's the network that is setup in
        neutron.setup.basic_overcloud_network which is used in all
        Zaza Neutron validations.

        :param instance: The instance to check networking from
        :type instance: nova_client.Server

        :param verify: callback to verify result
        :type verify: callable

        :param mtu: Check that we can send non-fragmented packets of given size
        :type mtu: Optional[int]
        """
        address = floating_ips_from_instance(instance)[0]

        username = guest.boot_tests['bionic']['username']
        password = guest.boot_tests['bionic'].get('password')
        privkey = openstack_utils.get_private_key(nova_utils.KEYPAIR_NAME)

        cmds = [
            'ping -c 1',
        ]
        if mtu:
            # the on-wire packet will be 28 bytes larger than the value
            # provided to ping(8) -s parameter
            packetsize = mtu - 28
            cmds.append(
                'ping -M do -s {} -c 1'.format(packetsize))

        for cmd in cmds:
            openstack_utils.ssh_command(
                username, address, 'instance', '{} 192.168.0.1'.format(cmd),
                password=password, privkey=privkey, verify=verify)

    @tenacity.retry(wait=tenacity.wait_exponential(min=5, max=60),
                    reraise=True, stop=tenacity.stop_after_attempt(8),
                    retry=tenacity.retry_if_exception_type(AssertionError))
    def check_server_state(self, nova_client, state, server_id=None,
                           server_name=None):
        """Wait for server to reach desired state.

        :param nova_client: Nova client to use when checking status
        :type nova_client: nova client
        :param state: Target state for server
        :type state: str
        :param server_id: UUID of server to check
        :type server_id: str
        :param server_name: Name of server to check
        :type server_name: str
        :raises: AssertionError
        """
        if server_name:
            server_id = nova_client.servers.find(name=server_name).id
        server = nova_client.servers.find(id=server_id)
        assert server.status == state

    @tenacity.retry(wait=tenacity.wait_exponential(min=5, max=60),
                    reraise=True, stop=tenacity.stop_after_attempt(8),
                    retry=tenacity.retry_if_exception_type(AssertionError))
    def check_neutron_agent_up(self, neutron_client, host_name):
        """Wait for agents to come up.

        :param neutron_client: Neutron client to use when checking status
        :type neutron_client: neutron client
        :param host_name: The name of the host whose agents need checking
        :type host_name: str
        :raises: AssertionError
        """
        for agent in neutron_client.list_agents()['agents']:
            if agent['host'] == host_name:
                assert agent['admin_state_up']
                assert agent['alive']

    def effective_network_mtu(self, network_name):
        """Retrieve effective MTU for a network.

        If the `instance-mtu` configuration option is set to a value lower than
        the network MTU this method will return the value of that. Otherwise
        Neutron's value for MTU on a network will be returned.

        :param network_name: Name of network to query
        :type network_name: str
        :returns: MTU for network
        :rtype: int
        """
        cfg_instance_mtu = None
        for app in ('neutron-gateway', 'neutron-openvswitch'):
            try:
                cfg = zaza.model.get_application_config(app)
                cfg_instance_mtu = int(cfg['instance-mtu']['value'])
                break
            except KeyError:
                pass

        networks = self.neutron_client.list_networks(name=network_name)
        network_mtu = int(next(iter(networks['networks']))['mtu'])

        if cfg_instance_mtu and cfg_instance_mtu < network_mtu:
            logging.info('Using MTU from application "{}" config: {}'
                         .format(app, cfg_instance_mtu))
            return cfg_instance_mtu
        else:
            logging.info('Using MTU from network "{}": {}'
                         .format(network_name, network_mtu))
            return network_mtu

    def check_connectivity(self, instance_1, instance_2):
        """Run North/South and East/West connectivity tests."""
        def verify(stdin, stdout, stderr):
            """Validate that the SSH command exited 0."""
            self.assertEqual(stdout.channel.recv_exit_status(), 0)

        try:
            mtu_1 = self.effective_network_mtu(
                network_name_from_instance(instance_1))
            mtu_2 = self.effective_network_mtu(
                network_name_from_instance(instance_2))
            mtu_min = min(mtu_1, mtu_2)
        except neutronexceptions.NotFound:
            # Older versions of OpenStack cannot look up network by name, just
            # skip the check if that is the case.
            mtu_1 = mtu_2 = mtu_min = None

        # Verify network from 1 to 2
        self.validate_instance_can_reach_other(
            instance_1, instance_2, verify, mtu_min)

        # Verify network from 2 to 1
        self.validate_instance_can_reach_other(
            instance_2, instance_1, verify, mtu_min)

        # Validate tenant to external network routing
        self.validate_instance_can_reach_router(instance_1, verify, mtu_1)
        self.validate_instance_can_reach_router(instance_2, verify, mtu_2)
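
check_server_state and check_neutron_agent_up above turn tenacity into a polling loop: the assertion is the retry trigger, and retry_if_exception_type(AssertionError) re-runs the check with exponential backoff until the state matches or the attempt budget runs out. The same idea reduced to its core (get_status is a stand-in for the Nova/Neutron lookup):

import tenacity


@tenacity.retry(wait=tenacity.wait_exponential(min=5, max=60),
                reraise=True, stop=tenacity.stop_after_attempt(8),
                retry=tenacity.retry_if_exception_type(AssertionError))
def wait_for_state(get_status, expected):
    # The assert is the retry trigger: keep polling until the status
    # matches or the eighth attempt fails, then re-raise the AssertionError.
    status = get_status()
    assert status == expected, 'still {}, want {}'.format(status, expected)
    return status
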
Example #33
    def request(self,
                method,
                additional_headers=None,
                retry=True,
                timeout=None,
                auth=None,
                use_gzip_encoding=None,
                params=None,
                max_attempts=None,
                **kwargs):
        """
        Make an HTTP request by calling self._request with backoff retry.

        :param method: request method
        :type method: str
        :param additional_headers: additional headers to include in the request
        :type additional_headers: dict[str, str]
        :param retry: boolean indicating whether to retry if the request fails
        :type retry: boolean
        :param timeout: timeout in seconds, overrides default_timeout_secs
        :type timeout: float
        :param auth: auth scheme for the request
        :type auth: requests.auth.AuthBase
        :param use_gzip_encoding: boolean indicating whether to pass gzip
                                  encoding in the request headers or not
        :type use_gzip_encoding: boolean | None
        :param params: additional params to include in the request
        :type params: str | dict[str, T] | None
        :param max_attempts: maximum number of attempts to try for any request
        :type max_attempts: int
        :param kwargs: additional arguments to pass to requests.request
        :type kwargs: dict[str, T]
        :return: HTTP response
        :rtype: requests.Response
        """
        request = self._request

        if retry:
            if max_attempts is None:
                max_attempts = self.default_max_attempts

            # We retry only when it makes sense: either due to a network
            # partition (e.g. connection errors) or if the request failed
            # due to a server error such as 500s, timeouts, and so on.
            request = tenacity.retry(
                stop=tenacity.stop_after_attempt(max_attempts),
                wait=tenacity.wait_exponential(),
                retry=tenacity.retry_if_exception_type((
                    requests.exceptions.Timeout,
                    requests.exceptions.ConnectionError,
                    MesosServiceUnavailableException,
                    MesosInternalServerErrorException,
                )),
                reraise=True,
            )(request)

        try:
            return request(
                method=method,
                additional_headers=additional_headers,
                timeout=timeout,
                auth=auth,
                use_gzip_encoding=use_gzip_encoding,
                params=params,
                **kwargs
            )
        # If the request itself failed, an exception subclassed from
        # RequestException will be raised. Catch this and reraise as
        # MesosException since we want the caller to be able to catch
        # and handle this.
        except requests.exceptions.RequestException as err:
            raise MesosException('Request failed', err)
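
Rather than decorating _request once at class-definition time, request() wraps the bound method per call, so the attempt budget (max_attempts) and the retry toggle can differ between calls. A stripped-down sketch of that call-time wrapping (Client, fetch and TransientError are invented for the example):

import tenacity


class TransientError(Exception):
    """Stand-in for the timeout/connection/5xx errors retried above."""


class Client:
    def __init__(self):
        self.calls = 0

    def fetch(self):
        # Succeeds on the third call, to simulate a recovering backend.
        self.calls += 1
        if self.calls < 3:
            raise TransientError("not ready yet")
        return "response"

    def fetch_with_retry(self, max_attempts=5):
        # Wrap the bound method at call time so each call can use a
        # different attempt budget, as request() does with self._request.
        wrapped = tenacity.retry(
            stop=tenacity.stop_after_attempt(max_attempts),
            wait=tenacity.wait_exponential(),
            retry=tenacity.retry_if_exception_type(TransientError),
            reraise=True,
        )(self.fetch)
        return wrapped()


print(Client().fetch_with_retry())  # "response" after two retried attempts
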
Example #34
class Mega:
    def __init__(self, options=None):
        self.schema = 'https'
        self.domain = 'mega.co.nz'
        self.timeout = 160  # max secs to wait for resp from api requests
        self.sid = None
        self.sequence_num = random.randint(0, 0xFFFFFFFF)
        self.request_id = make_id(10)
        self._trash_folder_node_id = None

        if options is None:
            options = {}
        self.options = options

    def login(self, email=None, password=None):
        if email:
            self._login_user(email, password)
        else:
            self.login_anonymous()
        self._trash_folder_node_id = self.get_node_by_type(4)[0]
        logger.info('Login complete')
        return self

    def _login_user(self, email, password):
        logger.info('Logging in user...')
        email = email.lower()
        get_user_salt_resp = self._api_request({'a': 'us0', 'user': email})
        user_salt = None
        try:
            user_salt = base64_to_a32(get_user_salt_resp['s'])
        except KeyError:
            # v1 user account
            password_aes = prepare_key(str_to_a32(password))
            user_hash = stringhash(email, password_aes)
        else:
            # v2 user account
            pbkdf2_key = hashlib.pbkdf2_hmac(hash_name='sha512',
                                             password=password.encode(),
                                             salt=a32_to_str(user_salt),
                                             iterations=100000,
                                             dklen=32)
            password_aes = str_to_a32(pbkdf2_key[:16])
            user_hash = base64_url_encode(pbkdf2_key[-16:])
        resp = self._api_request({'a': 'us', 'user': email, 'uh': user_hash})
        if isinstance(resp, int):
            raise RequestError(resp)
        self._login_process(resp, password_aes)

    def login_anonymous(self):
        logger.info('Logging in anonymous temporary user...')
        master_key = [random.randint(0, 0xFFFFFFFF)] * 4
        password_key = [random.randint(0, 0xFFFFFFFF)] * 4
        session_self_challenge = [random.randint(0, 0xFFFFFFFF)] * 4

        user = self._api_request({
            'a':
            'up',
            'k':
            a32_to_base64(encrypt_key(master_key, password_key)),
            'ts':
            base64_url_encode(
                a32_to_str(session_self_challenge) +
                a32_to_str(encrypt_key(session_self_challenge, master_key)))
        })

        resp = self._api_request({'a': 'us', 'user': user})
        if isinstance(resp, int):
            raise RequestError(resp)
        self._login_process(resp, password_key)

    def _login_process(self, resp, password):
        encrypted_master_key = base64_to_a32(resp['k'])
        self.master_key = decrypt_key(encrypted_master_key, password)
        if 'tsid' in resp:
            tsid = base64_url_decode(resp['tsid'])
            key_encrypted = a32_to_str(
                encrypt_key(str_to_a32(tsid[:16]), self.master_key))
            if key_encrypted == tsid[-16:]:
                self.sid = resp['tsid']
        elif 'csid' in resp:
            encrypted_rsa_private_key = base64_to_a32(resp['privk'])
            rsa_private_key = decrypt_key(encrypted_rsa_private_key,
                                          self.master_key)

            private_key = a32_to_str(rsa_private_key)
            # The private_key contains 4 MPI integers concatenated together.
            rsa_private_key = [0, 0, 0, 0]
            for i in range(4):
                # An MPI integer has a 2-byte header which describes the number
                # of bits in the integer.
                bitlength = (private_key[0] * 256) + private_key[1]
                bytelength = math.ceil(bitlength / 8)
                # Add 2 bytes to accommodate the MPI header
                bytelength += 2
                rsa_private_key[i] = mpi_to_int(private_key[:bytelength])
                private_key = private_key[bytelength:]

            first_factor_p = rsa_private_key[0]
            second_factor_q = rsa_private_key[1]
            private_exponent_d = rsa_private_key[2]
            # In MEGA's webclient javascript, they assign [3] to a variable
            # called u, but I do not see how it corresponds to pycryptodome's
            # RSA.construct and it does not seem to be necessary.
            rsa_modulus_n = first_factor_p * second_factor_q
            phi = (first_factor_p - 1) * (second_factor_q - 1)
            public_exponent_e = modular_inverse(private_exponent_d, phi)

            rsa_components = (
                rsa_modulus_n,
                public_exponent_e,
                private_exponent_d,
                first_factor_p,
                second_factor_q,
            )
            rsa_decrypter = RSA.construct(rsa_components)

            encrypted_sid = mpi_to_int(base64_url_decode(resp['csid']))

            sid = '%x' % rsa_decrypter._decrypt(encrypted_sid)
            sid = binascii.unhexlify('0' + sid if len(sid) % 2 else sid)
            self.sid = base64_url_encode(sid[:43])

    @retry(retry=retry_if_exception_type(RuntimeError),
           wait=wait_exponential(multiplier=2, min=2, max=60))
    def _api_request(self, data):
        params = {'id': self.sequence_num}
        self.sequence_num += 1

        if self.sid:
            params.update({'sid': self.sid})

        # ensure input data is a list
        if not isinstance(data, list):
            data = [data]

        url = f'{self.schema}://g.api.{self.domain}/cs'
        req = requests.post(
            url,
            params=params,
            data=json.dumps(data),
            timeout=self.timeout,
        )
        json_resp = json.loads(req.text)
        if isinstance(json_resp, int):
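            # MEGA answers with a bare negative integer on failure; -3 is its
            # "try again" (EAGAIN-style) code, so it is converted into a
            # RuntimeError to trip the tenacity decorator above, while any
            # other code is surfaced as a RequestError.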
            if json_resp == -3:
                msg = 'Request failed, retrying'
                logger.info(msg)
                raise RuntimeError(msg)
            raise RequestError(json_resp)
        return json_resp[0]

    def _parse_url(self, url):
        # parse file id and key from url
        if '!' in url:
            match = re.findall(r'/#!(.*)', url)
            path = match[0]
            return path
        else:
            raise RequestError('Url key missing')

    def _process_file(self, file, shared_keys):
        if file['t'] == 0 or file['t'] == 1:
            keys = dict(
                keypart.split(':', 1) for keypart in file['k'].split('/')
                if ':' in keypart)
            uid = file['u']
            key = None
            # my objects
            if uid in keys:
                key = decrypt_key(base64_to_a32(keys[uid]), self.master_key)
            # shared folders
            elif 'su' in file and 'sk' in file and ':' in file['k']:
                shared_key = decrypt_key(base64_to_a32(file['sk']),
                                         self.master_key)
                key = decrypt_key(base64_to_a32(keys[file['h']]), shared_key)
                if file['su'] not in shared_keys:
                    shared_keys[file['su']] = {}
                shared_keys[file['su']][file['h']] = shared_key
            # shared files
            elif file['u'] and file['u'] in shared_keys:
                for hkey in shared_keys[file['u']]:
                    shared_key = shared_keys[file['u']][hkey]
                    if hkey in keys:
                        key = keys[hkey]
                        key = decrypt_key(base64_to_a32(key), shared_key)
                        break
            if file['h'] and file['h'] in shared_keys.get('EXP', ()):
                shared_key = shared_keys['EXP'][file['h']]
                encrypted_key = str_to_a32(
                    base64_url_decode(file['k'].split(':')[-1]))
                key = decrypt_key(encrypted_key, shared_key)
                file['shared_folder_key'] = shared_key
            if key is not None:
                # file
                if file['t'] == 0:
                    k = (key[0] ^ key[4], key[1] ^ key[5], key[2] ^ key[6],
                         key[3] ^ key[7])
                    file['iv'] = key[4:6] + (0, 0)
                    file['meta_mac'] = key[6:8]
                # folder
                else:
                    k = key
                file['key'] = key
                file['k'] = k
                attributes = base64_url_decode(file['a'])
                attributes = decrypt_attr(attributes, k)
                file['a'] = attributes
            # other => wrong object
            elif file['k'] == '':
                file['a'] = False
        elif file['t'] == 2:
            self.root_id = file['h']
            file['a'] = {'n': 'Cloud Drive'}
        elif file['t'] == 3:
            self.inbox_id = file['h']
            file['a'] = {'n': 'Inbox'}
        elif file['t'] == 4:
            self.trashbin_id = file['h']
            file['a'] = {'n': 'Rubbish Bin'}
        return file

    def _init_shared_keys(self, files, shared_keys):
        """
        Init shared key not associated with a user.
        Seems to happen when a folder is shared,
        some files are exchanged and then the
        folder is un-shared.
        Keys are stored in files['s'] and files['ok']
        """
        ok_dict = {}
        for ok_item in files['ok']:
            shared_key = decrypt_key(base64_to_a32(ok_item['k']),
                                     self.master_key)
            ok_dict[ok_item['h']] = shared_key
        for s_item in files['s']:
            if s_item['u'] not in shared_keys:
                shared_keys[s_item['u']] = {}
            if s_item['h'] in ok_dict:
                shared_keys[s_item['u']][s_item['h']] = ok_dict[s_item['h']]
        self.shared_keys = shared_keys

    def find_path_descriptor(self, path, files=()):
        """
        Find the descriptor of a folder inside a path, e.g. folder1/folder2/folder3
        Params:
            path, string like folder1/folder2/folder3
        Return:
            Descriptor (str) of folder3 if exists, None otherwise
        """
        paths = path.split('/')

        files = files or self.get_files()
        parent_desc = self.root_id
        found = False
        for foldername in paths:
            if foldername != '':
                for file in files.items():
                    if (file[1]['a'] and file[1]['t']
                            and file[1]['a']['n'] == foldername):
                        if parent_desc == file[1]['p']:
                            parent_desc = file[0]
                            found = True
                if found:
                    found = False
                else:
                    return None
        return parent_desc

    def find(self, filename=None, handle=None, exclude_deleted=False):
        """
        Return file object from given filename
        """
        files = self.get_files()
        if handle:
            return files[handle]
        path = Path(filename)
        filename = path.name
        parent_dir_name = path.parent.name
        for file in list(files.items()):
            parent_node_id = None
            if parent_dir_name:
                parent_node_id = self.find_path_descriptor(parent_dir_name,
                                                           files=files)
                if (filename and parent_node_id and file[1]['a']
                        and file[1]['a']['n'] == filename
                        and parent_node_id == file[1]['p']):
                    if (exclude_deleted
                            and self._trash_folder_node_id == file[1]['p']):
                        continue
                    return file
            if (filename and file[1]['a'] and file[1]['a']['n'] == filename):
                if (exclude_deleted
                        and self._trash_folder_node_id == file[1]['p']):
                    continue
                return file

    def get_files(self):
        logger.info('Getting all files...')
        files = self._api_request({'a': 'f', 'c': 1, 'r': 1})
        files_dict = {}
        shared_keys = {}
        self._init_shared_keys(files, shared_keys)
        for file in files['f']:
            processed_file = self._process_file(file, shared_keys)
            # ensure each file has a name before returning
            if processed_file['a']:
                files_dict[file['h']] = processed_file
        return files_dict

    def get_upload_link(self, file):
        """
        Get a file's public link including the decrypted key
        Requires upload() response as input
        """
        if 'f' in file:
            file = file['f'][0]
            public_handle = self._api_request({'a': 'l', 'n': file['h']})
            file_key = file['k'][file['k'].index(':') + 1:]
            decrypted_key = a32_to_base64(
                decrypt_key(base64_to_a32(file_key), self.master_key))
            return (f'{self.schema}://{self.domain}'
                    f'/#!{public_handle}!{decrypted_key}')
        else:
            raise ValueError('''Upload() response required as input,
                            use get_link() for regular file input''')

    def get_link(self, file):
        """
        Get a file public link from given file object
        """
        file = file[1]
        if 'h' in file and 'k' in file:
            public_handle = self._api_request({'a': 'l', 'n': file['h']})
            if public_handle == -11:
                raise RequestError("Can't get a public link from that file "
                                   "(is this a shared file?)")
            decrypted_key = a32_to_base64(file['key'])
            return (f'{self.schema}://{self.domain}'
                    f'/#!{public_handle}!{decrypted_key}')
        else:
            raise ValidationError('File id and key must be present')

    def _node_data(self, node):
        try:
            return node[1]
        except (IndexError, KeyError):
            return node

    def get_folder_link(self, file):
        try:
            file = file[1]
        except (IndexError, KeyError):
            pass
        if 'h' in file and 'k' in file:
            public_handle = self._api_request({'a': 'l', 'n': file['h']})
            if public_handle == -11:
                raise RequestError("Can't get a public link from that file "
                                   "(is this a shared file?)")
            decrypted_key = a32_to_base64(file['shared_folder_key'])
            return (f'{self.schema}://{self.domain}'
                    f'/#F!{public_handle}!{decrypted_key}')
        else:
            raise ValidationError('File id and key must be present')

    def get_user(self):
        user_data = self._api_request({'a': 'ug'})
        return user_data

    def get_node_by_type(self, type):
        """
        Get a node by its numeric type id, e.g.:
        0: file
        1: dir
        2: special: root cloud drive
        3: special: inbox
        4: special trash bin
        """
        nodes = self.get_files()
        for node in list(nodes.items()):
            if node[1]['t'] == type:
                return node

    def get_files_in_node(self, target):
        """
        Get all files in a given target, e.g. 4=trash
        """
        if type(target) == int:
            # convert special nodes (e.g. trash)
            node_id = self.get_node_by_type(target)
        else:
            node_id = [target]

        files = self._api_request({'a': 'f', 'c': 1})
        files_dict = {}
        shared_keys = {}
        self._init_shared_keys(files, shared_keys)
        for file in files['f']:
            processed_file = self._process_file(file, shared_keys)
            if processed_file['a'] and processed_file['p'] == node_id[0]:
                files_dict[file['h']] = processed_file
        return files_dict

    def get_id_from_public_handle(self, public_handle):
        # get node data
        node_data = self._api_request({'a': 'f', 'f': 1, 'p': public_handle})
        node_id = self.get_id_from_obj(node_data)
        return node_id

    def get_id_from_obj(self, node_data):
        """
        Get node id from a file object
        """
        node_id = None

        for i in node_data['f']:
            if i['h'] != '':
                node_id = i['h']
        return node_id

    def get_quota(self):
        """
        Get current remaining disk quota in MegaBytes
        """
        json_resp = self._api_request({
            'a': 'uq',
            'xfer': 1,
            'strg': 1,
            'v': 1
        })
        # convert bytes to megabytes
        return json_resp['mstrg'] / 1048576

    def get_storage_space(self, giga=False, mega=False, kilo=False):
        """
        Get the current storage space.
        Return a dict containing at least:
          'used' : the used space on the account
          'total' : the maximum space allowed with current plan
        All storage space values are in bytes unless a unit prefix is requested.
        """
        if sum(1 if x else 0 for x in (kilo, mega, giga)) > 1:
            raise ValueError("Only one unit prefix can be specified")
        unit_coef = 1
        if kilo:
            unit_coef = 1024
        if mega:
            unit_coef = 1048576
        if giga:
            unit_coef = 1073741824
        json_resp = self._api_request({'a': 'uq', 'xfer': 1, 'strg': 1})
        return {
            'used': json_resp['cstrg'] / unit_coef,
            'total': json_resp['mstrg'] / unit_coef,
        }

    def get_balance(self):
        """
        Get account monetary balance, Pro accounts only
        """
        user_data = self._api_request({"a": "uq", "pro": 1})
        if 'balance' in user_data:
            return user_data['balance']

    def delete(self, public_handle):
        """
        Delete a file by its public handle
        """
        return self.move(public_handle, 4)

    def delete_url(self, url):
        """
        Delete a file by its url
        """
        path = self._parse_url(url).split('!')
        public_handle = path[0]
        file_id = self.get_id_from_public_handle(public_handle)
        return self.move(file_id, 4)

    def destroy(self, file_id):
        """
        Destroy a file by its private id
        """
        return self._api_request({
            'a': 'd',
            'n': file_id,
            'i': self.request_id
        })

    def destroy_url(self, url):
        """
        Destroy a file by its url
        """
        path = self._parse_url(url).split('!')
        public_handle = path[0]
        file_id = self.get_id_from_public_handle(public_handle)
        return self.destroy(file_id)

    def empty_trash(self):
        # get list of files in the rubbish bin
        files = self.get_files_in_node(4)

        # make a list of json
        if files != {}:
            post_list = []
            for file in files:
                post_list.append({"a": "d", "n": file, "i": self.request_id})
            return self._api_request(post_list)

    def download(self, file, dest_path=None, dest_filename=None):
        """
        Download a file by its file object
        """
        return self._download_file(file_handle=None,
                                   file_key=None,
                                   file=file[1],
                                   dest_path=dest_path,
                                   dest_filename=dest_filename,
                                   is_public=False)

    def _export_file(self, node):
        node_data = self._node_data(node)
        self._api_request([{
            'a': 'l',
            'n': node_data['h'],
            'i': self.request_id
        }])
        return self.get_link(node)

    def export(self, path=None, node_id=None):
        nodes = self.get_files()
        if node_id:
            node = nodes[node_id]
        else:
            node = self.find(path)

        node_data = self._node_data(node)
        is_file_node = node_data['t'] == 0
        if is_file_node:
            return self._export_file(node)
        if node:
            try:
                # If already exported
                return self.get_folder_link(node)
            except (RequestError, KeyError):
                pass

        master_key_cipher = AES.new(a32_to_str(self.master_key), AES.MODE_ECB)
        ha = base64_url_encode(
            master_key_cipher.encrypt(node_data['h'].encode("utf8") +
                                      node_data['h'].encode("utf8")))

        share_key = secrets.token_bytes(16)
        ok = base64_url_encode(master_key_cipher.encrypt(share_key))

        share_key_cipher = AES.new(share_key, AES.MODE_ECB)
        node_key = node_data['k']
        encrypted_node_key = base64_url_encode(
            share_key_cipher.encrypt(a32_to_str(node_key)))

        node_id = node_data['h']
        request_body = [{
            'a':
            's2',
            'n':
            node_id,
            's': [{
                'u': 'EXP',
                'r': 0
            }],
            'i':
            self.request_id,
            'ok':
            ok,
            'ha':
            ha,
            'cr': [[node_id], [node_id], [0, 0, encrypted_node_key]]
        }]
        self._api_request(request_body)
        nodes = self.get_files()
        return self.get_folder_link(nodes[node_id])

    def download_url(self, url, dest_path=None, dest_filename=None):
        """
        Download a file by its public URL
        """
        path = self._parse_url(url).split('!')
        file_id = path[0]
        file_key = path[1]
        return self._download_file(
            file_handle=file_id,
            file_key=file_key,
            dest_path=dest_path,
            dest_filename=dest_filename,
            is_public=True,
        )

    def _download_file(self,
                       file_handle,
                       file_key,
                       dest_path=None,
                       dest_filename=None,
                       is_public=False,
                       file=None):
        if file is None:
            if is_public:
                file_key = base64_to_a32(file_key)
                file_data = self._api_request({
                    'a': 'g',
                    'g': 1,
                    'p': file_handle
                })
            else:
                file_data = self._api_request({
                    'a': 'g',
                    'g': 1,
                    'n': file_handle
                })

            k = (file_key[0] ^ file_key[4], file_key[1] ^ file_key[5],
                 file_key[2] ^ file_key[6], file_key[3] ^ file_key[7])
            iv = file_key[4:6] + (0, 0)
            meta_mac = file_key[6:8]
        else:
            file_data = self._api_request({'a': 'g', 'g': 1, 'n': file['h']})
            k = file['k']
            iv = file['iv']
            meta_mac = file['meta_mac']

        # Seems to happen sometimes... When this occurs, files are
        # inaccessible in the official web app as well.
        # Strangely, files can come back later.
        if 'g' not in file_data:
            raise RequestError('File not accessible anymore')
        file_url = file_data['g']
        file_size = file_data['s']
        attribs = base64_url_decode(file_data['at'])
        attribs = decrypt_attr(attribs, k)

        if dest_filename is not None:
            file_name = dest_filename
        else:
            file_name = attribs['n']

        input_file = requests.get(file_url, stream=True).raw

        if dest_path is None:
            dest_path = ''
        else:
            dest_path += '/'

        with tempfile.NamedTemporaryFile(mode='w+b',
                                         prefix='megapy_',
                                         delete=False) as temp_output_file:
            k_str = a32_to_str(k)
            counter = Counter.new(128,
                                  initial_value=((iv[0] << 32) + iv[1]) << 64)
            aes = AES.new(k_str, AES.MODE_CTR, counter=counter)

            mac_str = '\0' * 16
            mac_encryptor = AES.new(k_str, AES.MODE_CBC,
                                    mac_str.encode("utf8"))
            iv_str = a32_to_str([iv[0], iv[1], iv[0], iv[1]])
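            # Each chunk is decrypted with AES-CTR and, in parallel, a CBC-MAC
            # is folded over its 16-byte blocks (then over the chunks) so the
            # file's meta-MAC can be verified once the last chunk is written.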

            for chunk_start, chunk_size in get_chunks(file_size):
                chunk = input_file.read(chunk_size)
                chunk = aes.decrypt(chunk)
                temp_output_file.write(chunk)

                encryptor = AES.new(k_str, AES.MODE_CBC, iv_str)
                for i in range(0, len(chunk) - 16, 16):
                    block = chunk[i:i + 16]
                    encryptor.encrypt(block)

                # fix for files under 16 bytes failing
                if file_size > 16:
                    i += 16
                else:
                    i = 0

                block = chunk[i:i + 16]
                if len(block) % 16:
                    block += b'\0' * (16 - (len(block) % 16))
                mac_str = mac_encryptor.encrypt(encryptor.encrypt(block))

                file_info = os.stat(temp_output_file.name)
                logger.info('%s of %s downloaded', file_info.st_size,
                            file_size)
            file_mac = str_to_a32(mac_str)
            # check mac integrity
            if (file_mac[0] ^ file_mac[1],
                    file_mac[2] ^ file_mac[3]) != meta_mac:
                raise ValueError('Mismatched mac')
            output_path = Path(dest_path + file_name)
            shutil.move(temp_output_file.name, output_path)
            return output_path

    def upload(self, filename, dest=None, dest_filename=None):
        # determine storage node
        if dest is None:
            # if none set, upload to cloud drive node
            if not hasattr(self, 'root_id'):
                self.get_files()
            dest = self.root_id

        # request upload url, call 'u' method
        with open(filename, 'rb') as input_file:
            file_size = os.path.getsize(filename)
            ul_url = self._api_request({'a': 'u', 's': file_size})['p']

            # generate random aes key (128) for file
            ul_key = [random.randint(0, 0xFFFFFFFF) for _ in range(6)]
            k_str = a32_to_str(ul_key[:4])
            count = Counter.new(
                128, initial_value=((ul_key[4] << 32) + ul_key[5]) << 64)
            aes = AES.new(k_str, AES.MODE_CTR, counter=count)

            upload_progress = 0
            completion_file_handle = None

            mac_str = '\0' * 16
            mac_encryptor = AES.new(k_str, AES.MODE_CBC,
                                    mac_str.encode("utf8"))
            iv_str = a32_to_str([ul_key[4], ul_key[5], ul_key[4], ul_key[5]])
            if file_size > 0:
                for chunk_start, chunk_size in get_chunks(file_size):
                    chunk = input_file.read(chunk_size)
                    upload_progress += len(chunk)

                    encryptor = AES.new(k_str, AES.MODE_CBC, iv_str)
                    for i in range(0, len(chunk) - 16, 16):
                        block = chunk[i:i + 16]
                        encryptor.encrypt(block)

                    # fix for files under 16 bytes failing
                    if file_size > 16:
                        i += 16
                    else:
                        i = 0

                    block = chunk[i:i + 16]
                    if len(block) % 16:
                        block += makebyte('\0' * (16 - len(block) % 16))
                    mac_str = mac_encryptor.encrypt(encryptor.encrypt(block))

                    # encrypt file and upload
                    chunk = aes.encrypt(chunk)
                    output_file = requests.post(ul_url + "/" +
                                                str(chunk_start),
                                                data=chunk,
                                                timeout=self.timeout)
                    completion_file_handle = output_file.text
                    logger.info('%s of %s uploaded', upload_progress,
                                file_size)
            else:
                output_file = requests.post(ul_url + "/0",
                                            data='',
                                            timeout=self.timeout)
                completion_file_handle = output_file.text

            logger.info('Chunks uploaded')
            logger.info('Setting attributes to complete upload')
            logger.info('Computing attributes')
            file_mac = str_to_a32(mac_str)

            # determine meta mac
            meta_mac = (file_mac[0] ^ file_mac[1], file_mac[2] ^ file_mac[3])

            dest_filename = dest_filename or os.path.basename(filename)
            attribs = {'n': dest_filename}

            encrypt_attribs = base64_url_encode(
                encrypt_attr(attribs, ul_key[:4]))
            key = [
                ul_key[0] ^ ul_key[4], ul_key[1] ^ ul_key[5],
                ul_key[2] ^ meta_mac[0], ul_key[3] ^ meta_mac[1], ul_key[4],
                ul_key[5], meta_mac[0], meta_mac[1]
            ]
            encrypted_key = a32_to_base64(encrypt_key(key, self.master_key))
            logger.info('Sending request to update attributes')
            # update attributes
            data = self._api_request({
                'a':
                'p',
                't':
                dest,
                'i':
                self.request_id,
                'n': [{
                    'h': completion_file_handle,
                    't': 0,
                    'a': encrypt_attribs,
                    'k': encrypted_key
                }]
            })
            logger.info('Upload complete')
            return data

    def _mkdir(self, name, parent_node_id):
        # generate random aes key (128) for folder
        ul_key = [random.randint(0, 0xFFFFFFFF) for _ in range(6)]

        # encrypt attribs
        attribs = {'n': name}
        encrypt_attribs = base64_url_encode(encrypt_attr(attribs, ul_key[:4]))
        encrypted_key = a32_to_base64(encrypt_key(ul_key[:4], self.master_key))

        # update attributes
        data = self._api_request({
            'a':
            'p',
            't':
            parent_node_id,
            'n': [{
                'h': 'xxxxxxxx',
                't': 1,
                'a': encrypt_attribs,
                'k': encrypted_key
            }],
            'i':
            self.request_id
        })
        return data

    def _root_node_id(self):
        if not hasattr(self, 'root_id'):
            self.get_files()
        return self.root_id

    def create_folder(self, name, dest=None):
        dirs = tuple(dir_name for dir_name in str(name).split('/') if dir_name)
        folder_node_ids = {}
        for idx, directory_name in enumerate(dirs):
            existing_node_id = self.find_path_descriptor(directory_name)
            if existing_node_id:
                folder_node_ids[idx] = existing_node_id
                continue
            if idx == 0:
                if dest is None:
                    parent_node_id = self._root_node_id()
                else:
                    parent_node_id = dest
            else:
                parent_node_id = folder_node_ids[idx - 1]
            created_node = self._mkdir(name=directory_name,
                                       parent_node_id=parent_node_id)
            node_id = created_node['f'][0]['h']
            folder_node_ids[idx] = node_id
        return dict(zip(dirs, folder_node_ids.values()))
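        # Usage sketch (assumes ``m`` is a logged-in client instance): nested
        # folders are created one level at a time and a mapping of directory
        # name to node id is returned.
        #
        # nodes = m.create_folder('projects/2021/reports')
        # # -> {'projects': <node_id>, '2021': <node_id>, 'reports': <node_id>}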

    def rename(self, file, new_name):
        file = file[1]
        # create new attribs
        attribs = {'n': new_name}
        # encrypt attribs
        encrypt_attribs = base64_url_encode(encrypt_attr(attribs, file['k']))
        encrypted_key = a32_to_base64(encrypt_key(file['key'],
                                                  self.master_key))
        # update attributes
        return self._api_request([{
            'a': 'a',
            'attr': encrypt_attribs,
            'key': encrypted_key,
            'n': file['h'],
            'i': self.request_id
        }])
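        # Usage sketch (assumes ``m`` is a logged-in client instance); ``file``
        # is the (handle, metadata) tuple returned by find():
        #
        # file = m.find('old-name.txt')
        # m.rename(file, 'new-name.txt')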

    def move(self, file_id, target):
        """
        Move a file to another parent node
        params:
        a : command
        n : node we're moving
        t : id of target parent node, moving to
        i : request id

        targets
        2 : root
        3 : inbox
        4 : trash

        or...
        target's id
        or...
        target's structure returned by find()
        """

        # determine target_node_id
        if isinstance(target, int):
            target_node_id = str(self.get_node_by_type(target)[0])
        elif isinstance(target, str):
            target_node_id = target
        else:
            file = target[1]
            target_node_id = file['h']
        return self._api_request({
            'a': 'm',
            'n': file_id,
            't': target_node_id,
            'i': self.request_id
        })
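        # Usage sketch (assumes ``m`` is a logged-in client instance and
        # ``file_id`` is a node handle): the target can be a special node
        # type, an explicit node id, or a find() result.
        #
        # m.move(file_id, 4)               # move to trash (target type 4)
        # m.move(file_id, target_node_id)  # move to an explicit node id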

    def add_contact(self, email):
        """
        Add another user to your mega contact list
        """
        return self._edit_contact(email, True)

    def remove_contact(self, email):
        """
        Remove a user from your mega contact list
        """
        return self._edit_contact(email, False)

    def _edit_contact(self, email, add):
        """
        Editing contacts
        """
        if add is True:
            l = '1'  # add command
        elif add is False:
            l = '0'  # remove command
        else:
            raise ValidationError('add parameter must be of type bool')

        if not re.match(r"[^@]+@[^@]+\.[^@]+", email):
            raise ValidationError('add_contact requires a valid email address')
        else:
            return self._api_request({
                'a': 'ur',
                'u': email,
                'l': l,
                'i': self.request_id
            })

    def get_public_url_info(self, url):
        """
        Get size and name from a public url, dict returned
        """
        file_handle, file_key = self._parse_url(url).split('!')
        return self.get_public_file_info(file_handle, file_key)
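        # Usage sketch (assumes ``m`` is a logged-in client instance; the URL
        # carries a '!'-separated handle and key, as split by _parse_url above):
        #
        # info = m.get_public_url_info('https://mega.nz/#!<handle>!<key>')
        # print(info['size'], info['name'])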

    def import_public_url(self, url, dest_node=None, dest_name=None):
        """
        Import the public url into user account
        """
        file_handle, file_key = self._parse_url(url).split('!')
        return self.import_public_file(file_handle,
                                       file_key,
                                       dest_node=dest_node,
                                       dest_name=dest_name)

    def get_public_file_info(self, file_handle, file_key):
        """
        Get size and name of a public file
        """
        data = self._api_request({'a': 'g', 'p': file_handle, 'ssm': 1})
        if isinstance(data, int):
            raise RequestError(data)

        if 'at' not in data or 's' not in data:
            raise ValueError("Unexpected result", data)

        key = base64_to_a32(file_key)
        k = (key[0] ^ key[4], key[1] ^ key[5], key[2] ^ key[6],
             key[3] ^ key[7])

        size = data['s']
        unencrypted_attrs = decrypt_attr(base64_url_decode(data['at']), k)
        if not unencrypted_attrs:
            return None
        result = {'size': size, 'name': unencrypted_attrs['n']}
        return result

    def import_public_file(self,
                           file_handle,
                           file_key,
                           dest_node=None,
                           dest_name=None):
        """
        Import the public file into user account
        """
        # Providing dest_node spares an API call to retrieve it.
        if dest_node is None:
            # Get the '/Cloud Drive' folder if no dest_node was specified
            dest_node = self.get_node_by_type(2)[1]

        # Providing dest_name spares an API call to retrieve it.
        if dest_name is None:
            pl_info = self.get_public_file_info(file_handle, file_key)
            dest_name = pl_info['name']

        key = base64_to_a32(file_key)
        k = (key[0] ^ key[4], key[1] ^ key[5], key[2] ^ key[6],
             key[3] ^ key[7])

        encrypted_key = a32_to_base64(encrypt_key(key, self.master_key))
        encrypted_name = base64_url_encode(encrypt_attr({'n': dest_name}, k))
        return self._api_request({
            'a': 'p',
            't': dest_node['h'],
            'n': [{
                'ph': file_handle,
                't': 0,
                'a': encrypted_name,
                'k': encrypted_key
            }]
        })
import os
import logging
import mysql.connector
import postgresql
import postgresql.driver.dbapi20 as pgdb
import time
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_random_exponential

logging.basicConfig(level=logging.DEBUG)


@retry(retry=retry_if_exception_type(mysql.connector.errors.InterfaceError),
       stop=stop_after_attempt(10),
       wait=wait_random_exponential(multiplier=1, max=10))
def get_mysql_connection(host: str, user: str, password: str, database: str):
    logging.debug(
        f'Getting bigbluebutton-auto-approver MySQL connection on host={host} database={database} user={user} password={password}...'
    )
    db = mysql.connector.connect(host=host,
                                 user=user,
                                 passwd=password,
                                 db=database)
    logging.debug(f'Got bigbluebutton-auto-approver MySQL connection: {db}')
    return db
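# Usage sketch (connection parameters are illustrative): the decorator retries
# the connection attempt up to 10 times with random exponential backoff.
#
# db = get_mysql_connection(host=os.environ.get('DB_HOST', 'localhost'),
#                           user='greenlight',
#                           password=os.environ['DB_PASSWORD'],
#                           database='greenlight_production')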


def get_postgres_connection(host: str, user: str, password: str,
                            database: str):
    logging.debug(
        f'Getting Greenlight PostgreSQL connection on host={host} database={database} user={user} password={password}...'
    )
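    # The original snippet is truncated here; a minimal completion (assumed,
    # not part of the original) mirroring the MySQL helper above would open the
    # connection via py-postgresql's DB-API driver:
    db = pgdb.connect(user=user, password=password, host=host, database=database)
    logging.debug(f'Got Greenlight PostgreSQL connection: {db}')
    return db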
Example #36
def retry_on_stale_data_error(func):
    wrapper = tenacity.retry(
        stop=tenacity.stop_after_attempt(3),
        retry=tenacity.retry_if_exception_type(exc.StaleDataError),
        reraise=True)
    return wrapper(func)
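# Usage sketch (assumes a SQLAlchemy-style session whose flush may raise
# exc.StaleDataError; names are illustrative only):
#
# @retry_on_stale_data_error
# def bump_revision(session, obj):
#     obj.revision += 1
#     session.flush()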
Example #37
class MasakariTest(test_utils.OpenStackBaseTest):
    """Encapsulate Masakari tests."""
    @classmethod
    def setUpClass(cls):
        """Run class setup for running tests."""
        super(MasakariTest, cls).setUpClass(application_name="masakari")
        cls.current_release = openstack_utils.get_os_release()
        cls.keystone_session = openstack_utils.get_overcloud_keystone_session()
        cls.model_name = zaza.model.get_juju_model()
        cls.nova_client = openstack_utils.get_nova_session_client(
            cls.keystone_session)

    @classmethod
    def tearDown(cls):
        """Bring hypervisors and services back up."""
        logging.info('Running teardown')
        for unit in zaza.model.get_units('nova-compute',
                                         model_name=cls.model_name):
            zaza.openstack.configure.masakari.simulate_compute_host_recovery(
                unit.entity_id, model_name=cls.model_name)
        openstack_utils.enable_all_nova_services(cls.nova_client)
        zaza.openstack.configure.masakari.enable_hosts()

    def ensure_guest(self, vm_name):
        """Return the existing guest or boot a new one.

        :param vm_name: Name of guest to lookup
        :type vm_name: str
        :returns: Guest matching name.
        :rtype: novaclient.v2.servers.Server
        """
        try:
            guest = self.nova_client.servers.find(name=vm_name)
            logging.info('Found existing guest')
        except novaclient.exceptions.NotFound:
            logging.info('Launching new guest')
            guest = zaza.openstack.configure.guest.launch_instance(
                'bionic',
                use_boot_volume=True,
                meta={'HA_Enabled': 'True'},
                vm_name=vm_name)
        return guest

    def get_guests_compute_info(self, vm_name):
        """Return the hostname & juju unit of compute host hosting vm.

        :param vm_name: Name of guest to lookup
        :type vm_name: str
        :returns: Hypervisor name and juju unit name
        :rtype: (str, str)
        """
        current_hypervisor = openstack_utils.get_hypervisor_for_guest(
            self.nova_client, vm_name)
        unit_name = juju_utils.get_unit_name_from_host_name(
            current_hypervisor, 'nova-compute')
        return current_hypervisor, unit_name

    def get_guest_qemu_pid(self, compute_unit_name, vm_uuid, model_name=None):
        """Return the qemu pid of process running guest.

        :param compute_unit_name: Juju unit name of hypervisor running guest
        :type compute_unit_name: str
        :param vm_uuid: Guests UUID
        :type vm_uuid: str
        :param model_name: Name of model running cloud.
        :type model_name: str
        :returns: PID of qemu process
        :rtype: int
        :raises: ValueError
        """
        pid_find_cmd = 'pgrep -u libvirt-qemu -f {}'.format(vm_uuid)
        out = zaza.model.run_on_unit(compute_unit_name,
                                     pid_find_cmd,
                                     model_name=self.model_name)
        return int(out['Stdout'].strip())

    @tenacity.retry(wait=tenacity.wait_exponential(multiplier=2, max=60),
                    reraise=True,
                    stop=tenacity.stop_after_attempt(5),
                    retry=tenacity.retry_if_exception_type(ValueError))
    def wait_for_guest_pid(self, compute_unit_name, vm_uuid, model_name=None):
        """Wait for the qemu process running guest to appear & return its pid.

        :param compute_unit_name: Juju unit name of hypervisor running guest
        :type compute_unit_name: str
        :param vm_uuid: Guests UUID
        :type vm_uuid: str
        :param model_name: Name of model running cloud.
        :type model_name: str
        :returns: PID of qemu process
        :rtype: int
        :raises: ValueError
        """
        return self.get_guest_qemu_pid(compute_unit_name,
                                       vm_uuid,
                                       model_name=self.model_name)

    @tenacity.retry(wait=tenacity.wait_exponential(multiplier=2, max=60),
                    reraise=True,
                    stop=tenacity.stop_after_attempt(5),
                    retry=tenacity.retry_if_exception_type(AssertionError))
    def wait_for_guest_ready(self, vm_name):
        """Wait for the guest to be ready.

        :param vm_name: Name of guest to check.
        :type vm_name: str
        """
        guest_ready_attr_checks = [('OS-EXT-STS:task_state', None),
                                   ('status', 'ACTIVE'),
                                   ('OS-EXT-STS:power_state', 1),
                                   ('OS-EXT-STS:vm_state', 'active')]
        guest = self.nova_client.servers.find(name=vm_name)
        logging.info('Checking guest {} attributes'.format(vm_name))
        for (attr, required_state) in guest_ready_attr_checks:
            logging.info('Checking {} is {}'.format(attr, required_state))
            assert getattr(guest, attr) == required_state

    def test_instance_failover(self):
        """Test masakari managed guest migration."""
        # Workaround for Bug #1874719
        zaza.openstack.configure.hacluster.remove_node('masakari', 'node1')
        # Launch guest
        self.assertTrue(
            zaza.openstack.configure.hacluster.check_all_nodes_online(
                'masakari'))
        vm_name = 'zaza-test-instance-failover'
        self.ensure_guest(vm_name)

        # Locate hypervisor hosting guest and shut it down
        current_hypervisor, unit_name = self.get_guests_compute_info(vm_name)
        zaza.openstack.configure.masakari.simulate_compute_host_failure(
            unit_name, model_name=self.model_name)

        # Wait for instance move
        logging.info('Waiting for guest to move away from {}'.format(
            current_hypervisor))
        # wait_for_server_migration will throw an exception if migration fails
        openstack_utils.wait_for_server_migration(self.nova_client, vm_name,
                                                  current_hypervisor)

        # Bring things back
        zaza.openstack.configure.masakari.simulate_compute_host_recovery(
            unit_name, model_name=self.model_name)
        openstack_utils.enable_all_nova_services(self.nova_client)
        zaza.openstack.configure.masakari.enable_hosts()
        self.wait_for_guest_ready(vm_name)

    def test_instance_restart_on_fail(self):
        """Test single guest crash and recovery."""
        if self.current_release < openstack_utils.get_os_release(
                'bionic_ussuri'):
            raise unittest.SkipTest("Not supported on {}. Bug #1866638".format(
                self.current_release))
        vm_name = 'zaza-test-instance-failover'
        vm = self.ensure_guest(vm_name)
        self.wait_for_guest_ready(vm_name)
        _, unit_name = self.get_guests_compute_info(vm_name)
        logging.info('{} is running on {}'.format(vm_name, unit_name))
        guest_pid = self.get_guest_qemu_pid(unit_name,
                                            vm.id,
                                            model_name=self.model_name)
        logging.info('{} pid is {}'.format(vm_name, guest_pid))
        initial_update_time = datetime.strptime(vm.updated,
                                                "%Y-%m-%dT%H:%M:%SZ")
        logging.info('Simulating vm crash of {}'.format(vm_name))
        zaza.openstack.configure.masakari.simulate_guest_crash(
            guest_pid, unit_name, model_name=self.model_name)
        logging.info(
            'Waiting for {} to be updated and become active'.format(vm_name))
        openstack_utils.wait_for_server_update_and_active(
            self.nova_client, vm_name, initial_update_time)
        new_guest_pid = self.wait_for_guest_pid(unit_name,
                                                vm.id,
                                                model_name=self.model_name)
        logging.info('{} pid is now {}'.format(vm_name, new_guest_pid))
        assert new_guest_pid and new_guest_pid != guest_pid, (
            "Restart failed or never happened")
Example #38
def retry_upon_exception(exc, delay, max_delay, max_attempts):
    return tenacity.retry(reraise=True,
                          retry=tenacity.retry_if_exception_type(exc),
                          wait=tenacity.wait_exponential(multiplier=delay,
                                                         max=max_delay),
                          stop=tenacity.stop_after_attempt(max_attempts))
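# Usage sketch (exception type and timings are illustrative): build a reusable
# decorator that retries on ConnectionError with exponential backoff.
#
# @retry_upon_exception(ConnectionError, delay=0.5, max_delay=10, max_attempts=5)
# def fetch_state(client):
#     return client.get_state()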
Example #39
class StatusManager(object):
    def __init__(self, bigip):
        self.bigip = bigip
        self._octavia_driver_lib = driver_lib.DriverLibrary(
            status_socket=CONF.driver_agent.status_socket_path,
            stats_socket=CONF.driver_agent.stats_socket_path
        )

    def set_active(self, obj):
        """Set provisioning_state of octavia object and all ancestors to
        ACTIVE.

        :param obj: octavia object like loadbalancer, pools, etc.
        """
        self._set_obj_and_ancestors(obj)

    def set_deleted(self, obj):
        """Set provisioning_state of octavia object to DELETED and all
        ancestors to ACTIVE.

        :param obj: octavia object like loadbalancer, pools, etc.
        """
        self._set_obj_and_ancestors(obj, lib_consts.DELETED)

    def set_error(self, obj):
        """Set provisioning_state of octavia object to ERROR and all
        ancestors to ACTIVE.

        :param obj: octavia object like loadbalancer, pools, etc.
        """
        self._set_obj_and_ancestors(obj, lib_consts.ERROR)

    def update_status(self, loadbalancers):
        """Set provisioning_state of loadbalancers and all it's
        children to ACTIVE if PENDING_UPDATE or PENDING_CREATE, else
        DELETED for PENDING_DELETED.

        :param loadbalancers: octavia loadbalancers list
        """

        def _set_deleted_or_active(obj):
            """Sets octavia object to deleted if status was PENDING_DELETE

            :param obj: octavia object
            """
            if utils.pending_delete(obj):
                self.set_deleted(obj)
            else:
                self.set_active(obj)

        for loadbalancer in loadbalancers:
            _set_deleted_or_active(loadbalancer)

            for listener in loadbalancer.listeners:
                _set_deleted_or_active(listener)

                for l7policy in listener.l7policies:
                    _set_deleted_or_active(l7policy)

                    for l7rule in l7policy.l7rules:
                        _set_deleted_or_active(l7rule)

            for pool in loadbalancer.pools:
                _set_deleted_or_active(pool)

                for member in pool.members:
                    _set_deleted_or_active(member)

                if pool.health_monitor:
                    _set_deleted_or_active(pool.health_monitor)

    def _set_obj_and_ancestors(self, obj, state=lib_consts.ACTIVE):
        """Set provisioning_state of octavia object to state and set all ancestors
        to ACTIVE.

        :param obj: octavia object like loadbalancer, pools, etc.
        :param state: provisioning status to set on obj (default: ACTIVE)
        """
        obj_status = self._status_obj(obj, state)

        # Load Balancer
        if isinstance(obj, data_models.LoadBalancer):
            self._update_status_to_octavia({
                lib_consts.LOADBALANCERS: [obj_status]
            })

        # Listener
        if isinstance(obj, data_models.Listener):
            self._update_status_to_octavia({
                lib_consts.LISTENERS: [obj_status],
                lib_consts.LOADBALANCERS: [self._status_obj(obj.load_balancer)]
            })

        # Pool
        if isinstance(obj, data_models.Pool):
            self._update_status_to_octavia({
                lib_consts.POOLS: [obj_status],
                lib_consts.LOADBALANCERS: [self._status_obj(obj.load_balancer)]
            })

        # Member
        if isinstance(obj, data_models.Member):
            self._update_status_to_octavia({
                lib_consts.MEMBERS: [obj_status],
                lib_consts.POOLS: [self._status_obj(obj.pool)],
                lib_consts.LOADBALANCERS: [self._status_obj(obj.pool.load_balancer)]
            })

        # Health Monitor
        if isinstance(obj, data_models.HealthMonitor):
            self._update_status_to_octavia({
                lib_consts.HEALTHMONITORS: [obj_status],
                lib_consts.POOLS: [self._status_obj(obj.pool)],
                lib_consts.LOADBALANCERS: [self._status_obj(obj.pool.load_balancer)]
            })

        # L7Policy
        if isinstance(obj, data_models.L7Policy):
            self._update_status_to_octavia({
                lib_consts.L7POLICIES: [obj_status],
                lib_consts.LISTENERS: [self._status_obj(obj.listener)],
                lib_consts.LOADBALANCERS: [self._status_obj(obj.listener.load_balancer)]
            })

        # L7Rule
        if isinstance(obj, data_models.L7Rule):
            self._update_status_to_octavia({
                lib_consts.L7RULES: [obj_status],
                lib_consts.L7POLICIES: [self._status_obj(obj.l7policy)],
                lib_consts.LISTENERS: [self._status_obj(obj.l7policy.listener)],
                lib_consts.LOADBALANCERS: [self._status_obj(
                    obj.l7policy.listener.load_balancer)]
            })

    @staticmethod
    def _status_obj(obj,
                    provisioning_status=lib_consts.ACTIVE):
        """Return status object for statup update api consumption

        :param obj: octavia object containing ID
        :param provisioning_status: provisioning status
        :return: status object
        """
        return {
            lib_consts.ID: obj.id,
            lib_consts.PROVISIONING_STATUS: provisioning_status
        }

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(),
        wait=tenacity.wait_incrementing(start=1, increment=10),
        stop=tenacity.stop_after_attempt(max_attempt_number=3))
    def _update_status_to_octavia(self, status):
        try:
            self._octavia_driver_lib.update_loadbalancer_status(status)
        except driver_exceptions.UpdateStatusError as e:
            msg = ("Error while updating status to octavia: "
                   "%s") % e.fault_string
            LOG.error(msg)
            raise driver_exceptions.UpdateStatusError(msg)
import random


import tenacity


def do_something():
    if random.randint(0, 1) == 0:
        print('Failure')
        raise IOError
    print('Success')
    return True


r = tenacity.Retrying(
    wait=tenacity.wait_fixed(1),
    retry=tenacity.retry_if_exception_type(IOError))
r.call(do_something)
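# Note: ``Retrying.call`` was deprecated and later removed in newer tenacity
# releases; on those versions the equivalent form (assumed here) calls the
# Retrying object directly:
#
# r(do_something)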
Example #41
import os
import sys
import uuid
import httpx
from tenacity import retry, retry_if_exception_type, stop_after_attempt, wait_fixed
import urllib3.exceptions


@retry(
    retry=(retry_if_exception_type(httpx.HTTPError)
           | retry_if_exception_type(urllib3.exceptions.HTTPError)),
    stop=stop_after_attempt(10),
    wait=wait_fixed(60),
)
def download_file(*, url, path, client=None):
    """
    Atomically download a file from ``url`` to ``path``.

    If ``path`` already exists, the file will not be downloaded again.
    This means that different URLs should be saved to different paths.

    This function is meant to be used in cases where the contents of ``url``
    is immutable -- calling it more than once should always return the same bytes.

    Returns the download path.

    """
    # If the URL has already been downloaded, we can skip downloading it again.
    if os.path.exists(path):
        print("path exists")
        return path
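    # The original example is truncated here; a minimal sketch of the remaining
    # atomic-download logic (assumed, not part of the original) could look like:
    tmp_path = f"{path}.{uuid.uuid4().hex}.tmp"
    client = client or httpx.Client()
    with client.stream("GET", url) as resp:
        resp.raise_for_status()
        with open(tmp_path, "wb") as out_file:
            for chunk in resp.iter_bytes():
                out_file.write(chunk)
    # rename is atomic on POSIX when source and destination share a filesystem
    os.rename(tmp_path, path)
    return path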
Example #42
class ExchangeInterface():
    """Interface for performing queries against exchange API's
    """
    def __init__(self, exchange_config):
        """Initializes ExchangeInterface class

        Args:
            exchange_config (dict): A dictionary containing configuration for the exchanges.
        """

        self.logger = structlog.get_logger()
        self.exchanges = dict()

        # Loads the exchanges using ccxt.
        for exchange in exchange_config:
            if exchange_config[exchange]['required']['enabled']:
                new_exchange = getattr(ccxt, exchange)({
                    "enableRateLimit": True
                })

                # register the exchange if ccxt loaded it successfully
                if new_exchange:
                    self.exchanges[new_exchange.id] = new_exchange
                else:
                    self.logger.error("Unable to load exchange %s",
                                      new_exchange)

    @retry(retry=retry_if_exception_type(ccxt.NetworkError),
           stop=stop_after_attempt(3))
    def get_historical_data(self,
                            market_pair,
                            exchange,
                            time_unit,
                            start_date=None,
                            max_periods=100):
        """Get historical OHLCV for a symbol pair

        Decorators:
            retry

        Args:
            market_pair (str): Contains the symbol pair to operate on i.e. BURST/BTC
            exchange (str): Contains the exchange to fetch the historical data from.
            time_unit (str): A string specifying the ccxt time unit i.e. 5m or 1d.
            start_date (int, optional): Timestamp in milliseconds.
            max_periods (int, optional): Defaults to 100. Maximum number of time periods
              back to fetch data for.

        Returns:
            list: Contains a list of lists which contain timestamp, open, high, low, close, volume.
        """

        try:
            if time_unit not in self.exchanges[exchange].timeframes:
                raise ValueError(
                    "{} does not support {} timeframe for OHLCV data. Possible values are: {}"
                    .format(exchange, time_unit,
                            list(self.exchanges[exchange].timeframes)))
        except AttributeError:
            self.logger.error(
                '%s interface does not support timeframe queries! We are unable to fetch data!',
                exchange)
            raise AttributeError(sys.exc_info())

        if not start_date:
            timeframe_regex = re.compile('([0-9]+)([a-zA-Z])')
            timeframe_matches = timeframe_regex.match(time_unit)
            time_quantity = timeframe_matches.group(1)
            time_period = timeframe_matches.group(2)

            timedelta_values = {
                'm': 'minutes',
                'h': 'hours',
                'd': 'days',
                'w': 'weeks',
                'M': 'months',
                'y': 'years'
            }

            timedelta_args = {
                timedelta_values[time_period]: int(time_quantity)
            }

            start_date_delta = timedelta(**timedelta_args)

            max_days_date = datetime.now() - (max_periods * start_date_delta)
            start_date = int(
                max_days_date.replace(tzinfo=timezone.utc).timestamp() * 1000)

        historical_data = self.exchanges[exchange].fetch_ohlcv(
            market_pair, timeframe=time_unit, since=start_date)

        if not historical_data:
            raise ValueError(
                'No historical data returned by exchange.')

        # Sort by timestamp in ascending order
        historical_data.sort(key=lambda d: d[0])

        time.sleep(self.exchanges[exchange].rateLimit / 1000)

        return historical_data

    @retry(retry=retry_if_exception_type(ccxt.NetworkError),
           stop=stop_after_attempt(3))
    def get_exchange_markets(self, exchanges=[], markets=[]):
        """Get market data for all symbol pairs listed on all configured exchanges.

        Args:
            markets (list, optional): A list of markets to get from the exchanges. Default is all
                markets.
            exchanges (list, optional): A list of exchanges to collect market data from. Default is
                all enabled exchanges.

        Decorators:
            retry

        Returns:
            dict: A dictionary containing market data for all symbol pairs.
        """

        if not exchanges:
            exchanges = self.exchanges

        exchange_markets = dict()
        for exchange in exchanges:
            exchange_markets[exchange] = self.exchanges[exchange].load_markets(
            )

            if markets:
                curr_markets = exchange_markets[exchange]

                # Only retrieve markets the users specified
                exchange_markets[exchange] = {
                    key: curr_markets[key]
                    for key in curr_markets if key in markets
                }

                for market in markets:
                    if market not in exchange_markets[exchange]:
                        self.logger.info('%s has no market %s, ignoring.',
                                         exchange, market)

            time.sleep(self.exchanges[exchange].rateLimit / 1000)

        return exchange_markets

    def get_default_exchanges(self):
        return self.exchanges
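# Usage sketch (config and market values are illustrative):
#
# config = {'binance': {'required': {'enabled': True}}}
# interface = ExchangeInterface(config)
# candles = interface.get_historical_data('BTC/USDT', 'binance', '1h',
#                                          max_periods=50)
# markets = interface.get_exchange_markets(exchanges=['binance'],
#                                          markets=['BTC/USDT'])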
Example #43
class Uploads:
    """Class dedicated to all "uploads" related endpoints"""

    # Retry until the unpack agent is finished
    @retry(retry=retry_if_exception_type(TryAgain),
           stop=stop_after_attempt(10))
    def detail_upload(self,
                      upload_id: int,
                      group: str = None,
                      wait_time: int = 0) -> Upload:
        """Get detailled information about an upload

        API Endpoint: GET /uploads/{id}

        Get information about a given upload. If the upload is not ready yet, wait
        ``wait_time`` seconds between attempts, or use the ``Retry-After`` header to
        determine how long to wait.

        If ``wait_time`` is 0, the time interval specified by the ``Retry-After`` header is used.

        The function stops trying after **10 attempts**.

        :Examples:

        >>> # Wait up to 20 minutes until the upload is ready
        >>> long_upload = detail_upload(1, wait_time=120)

        >>> # Wait up to 5 minutes until the upload is ready
        >>> long_upload = detail_upload(1, wait_time=30)

        :param upload_id: the id of the upload
        :param group: the group the upload shall belong to
        :param wait_time: use a customized upload wait time instead of Retry-After (in seconds, default: 0)
        :type upload_id: int
        :type group: string
        :type wait_time: int
        :return: the upload data
        :rtype: Upload
        :raises FossologyApiError: if the REST call failed
        :raises AuthorizationError: if the user can't access the group
        """
        headers = {}
        if group:
            headers["groupName"] = group
        response = self.session.get(f"{self.api}/uploads/{upload_id}",
                                    headers=headers)

        if response.status_code == 200:
            logger.debug(f"Got details for upload {upload_id}")
            return Upload.from_json(response.json())

        elif response.status_code == 403:
            description = f"Getting details for upload {upload_id} {get_options(group)}not authorized"
            raise AuthorizationError(description, response)

        elif response.status_code == 503:
            if not wait_time:
                wait_time = response.headers["Retry-After"]
            logger.debug(
                f"Retry GET upload {upload_id} after {wait_time} seconds: {response.json()['message']}"
            )
            time.sleep(int(wait_time))
            raise TryAgain

        else:
            description = f"Error while getting details for upload {upload_id}"
            raise FossologyApiError(description, response)

    def upload_file(  # noqa: C901
        self,
        folder,
        file=None,
        vcs=None,
        url=None,
        server=None,
        description=None,
        access_level=None,
        ignore_scm=False,
        group=None,
        wait_time=0,
    ):
        """Upload a package to FOSSology

        API Endpoint: POST /uploads

        Perform a file, VCS or URL upload and get information about the upload using :func:`~fossology.uploads.Uploads.detail_upload` and passing the ``wait_time`` argument.

        See description of :func:`~fossology.uploads.Uploads.detail_upload` to configure how long the client shall wait for the upload to be ready.

        :Example for a file upload:

        >>> from fossology import Fossology
        >>> from fossology.obj import AccessLevel
        >>> foss = Fossology(FOSS_URL, FOSS_TOKEN, username)
        >>> my_upload = foss.upload_file(
                foss.rootFolder,
                file="my-package.zip",
                description="My product package",
                access_level=AccessLevel.PUBLIC,
            )

        :Example for a VCS upload:

        >>> vcs = {
                "vcsType": "git",
                "vcsUrl": "https://github.com/fossology/fossology-python",
                "vcsName": "fossology-python-github-master",
                "vcsUsername": "",
                "vcsPassword": "",
            }
        >>> vcs_upload = foss.upload_file(
                foss.rootFolder,
                vcs=vcs,
                description="Upload from VCS",
                access_level=AccessLevel.PUBLIC,
            )

        :Example for a URL upload:

        >>> url = {
                "url": "https://github.com/fossology/fossology-python/archive/master.zip",
                "name": "fossology-python-master.zip",
                "accept": "zip",
                "reject": "",
                "maxRecursionDepth": "1",
            }
        >>> url_upload = foss.upload_file(
                foss.rootFolder,
                url=url,
                description="Upload from URL",
                access_level=AccessLevel.PUBLIC,
            )

        :Example for a SERVER upload:

        >>> server = {
                "path": "/tmp/fossology-python",
                "name": "fossology-python",
            }
        >>> server_upload = foss.upload_file(
                foss.rootFolder,
                server=server,
                description="Upload from SERVER",
                access_level=AccessLevel.PUBLIC,
            )


        :param folder: the upload Fossology folder
        :param file: the local path of the file to be uploaded
        :param vcs: the VCS specification to upload from an online repository
        :param url: the URL specification to upload from a url
        :param server: the SERVER specification to upload from fossology server
        :param description: description of the upload (default: None)
        :param access_level: access permissions of the upload (default: protected)
        :param ignore_scm: ignore SCM files (Git, SVN, TFS) (default: False)
        :param group: the group name to chose while uploading the file (default: None)
        :param wait_time: use a customized upload wait time instead of Retry-After (in seconds, default: 0)
        :type folder: Folder
        :type file: string
        :type vcs: dict()
        :type url: dict()
        :type server: dict()
        :type description: string
        :type access_level: AccessLevel
        :type ignore_scm: boolean
        :type group: string
        :type wait_time: int
        :return: the upload data
        :rtype: Upload
        :raises FossologyApiError: if the REST call failed
        :raises AuthorizationError: if the user can't access the group
        """
        headers = {"folderId": str(folder.id)}
        if description:
            headers["uploadDescription"] = description
        if access_level:
            headers["public"] = access_level.value
        if ignore_scm:
            headers["ignoreScm"] = "true"
        if group:
            headers["groupName"] = group

        if file:
            headers["uploadType"] = "server"
            with open(file, "rb") as fp:
                files = {"fileInput": fp}
                response = self.session.post(f"{self.api}/uploads",
                                             files=files,
                                             headers=headers)
        elif vcs or url or server:
            if vcs:
                headers["uploadType"] = "vcs"
                data = json.dumps(vcs)
            elif url:
                headers["uploadType"] = "url"
                data = json.dumps(url)
            elif server:
                headers["uploadType"] = "server"
                data = json.dumps(server)
            headers["Content-Type"] = "application/json"
            response = self.session.post(f"{self.api}/uploads",
                                         data=data,
                                         headers=headers)
        else:
            logger.info(
                "No file, VCS, URL or server option given, not uploading anything"
            )
            return

        if file:
            source = f"{file}"
        elif vcs:
            source = vcs.get("vcsName")
        elif url:
            source = url.get("name")
        elif server:
            source = server.get("name")

        if response.status_code == 201:
            try:
                upload = self.detail_upload(response.json()["message"],
                                            wait_time)
                if upload.filesize:
                    logger.info(
                        f"Upload {upload.uploadname} ({upload.filesize}) "
                        f"has been uploaded on {upload.uploaddate}")
                else:
                    logger.info(
                        f"Upload {upload.uploadname} ({upload.hash.size}) "
                        f"has been uploaded on {upload.uploaddate}")
                return upload
            except TryAgain:
                description = f"Upload of {source} failed"
                raise FossologyApiError(description, response)

        elif response.status_code == 403:
            description = (
                f"Upload of {source} {get_options(group, folder)}not authorized"
            )
            raise AuthorizationError(description, response)

        elif server and response.status_code == 500:
            description = (
                f"Upload {description} could not be performed; "
                f"did you add a prefix for '{server['path']}' in Fossology config "
                f"variable 'Admin->Customize->Whitelist for serverupload'? "
                f"Has fossy user read access to {server['path']}?")
            raise FossologyApiError(description, response)

        else:
            description = f"Upload {description} could not be performed"
            raise FossologyApiError(description, response)

    @retry(retry=retry_if_exception_type(TryAgain), stop=stop_after_attempt(3))
    def upload_summary(self, upload, group=None):
        """Get clearing information about an upload

        API Endpoint: GET /uploads/{id}/summary

        :param upload: the upload to gather data from
        :param group: the group name to chose while accessing an upload (default: None)
        :type: Upload
        :type group: string
        :return: the upload summary data
        :rtype: Summary
        :raises FossologyApiError: if the REST call failed
        :raises AuthorizationError: if the user can't access the group
        """
        headers = {}
        if group:
            headers["groupName"] = group
        response = self.session.get(f"{self.api}/uploads/{upload.id}/summary",
                                    headers=headers)

        if response.status_code == 200:
            return Summary.from_json(response.json())

        elif response.status_code == 403:
            description = f"Getting summary of upload {upload.id} {get_options(group)}not authorized"
            raise AuthorizationError(description, response)

        elif response.status_code == 503:
            logger.debug(
                f"Unpack agent for {upload.uploadname} (id={upload.id}) didn't start yet"
            )
            time.sleep(3)
            raise TryAgain
        else:
            description = f"No summary for upload {upload.uploadname} (id={upload.id})"
            raise FossologyApiError(description, response)

    @retry(retry=retry_if_exception_type(TryAgain), stop=stop_after_attempt(3))
    def upload_licenses(self,
                        upload,
                        group: str = None,
                        agent=None,
                        containers=False):
        """Get clearing information about an upload

        API Endpoint: GET /uploads/{id}/licenses

        The response does not generate Python objects yet, the plain JSON data is simply returned.

        :param upload: the upload to gather data from
        :param agent: the license agents to use (e.g. "nomos,monk,ninka,ojo,reportImport", default: "nomos")
        :param containers: whether to show containers or not (default: False)
        :param group: the group name to chose while accessing the upload (default: None)
        :type upload: Upload
        :type agent: string
        :type containers: boolean
        :type group: string
        :return: the list of licenses findings for the specified agent
        :rtype: list of Licenses
        :raises FossologyApiError: if the REST call failed
        :raises AuthorizationError: if the user can't access the group
        """
        headers = {}
        params = {}
        if group:
            headers["groupName"] = group
        if agent:
            params["agent"] = agent
        else:
            params["agent"] = agent = "nomos"
        if containers:
            params["containers"] = "true"

        response = self.session.get(f"{self.api}/uploads/{upload.id}/licenses",
                                    params=params,
                                    headers=headers)

        if response.status_code == 200:
            all_licenses = []
            scanned_files = response.json()
            for file_with_findings in scanned_files:
                file_licenses = Licenses.from_json(file_with_findings)
                all_licenses.append(file_licenses)
            return all_licenses

        elif response.status_code == 403:
            description = f"Getting license for upload {upload.id} {get_options(group)}not authorized"
            raise AuthorizationError(description, response)

        elif response.status_code == 412:
            description = f"Unable to get licenses from {agent} for {upload.uploadname} (id={upload.id})"
            raise FossologyApiError(description, response)

        elif response.status_code == 503:
            logger.debug(
                f"Unpack agent for {upload.uploadname} (id={upload.id}) didn't start yet"
            )
            time.sleep(3)
            raise TryAgain

        else:
            description = f"No licenses for upload {upload.uploadname} (id={upload.id})"
            raise FossologyApiError(description, response)

    def delete_upload(self, upload, group=None):
        """Delete an upload

        API Endpoint: DELETE /uploads/{id}

        :param upload: the upload to be deleted
        :param group: the group name to chose while deleting the upload (default: None)
        :type upload: Upload
        :type group: string
        :raises FossologyApiError: if the REST call failed
        :raises AuthorizationError: if the user can't access the group
        """
        headers = {}
        if group:
            headers["groupName"] = group
        response = self.session.delete(f"{self.api}/uploads/{upload.id}",
                                       headers=headers)

        if response.status_code == 202:
            logger.info(f"Upload {upload.id} has been scheduled for deletion")

        elif response.status_code == 403:
            description = (
                f"Deleting upload {upload.id} {get_options(group)}not authorized"
            )
            raise AuthorizationError(description, response)

        else:
            description = f"Unable to delete upload {upload.id}"
            raise FossologyApiError(description, response)

    def list_uploads(
        self,
        folder=None,
        group=None,
        recursive=True,
        page_size=100,
        page=1,
        all_pages=False,
    ):
        """Get all uploads available to the registered user

        API Endpoint: GET /uploads

        :param folder: only list uploads from the given folder
        :param group: list uploads from a specific group (not only your own uploads) (default: None)
        :param recursive: whether to list uploads from children folders or not (default: True)
        :param page_size: limit the number of uploads per page (default: 100)
        :param page: the number of the page to fetch uploads from (default: 1)
        :param all_pages: get all uploads (default: False)
        :type folder: Folder
        :type group: string
        :type recursive: boolean
        :type page_size: int
        :type page: int
        :type all_pages: boolean
        :return: a tuple containing the list of uploads and the total number of pages
        :rtype: Tuple(list of Upload, int)
        :raises FossologyApiError: if the REST call failed
        :raises AuthorizationError: if the user can't access the group
        """
        params = {}
        headers = {"limit": str(page_size)}
        if group:
            headers["groupName"] = group
        if folder:
            params["folderId"] = folder.id
        if not recursive:
            params["recursive"] = "false"

        uploads_list = list()
        if all_pages:
            # will be reset after the total number of pages has been retrieved from the API
            x_total_pages = 2
        else:
            x_total_pages = page
        while page <= x_total_pages:
            headers["page"] = str(page)
            response = self.session.get(f"{self.api}/uploads",
                                        headers=headers,
                                        params=params)
            if response.status_code == 200:
                for upload in response.json():
                    uploads_list.append(Upload.from_json(upload))
                x_total_pages = int(response.headers.get("X-TOTAL-PAGES", 0))
                if not all_pages or x_total_pages == 0:
                    logger.info(
                        f"Retrieved page {page} of uploads, {x_total_pages} pages are in total available"
                    )
                    return uploads_list, x_total_pages
                page += 1

            elif response.status_code == 403:
                description = f"Retrieving list of uploads {get_options(group, folder)}not authorized"
                raise AuthorizationError(description, response)

            else:
                description = f"Unable to retrieve the list of uploads from page {page}"
                raise FossologyApiError(description, response)
        logger.info(f"Retrieved all {x_total_pages} of uploads")
        return uploads_list, x_total_pages
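        # Usage sketch (assumes ``foss`` is an authenticated Fossology client,
        # as in the upload_file docstring examples above):
        #
        # uploads, total_pages = foss.list_uploads(folder=foss.rootFolder,
        #                                          all_pages=True)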

    def move_upload(self, upload, folder, group=None):
        """Move an upload to another folder

        API Endpoint: PATCH /uploads/{id}

        :param upload: the Upload to be copied in another folder
        :param folder: the destination Folder
        :param group: the group name to chose while changing the upload (default: None)
        :type upload: Upload
        :type folder: Folder
        :type group: string
        :raises FossologyApiError: if the REST call failed
        :raises AuthorizationError: if the user can't access the group or folder
        """
        headers = {"folderId": str(folder.id)}
        if group:
            headers["groupName"] = group
        response = self.session.patch(f"{self.api}/uploads/{upload.id}",
                                      headers=headers)

        if response.status_code == 202:
            logger.info(
                f"Upload {upload.uploadname} has been moved to {folder.name}")

        elif response.status_code == 403:
            description = (
                f"Moving upload {upload.id} {get_options(group, folder)}not authorized"
            )
            raise AuthorizationError(description, response)

        else:
            description = f"Unable to move upload {upload.uploadname} to {folder.name}"
            raise FossologyApiError(description, response)

    def copy_upload(self, upload, folder):
        """Copy an upload in another folder

        API Endpoint: PUT /uploads/{id}

        :param upload: the Upload to be copied in another folder
        :param folder: the destination Folder
        :type upload: Upload
        :type folder: Folder
        :raises FossologyApiError: if the REST call failed
        """
        headers = {"folderId": str(folder.id)}
        response = self.session.put(f"{self.api}/uploads/{upload.id}",
                                    headers=headers)

        if response.status_code == 202:
            logger.info(
                f"Upload {upload.uploadname} has been copied to {folder.name}")

        elif response.status_code == 403:
            description = f"Copy upload {upload.id} {get_options(folder)}not authorized"
            raise AuthorizationError(description, response)

        else:
            description = f"Unable to copy upload {upload.uploadname} to {folder.name}"
            raise FossologyApiError(description, response)
Example #44
import time

import tenacity
from tenacity import retry


# The original snippet begins mid-function; the enclosing definition (name
# assumed) is a millisecond-timestamp helper used by the test functions below.
def current_time_ms():
    return int(round(time.time() * 1000))


@retry(wait=tenacity.wait_fixed(0.05),
       retry=tenacity.retry_if_result(lambda result: result is None))
def _retryable_test_with_wait(thing):
    return thing.go()


@retry(stop=tenacity.stop_after_attempt(3),
       retry=tenacity.retry_if_result(lambda result: result is None))
def _retryable_test_with_stop(thing):
    return thing.go()


@retry(retry=tenacity.retry_if_exception_type(IOError))
def _retryable_test_with_exception_type_io(thing):
    return thing.go()


@retry(
    stop=tenacity.stop_after_attempt(3),
    retry=tenacity.retry_if_exception_type(IOError))
def _retryable_test_with_exception_type_io_attempt_limit(thing):
    return thing.go()


@retry(retry=tenacity.retry_unless_exception_type(NameError))
def _retryable_test_with_unless_exception_type_name(thing):
    return thing.go()
import random


import tenacity


def do_something():
    if random.randint(0, 1) == 0:
        print('Failure')
        raise RuntimeError
    print('Success')


@tenacity.retry(wait=tenacity.wait_fixed(1), retry=tenacity.retry_if_exception_type(RuntimeError))
def do_something_and_retry():
    do_something()


do_something_and_retry()