Example #1
    def receive_messages(self, queue_name, count, wait_seconds_timeout, visibility_timeout):
        """
        Attempt to retrieve visible messages from a queue.

        If a message was read by a client and not deleted, it is considered
        to be "in-flight" and cannot be read again. We attempt to obtain
        ``count`` messages, but we may return fewer if messages are in-flight
        or there are simply not enough messages in the queue.

        :param string queue_name: The name of the queue to read from.
        :param int count: The maximum number of messages to retrieve.
        :param int visibility_timeout: The number of seconds the message should remain invisible to other queue readers.
        :param int wait_seconds_timeout: The duration (in seconds) for which the call waits for a message to arrive in
         the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds.
        """
        queue = self.get_queue(queue_name)
        result = []

        polling_end = unix_time() + wait_seconds_timeout

        # queue.messages only contains visible messages
        while True:
            for message in queue.messages:
                if not message.visible:
                    continue
                message.mark_received(
                    visibility_timeout=visibility_timeout
                )
                result.append(message)
                if len(result) >= count:
                    break

            if result or unix_time() > polling_end:
                break

        return result
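All of these examples lean on a unix_time helper that is not shown on this page. A minimal sketch consistent with both call styles used below (no argument for "now", or an explicit datetime such as unix_time(self.created_at)) might look like this; it is an assumption, not the project's actual code:

import datetime

def unix_time(dt=None):
    # Hypothetical helper: seconds since the Unix epoch as a float.
    # With no argument it uses the current UTC time.
    dt = dt or datetime.datetime.utcnow()
    epoch = datetime.datetime.utcfromtimestamp(0)
    return (dt - epoch).total_seconds()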
Example #2
    def receive_messages(self, queue_name, count, wait_seconds_timeout, visibility_timeout):
        """
        Attempt to retrieve visible messages from a queue.

        If a message was read by a client and not deleted, it is considered
        to be "in-flight" and cannot be read again. We attempt to obtain
        ``count`` messages, but we may return fewer if messages are in-flight
        or there are simply not enough messages in the queue.

        :param string queue_name: The name of the queue to read from.
        :param int count: The maximum number of messages to retrieve.
        :param int visibility_timeout: The number of seconds the message should remain invisible to other queue readers.
        :param int wait_seconds_timeout: The duration (in seconds) for which the call waits for a message to arrive in
         the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds.
        """
        queue = self.get_queue(queue_name)
        result = []

        polling_end = unix_time() + wait_seconds_timeout

        # queue.messages only contains visible messages
        while True:

            if result or (wait_seconds_timeout and unix_time() > polling_end):
                break

            if len(queue.messages) == 0:
                # we want to break here, otherwise it will be an infinite loop
                if wait_seconds_timeout == 0:
                    break

                import time
                time.sleep(0.001)
                continue

            messages_to_dlq = []
            for message in queue.messages:
                if not message.visible:
                    continue
                if queue.dead_letter_queue is not None and message.approximate_receive_count >= queue.redrive_policy['maxReceiveCount']:
                    messages_to_dlq.append(message)
                    continue

                message.mark_received(
                    visibility_timeout=visibility_timeout
                )
                result.append(message)
                if len(result) >= count:
                    break

            for message in messages_to_dlq:
                queue._messages.remove(message)
                queue.dead_letter_queue.add_message(message)

        return result
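The dead-letter branch above hinges on a single comparison against the queue's redrive policy. A distilled, self-contained illustration (values hypothetical):

redrive_policy = {'maxReceiveCount': 3}
approximate_receive_count = 3

# Mirrors the check above: once a message has been received
# maxReceiveCount times, it is moved to the dead-letter queue.
move_to_dlq = approximate_receive_count >= redrive_policy['maxReceiveCount']
assert move_to_dlq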
Example #3
 def create_account_status(self):
     return {
         'CreateAccountStatus': {
             'Id': self.create_account_status_id,
             'AccountName': self.name,
             'State': 'SUCCEEDED',
             'RequestedTimestamp': unix_time(self.create_time),
             'CompletedTimestamp': unix_time(self.create_time),
             'AccountId': self.id,
         }
     }
Example #4
 def __init__(self, partition_key, data, sequence_number, explicit_hash_key):
     self.partition_key = partition_key
     self.data = data
     self.sequence_number = sequence_number
     self.explicit_hash_key = explicit_hash_key
     self.created_at_datetime = datetime.datetime.utcnow()
     self.created_at = unix_time(self.created_at_datetime)
Example #5
    def __init__(self, name, region, **kwargs):
        self.name = name
        self.region = region
        self.tags = {}
        self.permissions = {}

        self._messages = []

        now = unix_time()
        self.created_timestamp = now
        self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region,
                                                                   self.name)
        self.dead_letter_queue = None

        # default settings for a non fifo queue
        defaults = {
            'ContentBasedDeduplication': 'false',
            'DelaySeconds': 0,
            'FifoQueue': 'false',
            'KmsDataKeyReusePeriodSeconds': 300,  # five minutes
            'KmsMasterKeyId': None,
            'MaximumMessageSize': int(64 << 10),
            'MessageRetentionPeriod': 86400 * 4,  # four days
            'Policy': None,
            'ReceiveMessageWaitTimeSeconds': 0,
            'RedrivePolicy': None,
            'VisibilityTimeout': 30,
        }

        defaults.update(kwargs)
        self._set_attributes(defaults, now)

        # Check some conditions
        if self.fifo_queue and not self.name.endswith('.fifo'):
            raise MessageAttributesInvalid('Queue name must end in .fifo for FIFO queues')
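For reference, the bit-shift and multiplication in the defaults above expand to plain numbers:

assert 64 << 10 == 65536      # MaximumMessageSize: 64 KiB
assert 86400 * 4 == 345600    # MessageRetentionPeriod: four days in seconds
assert 300 == 5 * 60          # KmsDataKeyReusePeriodSeconds: five minutes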
Example #6
def test_list_closed_workflow_executions():
    conn = setup_swf_environment()
    # Leave one workflow execution open to make sure it isn't displayed
    conn.start_workflow_execution(
        'test-domain', 'uid-abcd1234', 'test-workflow', 'v1.0'
    )
    # One closed workflow execution
    run_id = conn.start_workflow_execution(
        'test-domain', 'uid-abcd12345', 'test-workflow', 'v1.0'
    )['runId']
    conn.terminate_workflow_execution('test-domain', 'uid-abcd12345',
                                      details='some details',
                                      reason='a more complete reason',
                                      run_id=run_id)

    yesterday = datetime.now() - timedelta(days=1)
    oldest_date = unix_time(yesterday)
    response = conn.list_closed_workflow_executions(
        'test-domain',
        start_oldest_date=oldest_date,
        workflow_id='test-workflow')
    execution_infos = response['executionInfos']
    len(execution_infos).should.equal(1)
    closed_workflow = execution_infos[0]
    closed_workflow['workflowType'].should.equal({'version': 'v1.0',
                                                  'name': 'test-workflow'})
    closed_workflow.should.contain('startTimestamp')
    closed_workflow['execution']['workflowId'].should.equal('uid-abcd12345')
    closed_workflow['execution'].should.contain('runId')
    closed_workflow['cancelRequested'].should.be(False)
    closed_workflow['executionStatus'].should.equal('CLOSED')
Example #7
 def describe(self):
     results = {
         "Table": {
             "CreationDateTime": unix_time(self.created_at),
             "KeySchema": {
                 "HashKeyElement": {
                     "AttributeName": self.hash_key_attr,
                     "AttributeType": self.hash_key_type
                 },
             },
             "ProvisionedThroughput": {
                 "ReadCapacityUnits": self.read_capacity,
                 "WriteCapacityUnits": self.write_capacity
             },
             "TableName": self.name,
             "TableStatus": "ACTIVE",
             "ItemCount": len(self),
             "TableSizeBytes": 0,
         }
     }
     if self.has_range_key:
         results["Table"]["KeySchema"]["RangeKeyElement"] = {
             "AttributeName": self.range_key_attr,
             "AttributeType": self.range_key_type
         }
     return results
Example #8
    def __init__(self, name, region, **kwargs):
        self.name = name
        self.visibility_timeout = int(kwargs.get('VisibilityTimeout', 30))
        self.region = region

        self._messages = []

        now = unix_time()

        # kwargs can also have:
        # [Policy, RedrivePolicy]
        self.fifo_queue = kwargs.get('FifoQueue', 'false') == 'true'
        self.content_based_deduplication = kwargs.get('ContentBasedDeduplication', 'false') == 'true'
        self.kms_master_key_id = kwargs.get('KmsMasterKeyId', 'alias/aws/sqs')
        self.kms_data_key_reuse_period_seconds = int(kwargs.get('KmsDataKeyReusePeriodSeconds', 300))
        self.created_timestamp = now
        self.delay_seconds = int(kwargs.get('DelaySeconds', 0))
        self.last_modified_timestamp = now
        self.maximum_message_size = int(kwargs.get('MaximumMessageSize', 64 << 10))
        self.message_retention_period = int(kwargs.get('MessageRetentionPeriod', 86400 * 4))  # four days
        self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name)
        self.receive_message_wait_time_seconds = int(kwargs.get('ReceiveMessageWaitTimeSeconds', 0))

        # a wait_time_seconds of 0 means messages are returned immediately
        self.wait_time_seconds = int(kwargs.get('WaitTimeSeconds', 0))

        # Check some conditions
        if self.fifo_queue and not self.name.endswith('.fifo'):
            raise MessageAttributesInvalid('Queue name must end in .fifo for FIFO queues')
Example #9
 def complete(self, event_id, result=None):
     self.execution_status = "CLOSED"
     self.close_status = "COMPLETED"
     self.close_timestamp = unix_time()
     self._add_event(
         "WorkflowExecutionCompleted",
         decision_task_completed_event_id=event_id,
         result=result,
     )
Example #10
 def fail(self, event_id, details=None, reason=None):
     # TODO: implement length constraints on details/reason
     self.execution_status = "CLOSED"
     self.close_status = "FAILED"
     self.close_timestamp = unix_time()
     self._add_event(
         "WorkflowExecutionFailed",
         decision_task_completed_event_id=event_id,
         details=details,
         reason=reason,
     )
Example #11
 def describe(self):
     return {
         'Account': {
             'Id': self.id,
             'Arn': self.arn,
             'Email': self.email,
             'Name': self.name,
             'Status': self.status,
             'JoinedMethod': self.joined_method,
             'JoinedTimestamp': unix_time(self.create_time),
         }
     }
Example #12
 def start(self):
     self.start_timestamp = unix_time()
     self._add_event(
         "WorkflowExecutionStarted",
         child_policy=self.child_policy,
         execution_start_to_close_timeout=self.execution_start_to_close_timeout,
         # TODO: fix this hardcoded value
         parent_initiated_event_id=0,
         task_list=self.task_list,
         task_start_to_close_timeout=self.task_start_to_close_timeout,
         workflow_type=self.workflow_type,
     )
     self.schedule_decision_task()
Example #13
 def describe(self):
     results = {
         'Table': {
             'AttributeDefinitions': self.attr,
             'ProvisionedThroughput': self.throughput,
             'TableSizeBytes': 0,
             'TableName': self.name,
             'TableStatus': 'ACTIVE',
             'KeySchema': self.schema,
             'ItemCount': len(self),
             'CreationDateTime': unix_time(self.created_at),
             'GlobalSecondaryIndexes': [index for index in self.global_indexes],
         }
     }
     return results
Example #14
 def to_dict(self):
     key_dict = {
         "KeyMetadata": {
             "AWSAccountId": self.account_id,
             "Arn": self.arn,
             "CreationDate": "%d" % unix_time(),
             "Description": self.description,
             "Enabled": self.enabled,
             "KeyId": self.id,
             "KeyUsage": self.key_usage,
             "KeyState": self.key_state,
         }
     }
     if self.key_state == 'PendingDeletion':
         key_dict['KeyMetadata']['DeletionDate'] = iso_8601_datetime_without_milliseconds(self.deletion_date)
     return key_dict
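Note that "%d" % unix_time() above truncates the float timestamp to a whole-second string:

creation_date = "%d" % 1420113600.75   # hypothetical timestamp
assert creation_date == "1420113600"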
Example #15
 def describe(self):
     results = {
         "Table": {
             "AttributeDefinitions": self.attr,
             "ProvisionedThroughput": self.throughput,
             "TableSizeBytes": 0,
             "TableName": self.name,
             "TableStatus": "ACTIVE",
             "KeySchema": self.schema,
             "ItemCount": len(self),
             "CreationDateTime": unix_time(self.created_at),
             "GlobalSecondaryIndexes": [index for index in self.global_indexes],
             "LocalSecondaryIndexes": [index for index in self.indexes],
         }
     }
     return results
Example #16
 def __init__(self, activity_id, activity_type, scheduled_event_id,
              workflow_execution, timeouts, input=None):
     self.activity_id = activity_id
     self.activity_type = activity_type
     self.details = None
     self.input = input
     self.last_heartbeat_timestamp = unix_time()
     self.scheduled_event_id = scheduled_event_id
     self.started_event_id = None
     self.state = "SCHEDULED"
     self.task_token = str(uuid.uuid4())
     self.timeouts = timeouts
     self.timeout_type = None
     self.workflow_execution = workflow_execution
     # this is *not* necessarily coherent with workflow execution history,
     # but that shouldn't be a problem for tests
     self.scheduled_at = datetime.utcnow()
Example #17
    def __init__(self, name, visibility_timeout, wait_time_seconds, region):
        self.name = name
        self.visibility_timeout = visibility_timeout or 30
        self.region = region

        # a wait_time_seconds of 0 means messages are returned immediately
        self.wait_time_seconds = wait_time_seconds or 0
        self._messages = []

        now = unix_time()

        self.created_timestamp = now
        self.delay_seconds = 0
        self.last_modified_timestamp = now
        self.maximum_message_size = 64 << 10
        self.message_retention_period = 86400 * 4  # four days
        self.queue_arn = 'arn:aws:sqs:{0}:123456789012:{1}'.format(self.region, self.name)
        self.receive_message_wait_time_seconds = 0
Example #18
 def describe(self, base_key='TableDescription'):
     results = {
         base_key: {
             'AttributeDefinitions': self.attr,
             'ProvisionedThroughput': self.throughput,
             'TableSizeBytes': 0,
             'TableName': self.name,
             'TableStatus': 'ACTIVE',
             'TableArn': self.table_arn,
             'KeySchema': self.schema,
             'ItemCount': len(self),
             'CreationDateTime': unix_time(self.created_at),
             'GlobalSecondaryIndexes': [index for index in self.global_indexes],
             'LocalSecondaryIndexes': [index for index in self.indexes],
         }
     }
     if self.stream_specification and self.stream_specification['StreamEnabled']:
         results[base_key]['StreamSpecification'] = self.stream_specification
         if self.latest_stream_label:
             results[base_key]['LatestStreamLabel'] = self.latest_stream_label
             results[base_key]['LatestStreamArn'] = self.table_arn + '/stream/' + self.latest_stream_label
     return results
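The stream ARN above is plain string concatenation onto the table ARN; an illustration with hypothetical values:

table_arn = 'arn:aws:dynamodb:us-east-1:123456789012:table/users'
latest_stream_label = '2015-01-01T12:00:00.000'
stream_arn = table_arn + '/stream/' + latest_stream_label
assert stream_arn.endswith('table/users/stream/2015-01-01T12:00:00.000')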
Example #19
 def __init__(self, event_id, event_type, event_timestamp=None, **kwargs):
     if event_type not in SUPPORTED_HISTORY_EVENT_TYPES:
         raise NotImplementedError(
             "HistoryEvent does not implement attributes for type '{0}'".format(event_type)
         )
     self.event_id = event_id
     self.event_type = event_type
     if event_timestamp:
         self.event_timestamp = event_timestamp
     else:
         self.event_timestamp = unix_time()
     # pre-populate a dict: {"camelCaseKey": value}
     self.event_attributes = {}
     for key, value in kwargs.items():
         if value:
             camel_key = underscores_to_camelcase(key)
             if key == "task_list":
                 value = {"name": value}
             elif key == "workflow_type":
                 value = {"name": value.name, "version": value.version}
             elif key == "activity_type":
                 value = value.to_short_dict()
             self.event_attributes[camel_key] = value
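The underscores_to_camelcase helper is not shown on this page; a minimal sketch that matches the "camelCaseKey" behaviour described in the comment above (an assumption about the real helper):

def underscores_to_camelcase(name):
    # Hypothetical stand-in: 'task_list' -> 'taskList'
    head, *rest = name.split('_')
    return head + ''.join(word.capitalize() for word in rest)

assert underscores_to_camelcase('decision_task_completed_event_id') == \
    'decisionTaskCompletedEventId'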
Example #20
    def _set_attributes(self, attributes, now=None):
        if not now:
            now = unix_time()

        integer_fields = ('DelaySeconds', 'KmsDataKeyReusePeriodSeconds',
                          'MaximumMessageSize', 'MessageRetentionPeriod',
                          'ReceiveMessageWaitTimeSeconds', 'VisibilityTimeout')
        bool_fields = ('ContentBasedDeduplication', 'FifoQueue')

        for key, value in six.iteritems(attributes):
            if key in integer_fields:
                value = int(value)
            if key in bool_fields:
                value = value == "true"

            if key == 'RedrivePolicy' and value is not None:
                continue

            setattr(self, camelcase_to_underscores(key), value)

        if attributes.get('RedrivePolicy', None):
            self._setup_dlq(attributes['RedrivePolicy'])

        self.last_modified_timestamp = now
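Its counterpart camelcase_to_underscores is also not shown; a regex-based sketch (again an assumption) that covers the attribute names used above:

import re

def camelcase_to_underscores(name):
    # Hypothetical stand-in: 'VisibilityTimeout' -> 'visibility_timeout'
    return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower()

assert camelcase_to_underscores('VisibilityTimeout') == 'visibility_timeout'
assert camelcase_to_underscores('ContentBasedDeduplication') == \
    'content_based_deduplication'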
Example #21
    def schedule_activity_task(self, event_id, attributes):
        # Helper function to avoid repeating ourselves in the next sections
        def fail_schedule_activity_task(_type, _cause):
            # TODO: implement other possible failure mode: OPEN_ACTIVITIES_LIMIT_EXCEEDED
            # NB: some failure modes are not implemented and probably won't be implemented in
            # the future, such as ACTIVITY_CREATION_RATE_EXCEEDED or
            # OPERATION_NOT_PERMITTED
            self._add_event(
                "ScheduleActivityTaskFailed",
                activity_id=attributes["activityId"],
                activity_type=_type,
                cause=_cause,
                decision_task_completed_event_id=event_id,
            )
            self.should_schedule_decision_next = True

        activity_type = self.domain.get_type(
            "activity",
            attributes["activityType"]["name"],
            attributes["activityType"]["version"],
            ignore_empty=True,
        )
        if not activity_type:
            fake_type = ActivityType(attributes["activityType"]["name"],
                                     attributes["activityType"]["version"])
            fail_schedule_activity_task(fake_type,
                                        "ACTIVITY_TYPE_DOES_NOT_EXIST")
            return
        if activity_type.status == "DEPRECATED":
            fail_schedule_activity_task(activity_type,
                                        "ACTIVITY_TYPE_DEPRECATED")
            return
        if any(at for at in self.activity_tasks
               if at.activity_id == attributes["activityId"]):
            fail_schedule_activity_task(activity_type,
                                        "ACTIVITY_ID_ALREADY_IN_USE")
            return

        # find task list or default task list, else fail
        task_list = attributes.get("taskList", {}).get("name")
        if not task_list and activity_type.task_list:
            task_list = activity_type.task_list
        if not task_list:
            fail_schedule_activity_task(activity_type,
                                        "DEFAULT_TASK_LIST_UNDEFINED")
            return

        # find timeouts or default timeout, else fail
        timeouts = {}
        for _type in [
                "scheduleToStartTimeout", "scheduleToCloseTimeout",
                "startToCloseTimeout", "heartbeatTimeout"
        ]:
            default_key = "default_task_" + camelcase_to_underscores(_type)
            default_value = getattr(activity_type, default_key)
            timeouts[_type] = attributes.get(_type, default_value)
            if not timeouts[_type]:
                error_key = default_key.replace("default_task_", "default_")
                fail_schedule_activity_task(
                    activity_type, "{0}_UNDEFINED".format(error_key.upper()))
                return

        # Only add event and increment counters now that nothing went wrong
        evt = self._add_event(
            "ActivityTaskScheduled",
            activity_id=attributes["activityId"],
            activity_type=activity_type,
            control=attributes.get("control"),
            decision_task_completed_event_id=event_id,
            heartbeat_timeout=attributes.get("heartbeatTimeout"),
            input=attributes.get("input"),
            schedule_to_close_timeout=attributes.get("scheduleToCloseTimeout"),
            schedule_to_start_timeout=attributes.get("scheduleToStartTimeout"),
            start_to_close_timeout=attributes.get("startToCloseTimeout"),
            task_list=task_list,
            task_priority=attributes.get("taskPriority"),
        )
        task = ActivityTask(
            activity_id=attributes["activityId"],
            activity_type=activity_type,
            input=attributes.get("input"),
            scheduled_event_id=evt.event_id,
            workflow_execution=self,
            timeouts=timeouts,
        )
        self.domain.add_to_activity_task_list(task_list, task)
        self.open_counts["openActivityTasks"] += 1
        self.latest_activity_task_timestamp = unix_time()
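The timeout resolution above prefers an explicit decision attribute and falls back to the activity type's registered default, failing with a *_UNDEFINED cause when neither exists. Distilled (names and values hypothetical):

attributes = {'scheduleToStartTimeout': '30'}
defaults = {'schedule_to_start': '60', 'heartbeat': None}

timeout = attributes.get('scheduleToStartTimeout', defaults['schedule_to_start'])
assert timeout == '30'                 # explicit attribute wins

heartbeat = attributes.get('heartbeatTimeout', defaults['heartbeat'])
assert heartbeat is None               # neither given: scheduling must fail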
Example #22
 def __init__(self, partition_key, data, sequence_number, explicit_hash_key):
     self.partition_key = partition_key
     self.data = data
     self.sequence_number = sequence_number
     self.explicit_hash_key = explicit_hash_key
     self.created_at = unix_time()
Example #23
    def receive_message(
        self,
        queue_name,
        count,
        wait_seconds_timeout,
        visibility_timeout,
        message_attribute_names=None,
    ):
        # Attempt to retrieve visible messages from a queue.

        # If a message was read by a client and not deleted, it is considered
        # to be "in-flight" and cannot be read again. We attempt to obtain
        # ``count`` messages, but we may return fewer if messages are
        # in-flight or there are simply not enough messages in the queue.

        if message_attribute_names is None:
            message_attribute_names = []
        queue = self.get_queue(queue_name)
        result = []
        previous_result_count = len(result)

        polling_end = unix_time() + wait_seconds_timeout
        currently_pending_groups = deepcopy(queue.pending_message_groups)

        # queue.messages only contains visible messages
        while True:

            if result or (wait_seconds_timeout and unix_time() > polling_end):
                break

            messages_to_dlq = []

            for message in queue.messages:
                if not message.visible:
                    continue

                if message in queue.pending_messages:
                    # The message is pending but is visible again, so the
                    # consumer must have timed out.
                    queue.pending_messages.remove(message)
                    currently_pending_groups = deepcopy(
                        queue.pending_message_groups)

                if message.group_id and queue.fifo_queue:
                    if message.group_id in currently_pending_groups:
                        # A previous call is still processing messages in this group, so we cannot deliver this one.
                        continue

                if (queue.dead_letter_queue is not None
                        and queue.redrive_policy
                        and message.approximate_receive_count >=
                        queue.redrive_policy["maxReceiveCount"]):
                    messages_to_dlq.append(message)
                    continue

                queue.pending_messages.add(message)
                message.mark_received(visibility_timeout=visibility_timeout)
                # Create deepcopy to not mutate the message state when filtering for attributes
                message_copy = deepcopy(message)
                _filter_message_attributes(message_copy,
                                           message_attribute_names)
                if not self.is_message_valid_based_on_retention_period(
                        queue_name, message):
                    break
                result.append(message_copy)
                if len(result) >= count:
                    break

            for message in messages_to_dlq:
                queue._messages.remove(message)
                queue.dead_letter_queue.add_message(message)

            if previous_result_count == len(result):
                if wait_seconds_timeout == 0:
                    # There is no wait timeout and we have added no additional
                    # results, so break to avoid an infinite loop.
                    break

                import time

                time.sleep(0.01)
                continue

            previous_result_count = len(result)

        return result
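The FIFO branch above guarantees that only one in-flight batch holds messages from a given message group. The gate in isolation (values hypothetical):

pending_message_groups = {'group-a'}

# A message from a group that already has pending messages is skipped.
assert 'group-a' in pending_message_groups      # not deliverable yet
assert 'group-b' not in pending_message_groups  # deliverable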
Example #24
 def start(self, started_event_id, previous_started_event_id=None):
     self.state = "STARTED"
     self.started_timestamp = unix_time()
     self.started_event_id = started_event_id
     self.previous_started_event_id = previous_started_event_id
Example #25
    def __init__(
        self,
        name,
        region,
        authentication_strategy,
        auto_minor_version_upgrade,
        configuration,
        deployment_mode,
        encryption_options,
        engine_type,
        engine_version,
        host_instance_type,
        ldap_server_metadata,
        logs,
        maintenance_window_start_time,
        publicly_accessible,
        security_groups,
        storage_type,
        subnet_ids,
        users,
    ):
        self.name = name
        self.id = get_random_hex(6)
        self.arn = f"arn:aws:mq:{region}:{ACCOUNT_ID}:broker:{self.id}"
        self.state = "RUNNING"
        self.created = unix_time()

        self.authentication_strategy = authentication_strategy
        self.auto_minor_version_upgrade = auto_minor_version_upgrade
        self.deployment_mode = deployment_mode
        self.encryption_options = encryption_options
        if not self.encryption_options:
            self.encryption_options = {"useAwsOwnedKey": True}
        self.engine_type = engine_type
        self.engine_version = engine_version
        self.host_instance_type = host_instance_type
        self.ldap_server_metadata = ldap_server_metadata
        self.logs = logs
        if "general" not in self.logs:
            self.logs["general"] = False
        if "audit" not in self.logs:
            if self.engine_type.upper() == "ACTIVEMQ":
                self.logs["audit"] = False
        self.maintenance_window_start_time = maintenance_window_start_time
        if not self.maintenance_window_start_time:
            self.maintenance_window_start_time = {
                "dayOfWeek": "Sunday",
                "timeOfDay": "00:00",
                "timeZone": "UTC",
            }
        self.publicly_accessible = publicly_accessible
        self.security_groups = security_groups
        self.storage_type = storage_type
        self.subnet_ids = subnet_ids
        if not self.subnet_ids:
            if self.deployment_mode == "CLUSTER_MULTI_AZ":
                self.subnet_ids = [
                    "default-az1",
                    "default-az2",
                    "default-az3",
                    "default-az4",
                ]
            elif self.deployment_mode == "ACTIVE_STANDBY_MULTI_AZ":
                self.subnet_ids = ["active-subnet", "standby-subnet"]
            else:
                self.subnet_ids = ["default-subnet"]

        self.users = dict()
        for user in users:
            self.create_user(
                username=user["username"],
                groups=user.get("groups", []),
                console_access=user.get("consoleAccess", False),
            )

        if self.engine_type.upper() == "RABBITMQ":
            self.configurations = None
        else:
            current_config = configuration or {
                "id": f"c-{get_random_hex(6)}",
                "revision": 1,
            }
            self.configurations = {
                "current": current_config,
                "history": [],
            }
        if self.engine_type.upper() == "RABBITMQ":
            console_url = f"https://0000.mq.{region}.amazonaws.com"
            endpoints = ["amqps://mockmq:5671"]
        else:
            console_url = f"https://0000.mq.{region}.amazonaws.com:8162"
            endpoints = [
                "ssl://mockmq:61617",
                "amqp+ssl://mockmq:5671",
                "stomp+ssl://mockmq:61614",
                "mqtt+ssl://mockmq:8883",
                "wss://mockmq:61619",
            ]
        self.instances = [{
            "consoleURL": console_url,
            "endpoints": endpoints,
            "ipAddress": "192.168.0.1",
        }]

        if deployment_mode == "ACTIVE_STANDBY_MULTI_AZ":
            self.instances.append({
                "consoleURL": console_url,
                "endpoints": endpoints,
                "ipAddress": "192.168.0.2",
            })
Example #26
def test_unix_time():
    unix_time().should.equal(1420113600.0)
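1420113600.0 is 2015-01-01 12:00:00 UTC, so this test presumably runs under a frozen clock (e.g. freezegun) pinned to that instant; the expected value checks out:

import datetime

frozen = datetime.datetime(2015, 1, 1, 12, 0, 0)
epoch = datetime.datetime.utcfromtimestamp(0)
assert (frozen - epoch).total_seconds() == 1420113600.0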
Example #27
 def __init__(self):
     self.settings = dict()
     self.last_modified = unix_time()
Example #28
 def reached(self):
     return unix_time() >= self.timestamp
Example #29
 def start(self, started_event_id):
     self.state = "STARTED"
     self.started_timestamp = unix_time()
     self.started_event_id = started_event_id
Example #30
 def __init__(self, consumer_name, region_name, stream_arn):
     self.consumer_name = consumer_name
     self.created = unix_time()
     self.stream_arn = stream_arn
     stream_name = stream_arn.split("/")[-1]
     self.consumer_arn = f"arn:aws:kinesis:{region_name}:{ACCOUNT_ID}:stream/{stream_name}/consumer/{consumer_name}"
Example #31
 def describe(self):
     return {
         "ServicePrincipal": self.service_principal,
         "DateEnabled": unix_time(self.date_enabled),
     }
Example #32
    def describe(self):
        admin = self.account.describe()
        admin["DelegationEnabledDate"] = unix_time(self.enabled_date)

        return admin
Example #33
File: models.py Project: vklab/moto
    def put_events(self, events):
        num_events = len(events)

        if num_events > 10:
            # the actual error message is longer; its Value list contains all of the put events
            raise ValidationException(
                "1 validation error detected: "
                "Value '[PutEventsRequestEntry]' at 'entries' failed to satisfy constraint: "
                "Member must have length less than or equal to 10")

        entries = []
        for event in events:
            if "Source" not in event:
                entries.append({
                    "ErrorCode":
                    "InvalidArgument",
                    "ErrorMessage":
                    "Parameter Source is not valid. Reason: Source is a required argument.",
                })
            elif "DetailType" not in event:
                entries.append({
                    "ErrorCode":
                    "InvalidArgument",
                    "ErrorMessage":
                    "Parameter DetailType is not valid. Reason: DetailType is a required argument.",
                })
            elif "Detail" not in event:
                entries.append({
                    "ErrorCode":
                    "InvalidArgument",
                    "ErrorMessage":
                    "Parameter Detail is not valid. Reason: Detail is a required argument.",
                })
            else:
                try:
                    json.loads(event["Detail"])
                except ValueError:  # json.JSONDecodeError exists since Python 3.5
                    entries.append({
                        "ErrorCode": "MalformedDetail",
                        "ErrorMessage": "Detail is malformed.",
                    })
                    continue

                event_id = str(uuid4())
                entries.append({"EventId": event_id})

                # if 'EventBusName' is not explicitly set, the event goes to the default bus
                event_bus_name = event.get("EventBusName", "default")

                for rule in self.rules.values():
                    rule.send_to_targets(
                        event_bus_name,
                        {
                            "version": "0",
                            "id": event_id,
                            "detail-type": event["DetailType"],
                            "source": event["Source"],
                            "account": ACCOUNT_ID,
                            "time": event.get("Time",
                                              unix_time(datetime.utcnow())),
                            "region": self.region_name,
                            "resources": event.get("Resources", []),
                            "detail": json.loads(event["Detail"]),
                        },
                    )

        return entries
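The Detail validation above relies on json.loads raising ValueError for malformed input; the same check in isolation:

import json

def detail_error(detail):
    # Mirrors the malformed-Detail branch above.
    try:
        json.loads(detail)
    except ValueError:
        return {"ErrorCode": "MalformedDetail",
                "ErrorMessage": "Detail is malformed."}
    return None

assert detail_error('{"key": "value"}') is None
assert detail_error('not json')["ErrorCode"] == "MalformedDetail"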
Example #34
    def schedule_activity_task(self, event_id, attributes):
        # Helper function to avoid repeating ourselves in the next sections
        def fail_schedule_activity_task(_type, _cause):
            # TODO: implement other possible failure mode: OPEN_ACTIVITIES_LIMIT_EXCEEDED
            # NB: some failure modes are not implemented and probably won't be implemented in
            # the future, such as ACTIVITY_CREATION_RATE_EXCEEDED or
            # OPERATION_NOT_PERMITTED
            self._add_event(
                "ScheduleActivityTaskFailed",
                activity_id=attributes["activityId"],
                activity_type=_type,
                cause=_cause,
                decision_task_completed_event_id=event_id,
            )
            self.should_schedule_decision_next = True

        activity_type = self.domain.get_type(
            "activity",
            attributes["activityType"]["name"],
            attributes["activityType"]["version"],
            ignore_empty=True,
        )
        if not activity_type:
            fake_type = ActivityType(attributes["activityType"]["name"],
                                     attributes["activityType"]["version"])
            fail_schedule_activity_task(fake_type,
                                        "ACTIVITY_TYPE_DOES_NOT_EXIST")
            return
        if activity_type.status == "DEPRECATED":
            fail_schedule_activity_task(activity_type,
                                        "ACTIVITY_TYPE_DEPRECATED")
            return
        if any(at for at in self.activity_tasks if at.activity_id == attributes["activityId"]):
            fail_schedule_activity_task(activity_type,
                                        "ACTIVITY_ID_ALREADY_IN_USE")
            return

        # find task list or default task list, else fail
        task_list = attributes.get("taskList", {}).get("name")
        if not task_list and activity_type.task_list:
            task_list = activity_type.task_list
        if not task_list:
            fail_schedule_activity_task(activity_type,
                                        "DEFAULT_TASK_LIST_UNDEFINED")
            return

        # find timeouts or default timeout, else fail
        timeouts = {}
        for _type in ["scheduleToStartTimeout", "scheduleToCloseTimeout", "startToCloseTimeout", "heartbeatTimeout"]:
            default_key = "default_task_" + camelcase_to_underscores(_type)
            default_value = getattr(activity_type, default_key)
            timeouts[_type] = attributes.get(_type, default_value)
            if not timeouts[_type]:
                error_key = default_key.replace("default_task_", "default_")
                fail_schedule_activity_task(activity_type,
                                            "{0}_UNDEFINED".format(error_key.upper()))
                return

        # Only add event and increment counters now that nothing went wrong
        evt = self._add_event(
            "ActivityTaskScheduled",
            activity_id=attributes["activityId"],
            activity_type=activity_type,
            control=attributes.get("control"),
            decision_task_completed_event_id=event_id,
            heartbeat_timeout=attributes.get("heartbeatTimeout"),
            input=attributes.get("input"),
            schedule_to_close_timeout=attributes.get("scheduleToCloseTimeout"),
            schedule_to_start_timeout=attributes.get("scheduleToStartTimeout"),
            start_to_close_timeout=attributes.get("startToCloseTimeout"),
            task_list=task_list,
            task_priority=attributes.get("taskPriority"),
        )
        task = ActivityTask(
            activity_id=attributes["activityId"],
            activity_type=activity_type,
            input=attributes.get("input"),
            scheduled_event_id=evt.event_id,
            workflow_execution=self,
            timeouts=timeouts,
        )
        self.domain.add_to_activity_task_list(task_list, task)
        self.open_counts["openActivityTasks"] += 1
        self.latest_activity_task_timestamp = unix_time()
Example #35
 def reset_heartbeat_clock(self):
     self.last_heartbeat_timestamp = unix_time()
Example #36
    def receive_messages(self, queue_name, count, wait_seconds_timeout,
                         visibility_timeout):
        """
        Attempt to retrieve visible messages from a queue.

        If a message was read by a client and not deleted, it is considered
        to be "in-flight" and cannot be read again. We attempt to obtain
        ``count`` messages, but we may return fewer if messages are in-flight
        or there are simply not enough messages in the queue.

        :param string queue_name: The name of the queue to read from.
        :param int count: The maximum number of messages to retrieve.
        :param int visibility_timeout: The number of seconds the message should remain invisible to other queue readers.
        :param int wait_seconds_timeout: The duration (in seconds) for which the call waits for a message to arrive in
         the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds.
        """
        queue = self.get_queue(queue_name)
        result = []
        previous_result_count = len(result)

        polling_end = unix_time() + wait_seconds_timeout

        # queue.messages only contains visible messages
        while True:

            if result or (wait_seconds_timeout and unix_time() > polling_end):
                break

            messages_to_dlq = []

            for message in queue.messages:
                if not message.visible:
                    continue

                if message in queue.pending_messages:
                    # The message is pending but is visible again, so the
                    # consumer must have timed out.
                    queue.pending_messages.remove(message)

                if message.group_id and queue.fifo_queue:
                    if message.group_id in queue.pending_message_groups:
                        # There is already one active message with the same
                        # group, so we cannot deliver this one.
                        continue

                queue.pending_messages.add(message)

                if (queue.dead_letter_queue is not None
                        and message.approximate_receive_count >=
                        queue.redrive_policy["maxReceiveCount"]):
                    messages_to_dlq.append(message)
                    continue

                message.mark_received(visibility_timeout=visibility_timeout)
                result.append(message)
                if len(result) >= count:
                    break

            for message in messages_to_dlq:
                queue._messages.remove(message)
                queue.dead_letter_queue.add_message(message)

            if previous_result_count == len(result):
                if wait_seconds_timeout == 0:
                    # There is no wait timeout and we have added no additional
                    # results, so break to avoid an infinite loop.
                    break

                import time

                time.sleep(0.01)
                continue

            previous_result_count = len(result)

        return result
Example #37
    def receive_messages(self, queue_name, count, wait_seconds_timeout, visibility_timeout):
        """
        Attempt to retrieve visible messages from a queue.

        If a message was read by a client and not deleted, it is considered
        to be "in-flight" and cannot be read again. We attempt to obtain
        ``count`` messages, but we may return fewer if messages are in-flight
        or there are simply not enough messages in the queue.

        :param string queue_name: The name of the queue to read from.
        :param int count: The maximum number of messages to retrieve.
        :param int visibility_timeout: The number of seconds the message should remain invisible to other queue readers.
        :param int wait_seconds_timeout: The duration (in seconds) for which the call waits for a message to arrive in
         the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds.
        """
        queue = self.get_queue(queue_name)
        result = []
        previous_result_count = len(result)

        polling_end = unix_time() + wait_seconds_timeout

        # queue.messages only contains visible messages
        while True:

            if result or (wait_seconds_timeout and unix_time() > polling_end):
                break

            messages_to_dlq = []

            for message in queue.messages:
                if not message.visible:
                    continue

                if message in queue.pending_messages:
                    # The message is pending but is visible again, so the
                    # consumer must have timed out.
                    queue.pending_messages.remove(message)

                if message.group_id and queue.fifo_queue:
                    if message.group_id in queue.pending_message_groups:
                        # There is already one active message with the same
                        # group, so we cannot deliver this one.
                        continue

                queue.pending_messages.add(message)

                if queue.dead_letter_queue is not None and message.approximate_receive_count >= queue.redrive_policy['maxReceiveCount']:
                    messages_to_dlq.append(message)
                    continue

                message.mark_received(
                    visibility_timeout=visibility_timeout
                )
                result.append(message)
                if len(result) >= count:
                    break

            for message in messages_to_dlq:
                queue._messages.remove(message)
                queue.dead_letter_queue.add_message(message)

            if previous_result_count == len(result):
                if wait_seconds_timeout == 0:
                    # There is no wait timeout and we have added no additional
                    # results, so break to avoid an infinite loop.
                    break

                import time
                time.sleep(0.01)
                continue

            previous_result_count = len(result)

        return result