Example no. 1
    def info(self, **kwargs):
        """prints metadata for a remote subject key"""
        subject = kwargs.get("subject", None)
        if not subject:
            raise RadulaError("Missing remote subject key to get info for")

        print json.dumps(self.lib.info(subject)).replace('\\"', '')
def delete(id):
	try:
		contacts_table = Table('Contacts')
		contact = contacts_table.get_item(id=id)
		res = contact.delete()
		return Response(json.dumps({'message' : 'deleted'}),  mimetype='application/json')		
	except Exception as e:
		return Response(json.dumps({'message': str(e)}), mimetype='application/json')
def get_by_id(id):
	try:
		contacts_table = Table('Contacts')
		contact = contacts_table.get_item(id=id)
		res = { 'id':contact['id'], 'name':contact['name'], 'email':contact['email'],'age':str(contact['age'])}
		return Response(json.dumps(res),  mimetype='application/json')
	except Exception as e:
		return Response(json.dumps({'message': str(e)}), mimetype='application/json')
def get_all():
	try:
		contacts_table = Table('Contacts')
		contacts = contacts_table.scan()
		res = []
		for contact in contacts:
			res.append({ 'id':contact['id'], 'name':contact['name'], 'email':contact['email'],'age':str(contact['age'])})

		return Response(json.dumps(res),  mimetype='application/json')
	except Exception as e:
		return Response(json.dumps({'message': str(e)}), mimetype='application/json')
Example no. 5
    def test_connection_close(self):
        """Check connection re-use after close header is received"""
        HTTPretty.register_uri(HTTPretty.POST,
                               'https://%s/' % self.region.endpoint,
                               json.dumps({'test': 'secure'}),
                               content_type='application/json',
                               connection='close')

        conn = self.region.connect(aws_access_key_id='access_key',
                                   aws_secret_access_key='secret')

        def mock_put_conn(*args, **kwargs):
            raise Exception('put_http_connection should not be called!')

        conn.put_http_connection = mock_put_conn

        resp1 = conn.make_request('myCmd1',
                                  {'par1': 'foo', 'par2': 'baz'},
                                  "/",
                                  "POST")

        # If we've gotten this far then no exception was raised
        # by attempting to put the connection back into the pool
        # Now let's just confirm the close header was actually
        # set or we have another problem.
        self.assertEqual(resp1.getheader('connection'), 'close')
Example no. 6
    def update_pipeline_notifications(self, id, notifications):
        """
        To update Amazon Simple Notification Service (Amazon SNS)
        notifications for a pipeline, send a POST request to the
        `/2012-09-25/pipelines/[pipelineId]/notifications` resource.

        When you update notifications for a pipeline, Elastic Transcoder
        returns the values that you specified in the request.

        :type id: string
        :param id: The identifier of the pipeline for which you want to change
            notification settings.

        :type notifications: dict
        :param notifications: The Amazon Simple Notification Service (Amazon
            SNS) topic that you want to notify to report job status.
        To receive notifications, you must also subscribe
            to the new topic in the console.

        + **Progressing**: The topic that you want to
              notify when Elastic Transcoder has started to process the job.
        + **Completed**: The topic that you want to notify
              when Elastic Transcoder has finished processing the job.
        + **Warning**: The topic that you want to notify
              when Elastic Transcoder encounters a warning condition.
        + **Error**: The topic that you want to notify
              when Elastic Transcoder encounters an error condition.

        """
        uri = '/2012-09-25/pipelines/{0}/notifications'.format(id)
        params = {'Notifications': notifications, }
        return self.make_request('POST', uri, expected_status=200,
                                 data=json.dumps(params))
Example no. 7
    def test_multi_commands(self):
        """Check connection re-use"""
        HTTPretty.register_uri(HTTPretty.POST,
                               'https://%s/' % self.region.endpoint,
                               json.dumps({'test': 'secure'}),
                               content_type='application/json')

        conn = self.region.connect(aws_access_key_id='access_key',
                                   aws_secret_access_key='secret')

        resp1 = conn.make_request('myCmd1',
                                  {'par1': 'foo', 'par2': 'baz'},
                                  "/",
                                  "POST")
        body1 = urlparse.parse_qs(HTTPretty.last_request.body)

        resp2 = conn.make_request('myCmd2',
                                  {'par3': 'bar', 'par4': 'narf'},
                                  "/",
                                  "POST")
        body2 = urlparse.parse_qs(HTTPretty.last_request.body)

        self.assertEqual(body1['par1'], ['foo'])
        self.assertEqual(body1['par2'], ['baz'])
        with self.assertRaises(KeyError):
            body1['par3']

        self.assertEqual(body2['par3'], ['bar'])
        self.assertEqual(body2['par4'], ['narf'])
        with self.assertRaises(KeyError):
            body2['par1']

        self.assertEqual(resp1.read(), '{"test": "secure"}')
        self.assertEqual(resp2.read(), '{"test": "secure"}')
Example no. 8
    def put_metric_filter(self, log_group_name, filter_name, filter_pattern,
                          metric_transformations):
        """
        Creates or updates a metric filter and associates it with the
        specified log group. Metric filters allow you to configure
        rules to extract metric data from log events ingested through
        `PutLogEvents` requests.

        :type log_group_name: string
        :param log_group_name:

        :type filter_name: string
        :param filter_name: The name of the metric filter.

        :type filter_pattern: string
        :param filter_pattern:

        :type metric_transformations: list
        :param metric_transformations:

        """
        params = {
            'logGroupName': log_group_name,
            'filterName': filter_name,
            'filterPattern': filter_pattern,
            'metricTransformations': metric_transformations,
        }
        return self.make_request(action='PutMetricFilter',
                                 body=json.dumps(params))
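A minimal sketch of how the parameters might be filled in, assuming `logs` is an instance of the client class above; the group name, filter pattern, and metric names are illustrative placeholders.

    # Count every ingested log event whose message matches the pattern "ERROR".
    transformations = [{
        'metricName': 'ErrorCount',
        'metricNamespace': 'MyApp',
        'metricValue': '1',
    }]
    logs.put_metric_filter(log_group_name='my-app-logs',
                           filter_name='errors',
                           filter_pattern='ERROR',
                           metric_transformations=transformations)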
Example no. 9
    def set_alarm_state(self, alarm_name, state_reason, state_value,
                        state_reason_data=None):
        """
        Temporarily sets the state of an alarm. When the updated StateValue
        differs from the previous value, the action configured for the
        appropriate state is invoked. This is not a permanent change. The next
        periodic alarm check (in about a minute) will set the alarm to its
        actual state.

        :type alarm_name: string
        :param alarm_name: Descriptive name for alarm.

        :type state_reason: string
        :param state_reason: Human readable reason.

        :type state_value: string
        :param state_value: OK | ALARM | INSUFFICIENT_DATA

        :type state_reason_data: string
        :param state_reason_data: Reason string (will be jsonified).
        """
        params = {'AlarmName': alarm_name,
                  'StateReason': state_reason,
                  'StateValue': state_value}
        if state_reason_data:
            params['StateReasonData'] = json.dumps(state_reason_data)

        return self.get_status('SetAlarmState', params)
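For example, forcing an alarm into the ALARM state to exercise its actions could look like the sketch below; `cw` stands in for the connection object and the alarm name is made up.

    # The alarm returns to its real state at the next periodic check (about a minute).
    cw.set_alarm_state('my-cpu-alarm',
                       state_reason='Manually testing alarm actions',
                       state_value='ALARM',
                       state_reason_data={'triggeredBy': 'manual test'})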
Example no. 10
    def delete_stream(self, stream_name):
        """
        This operation deletes a stream and all of its shards and
        data. You must shut down any applications that are operating
        on the stream before you delete the stream. If an application
        attempts to operate on a deleted stream, it will receive the
        exception `ResourceNotFoundException`.

        If the stream is in the ACTIVE state, you can delete it. After
        a `DeleteStream` request, the specified stream is in the
        DELETING state until Amazon Kinesis completes the deletion.

        **Note:** Amazon Kinesis might continue to accept data read
        and write operations, such as PutRecord and GetRecords, on a
        stream in the DELETING state until the stream deletion is
        complete.

        When you delete a stream, any shards in that stream are also
        deleted.

        You can use the DescribeStream operation to check the state of
        the stream, which is returned in `StreamStatus`.

        `DeleteStream` has a limit of 5 transactions per second per
        account.

        :type stream_name: string
        :param stream_name: The name of the stream to delete.

        """
        params = {'StreamName': stream_name, }
        return self.make_request(action='DeleteStream',
                                 body=json.dumps(params))
Example no. 11
    def create_job(self, pipeline_id, input_name, output):
        """
        To create a job, send a POST request to the `/2012-09-25/jobs`
        resource.

        When you create a job, Elastic Transcoder returns JSON data
        that includes the values that you specified plus information
        about the job that is created.

        :type pipeline_id: string
        :param pipeline_id: The `Id` of the pipeline that you want Elastic
            Transcoder to use for transcoding. The pipeline
            determines several settings, including the
            Amazon S3 bucket from which Elastic Transcoder
            gets the files to transcode and the bucket into
            which Elastic Transcoder puts the transcoded
            files.

        :type input_name: dict
        :param input_name: A section of the request body that provides
            information about the file that is being
            transcoded.

        :type output: dict
        :param output: A section of the request body that provides information
            about the transcoded (target) file.

        """
        uri = '/2012-09-25/jobs'
        params = {
            'PipelineId': pipeline_id,
            'Input': input_name,
            'Output': output,
        }
        return self.make_request('POST', uri, expected_status=201,
                                 data=json.dumps(params))
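A sketch of a call, assuming `transcoder` is the connection instance; the pipeline ID, object keys, and preset ID are placeholders, and only a minimal subset of the Input/Output fields is shown.

    input_name = {'Key': 'raw/movie.mov'}           # source object in the pipeline's input bucket
    output = {'Key': 'out/movie.mp4',               # target object in the output bucket
              'PresetId': '1351620000001-000010'}   # hypothetical preset ID
    job = transcoder.create_job('1111111111111-abcde1', input_name, output)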
Example no. 12
    def delete_item(self, table_name, key,
                    expected=None, return_values=None,
                    object_hook=None):
        """
        Delete an item and all of its attributes by primary key.
        You can perform a conditional delete by specifying an
        expected rule.

        :type table_name: str
        :param table_name: The name of the table containing the item.

        :type key: dict
        :param key: A Python version of the Key data structure
            defined by DynamoDB.

        :type expected: dict
        :param expected: A Python version of the Expected
            data structure defined by DynamoDB.

        :type return_values: str
        :param return_values: Controls the return of attribute
            name-value pairs before they were changed.  Possible
            values are: None or 'ALL_OLD'. If 'ALL_OLD' is
            specified and the item is overwritten, the content
            of the old item is returned.
        """
        data = {'TableName': table_name,
                'Key': key}
        if expected:
            data['Expected'] = expected
        if return_values:
            data['ReturnValues'] = return_values
        json_input = json.dumps(data)
        return self.make_request('DeleteItem', json_input,
                                 object_hook=object_hook)
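A conditional-delete sketch in the original DynamoDB wire format this layer-1 method speaks (an assumption about the key/expected shapes); `dynamo` is the connection object and the table, key, and attribute values are made up.

    key = {'HashKeyElement': {'S': 'user-42'}}
    # Delete only if the item still has status == 'inactive'; ask for the old attributes back.
    expected = {'status': {'Value': {'S': 'inactive'}}}
    old_item = dynamo.delete_item('users', key,
                                  expected=expected,
                                  return_values='ALL_OLD')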
Example no. 13
File: layer1.py Project: 10sr/hue
    def add_attachments_to_set(self, attachments, attachment_set_id=None):
        """
        Adds one or more attachments to an attachment set. If an
        `AttachmentSetId` is not specified, a new attachment set is
        created, and the ID of the set is returned in the response. If
        an `AttachmentSetId` is specified, the attachments are added
        to the specified set, if it exists.

        An attachment set is a temporary container for attachments
        that are to be added to a case or case communication. The set
        is available for one hour after it is created; the
        `ExpiryTime` returned in the response indicates when the set
        expires. The maximum number of attachments in a set is 3, and
        the maximum size of any attachment in the set is 5 MB.

        :type attachment_set_id: string
        :param attachment_set_id: The ID of the attachment set. If an
            `AttachmentSetId` is not specified, a new attachment set is
            created, and the ID of the set is returned in the response. If an
            `AttachmentSetId` is specified, the attachments are added to the
            specified set, if it exists.

        :type attachments: list
        :param attachments: One or more attachments to add to the set. The
            limit is 3 attachments per set, and the size limit is 5 MB per
            attachment.

        """
        params = {'attachments': attachments, }
        if attachment_set_id is not None:
            params['attachmentSetId'] = attachment_set_id
        return self.make_request(action='AddAttachmentsToSet',
                                 body=json.dumps(params))
Example no. 14
    def unlink_identity(self, identity_id, logins, logins_to_remove):
        """
        Unlinks a federated identity from an existing account.
        Unlinked logins will be considered new identities next time
        they are seen. Removing the last linked login will make this
        identity inaccessible.

        :type identity_id: string
        :param identity_id: A unique identifier in the format REGION:GUID.

        :type logins: map
        :param logins: A set of optional name/value pairs that map provider
            names to provider tokens.

        :type logins_to_remove: list
        :param logins_to_remove: Provider names to unlink from this identity.

        """
        params = {
            'IdentityId': identity_id,
            'Logins': logins,
            'LoginsToRemove': logins_to_remove,
        }
        return self.make_request(action='UnlinkIdentity',
                                 body=json.dumps(params))
Example no. 15
    def create_log_group(self, log_group_name):
        """
        Creates a new log group with the specified name. The name of
        the log group must be unique within a region for an AWS
        account. You can create up to 100 log groups per account.

        You must use the following guidelines when naming a log group:

        + Log group names can be between 1 and 512 characters long.
        + Allowed characters are a-z, A-Z, 0-9, '_' (underscore), '-'
          (hyphen), '/' (forward slash), and '.' (period).



        Log groups are created with a default retention of 14 days.
        The retention attribute allows you to configure the number of
        days you want to retain log events in the specified log group.
        See the `SetRetention` operation on how to modify the
        retention of your log groups.

        :type log_group_name: string
        :param log_group_name:

        """
        params = {'logGroupName': log_group_name, }
        return self.make_request(action='CreateLogGroup',
                                 body=json.dumps(params))
Example no. 16
    def create_log_stream(self, log_group_name, log_stream_name):
        """
        Creates a new log stream in the specified log group. The name
        of the log stream must be unique within the log group. There
        is no limit on the number of log streams that can exist in a
        log group.

        You must use the following guidelines when naming a log
        stream:

        + Log stream names can be between 1 and 512 characters long.
        + The ':' colon character is not allowed.

        :type log_group_name: string
        :param log_group_name:

        :type log_stream_name: string
        :param log_stream_name:

        """
        params = {
            'logGroupName': log_group_name,
            'logStreamName': log_stream_name,
        }
        return self.make_request(action='CreateLogStream',
                                 body=json.dumps(params))
Example no. 17
    def create_identity_pool(self, identity_pool_name,
                             allow_unauthenticated_identities,
                             supported_login_providers=None):
        """
        Creates a new identity pool. The identity pool is a store of
        user identity information that is specific to your AWS
        account.

        :type identity_pool_name: string
        :param identity_pool_name: A string that you provide.

        :type allow_unauthenticated_identities: boolean
        :param allow_unauthenticated_identities: TRUE if the identity pool
            supports unauthenticated logins.

        :type supported_login_providers: map
        :param supported_login_providers: Optional key:value pairs mapping
            provider names to provider app IDs.

        """
        params = {
            'IdentityPoolName': identity_pool_name,
            'AllowUnauthenticatedIdentities': allow_unauthenticated_identities,
        }
        if supported_login_providers is not None:
            params['SupportedLoginProviders'] = supported_login_providers
        return self.make_request(action='CreateIdentityPool',
                                 body=json.dumps(params))
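A short sketch, assuming `cognito` is the connection object; the pool name and the provider app ID are illustrative only.

    # Map a provider name to an app ID so federated logins from that provider are accepted.
    providers = {'graph.facebook.com': '1234567890'}
    pool = cognito.create_identity_pool('MyAppUsers',
                                        allow_unauthenticated_identities=True,
                                        supported_login_providers=providers)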
Example no. 18
    def list_datasets(self, identity_pool_id, identity_id, next_token=None,
                      max_results=None):
        """
        Lists datasets for an identity.

        :type identity_pool_id: string
        :param identity_pool_id: A name-spaced GUID (for example, us-
            east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
            Cognito. GUID generation is unique within a region.

        :type identity_id: string
        :param identity_id: A name-spaced GUID (for example, us-
            east-1:23EC4050-6AEA-7089-A2DD-08002EXAMPLE) created by Amazon
            Cognito. GUID generation is unique within a region.

        :type next_token: string
        :param next_token: A pagination token for obtaining the next page of
            results.

        :type max_results: integer
        :param max_results: The maximum number of results to be returned.

        """
        uri = '/identitypools/{0}/identities/{1}/datasets'.format(
            identity_pool_id, identity_id)
        params = {}
        headers = {}
        return self.make_request('GET', uri, expected_status=200,
                                 data=json.dumps(params), headers=headers)
Example no. 19
    def list_tags_for_stream(self, stream_name, exclusive_start_tag_key=None,
                             limit=None):
        """
        Lists the tags for the specified Amazon Kinesis stream.

        :type stream_name: string
        :param stream_name: The name of the stream.

        :type exclusive_start_tag_key: string
        :param exclusive_start_tag_key: The key to use as the starting point
            for the list of tags. If this parameter is set, `ListTagsForStream`
            gets all tags that occur after `ExclusiveStartTagKey`.

        :type limit: integer
        :param limit: The number of tags to return. If this number is less than
            the total number of tags associated with the stream, `HasMoreTags`
            is set to `True`. To list additional tags, set
            `ExclusiveStartTagKey` to the last key in the response.

        """
        params = {'StreamName': stream_name, }
        if exclusive_start_tag_key is not None:
            params['ExclusiveStartTagKey'] = exclusive_start_tag_key
        if limit is not None:
            params['Limit'] = limit
        return self.make_request(action='ListTagsForStream',
                                 body=json.dumps(params))
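The `HasMoreTags`/`ExclusiveStartTagKey` pair described above suggests a simple pagination loop. A sketch, assuming `kinesis` is the connection object and that the call returns the parsed JSON response containing `Tags` and `HasMoreTags`.

    tags = []
    last_key = None
    while True:
        page = kinesis.list_tags_for_stream('my-stream',
                                            exclusive_start_tag_key=last_key)
        tags.extend(page['Tags'])
        if not page.get('HasMoreTags'):
            break
        last_key = page['Tags'][-1]['Key']   # continue after the last key returned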
Example no. 20
    def update_identity_pool(self, identity_pool_id, identity_pool_name,
                             allow_unauthenticated_identities,
                             supported_login_providers=None):
        """
        Updates an identity pool.

        :type identity_pool_id: string
        :param identity_pool_id: An identity pool ID in the format REGION:GUID.

        :type identity_pool_name: string
        :param identity_pool_name: A string that you provide.

        :type allow_unauthenticated_identities: boolean
        :param allow_unauthenticated_identities: TRUE if the identity pool
            supports unauthenticated logins.

        :type supported_login_providers: map
        :param supported_login_providers: Optional key:value pairs mapping
            provider names to provider app IDs.

        """
        params = {
            'IdentityPoolId': identity_pool_id,
            'IdentityPoolName': identity_pool_name,
            'AllowUnauthenticatedIdentities': allow_unauthenticated_identities,
        }
        if supported_login_providers is not None:
            params['SupportedLoginProviders'] = supported_login_providers
        return self.make_request(action='UpdateIdentityPool',
                                 body=json.dumps(params))
Example no. 21
    def update_pipeline_status(self, id, status):
        """
        To pause or reactivate a pipeline, so the pipeline stops or
        restarts processing jobs, update the status for the pipeline.
        Send a POST request to the
        `/2012-09-25/pipelines/[pipelineId]/status` resource.

        Changing the pipeline status is useful if you want to cancel
        one or more jobs. You can't cancel jobs after Elastic Transcoder has
        started processing them; if you pause the pipeline to which you
        submitted the jobs, you have more time to get the job IDs for
        the jobs that you want to cancel, and to send a CancelJob
        request.

        :type id: string
        :param id: The identifier of the pipeline to update.

        :type status: string
        :param status: The new status of the pipeline:


        + `active`: Enable the pipeline, so it starts processing
              jobs.
        + `paused`: Disable the pipeline, so it stops processing
              jobs.

        """
        uri = '/2012-09-25/pipelines/{0}/status'.format(id)
        params = {'Status': status, }
        return self.make_request('POST', uri, expected_status=200,
                                 data=json.dumps(params))
Example no. 22
    def set_vault_notifications(self, vault_name, notification_config):
        """
        This operation sets the notification-configuration
        subresource on the vault.

        :type vault_name: str
        :param vault_name: The name of the vault

        :type notification_config: dict
        :param notification_config: A Python dictionary containing
            an SNS Topic and events for which you want Amazon Glacier
            to send notifications to the topic.  Possible events are:

            * ArchiveRetrievalCompleted - occurs when a job that was
              initiated for an archive retrieval is completed.
            * InventoryRetrievalCompleted - occurs when a job that was
              initiated for an inventory retrieval is completed.

            The format of the dictionary is:

                {'SNSTopic': 'mytopic',
                 'Events': [event1,...]}
        """
        uri = 'vaults/%s/notification-configuration' % vault_name
        json_config = json.dumps(notification_config)
        return self.make_request('PUT', uri, data=json_config,
                                 ok_responses=(204,))
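A call using the dictionary format shown in the docstring, assuming `glacier` is the layer-1 connection; the vault name and topic ARN are placeholders.

    config = {'SNSTopic': 'arn:aws:sns:us-east-1:123456789012:glacier-jobs',
              'Events': ['ArchiveRetrievalCompleted', 'InventoryRetrievalCompleted']}
    glacier.set_vault_notifications('photo-archive', config)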
Example no. 23
    def list_tables(self, limit=None, start_table=None):
        """
        Returns a dictionary of results.  The dictionary contains
        a **TableNames** key whose value is a list of the table names.
        The dictionary could also contain a **LastEvaluatedTableName**
        key whose value would be the last table name returned if
        the complete list of table names was not returned.  This
        value would then be passed as the ``start_table`` parameter on
        a subsequent call to this method.

        :type limit: int
        :param limit: The maximum number of tables to return.

        :type start_table: str
        :param start_table: The name of the table that starts the
            list.  If you ran a previous list_tables and not
            all results were returned, the response dict would
            include a LastEvaluatedTableName attribute.  Use
            that value here to continue the listing.
        """
        data = {}
        if limit:
            data['Limit'] = limit
        if start_table:
            data['ExclusiveStartTableName'] = start_table
        json_input = json.dumps(data)
        return self.make_request('ListTables', json_input)
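The docstring describes cursor-style paging through `LastEvaluatedTableName`; a sketch of a full listing loop, assuming `dynamo` is the connection object.

    names = []
    start = None
    while True:
        result = dynamo.list_tables(limit=20, start_table=start)
        names.extend(result['TableNames'])
        start = result.get('LastEvaluatedTableName')
        if start is None:
            break   # no more pages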
Example no. 24
    def create_table(self, table_name, schema, provisioned_throughput):
        """
        Add a new table to your account.  The table name must be unique
        among those associated with the account issuing the request.
        This request triggers an asynchronous workflow to begin creating
        the table.  When the workflow is complete, the state of the
        table will be ACTIVE.

        :type table_name: str
        :param table_name: The name of the table to create.

        :type schema: dict
        :param schema: A Python version of the KeySchema data structure
            as defined by DynamoDB

        :type provisioned_throughput: dict
        :param provisioned_throughput: A Python version of the
            ProvisionedThroughput data structure defined by
            DynamoDB.
        """
        data = {'TableName': table_name,
                'KeySchema': schema,
                'ProvisionedThroughput': provisioned_throughput}
        json_input = json.dumps(data)
        response_dict = self.make_request('CreateTable', json_input)
        return response_dict
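A sketch of the schema and throughput dictionaries in the original DynamoDB wire format (an assumption; only a hash key is shown), with `dynamo` standing in for the connection object.

    schema = {'HashKeyElement': {'AttributeName': 'id', 'AttributeType': 'S'}}
    throughput = {'ReadCapacityUnits': 5, 'WriteCapacityUnits': 5}
    dynamo.create_table('users', schema, throughput)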
Example no. 25
    def update_pipeline_status(self, id=None, status=None):
        """
        The UpdatePipelineStatus operation pauses or reactivates a
        pipeline, so that the pipeline stops or restarts the
        processing of jobs.

        Changing the pipeline status is useful if you want to cancel
        one or more jobs. You can't cancel jobs after Elastic
        Transcoder has started processing them; if you pause the
        pipeline to which you submitted the jobs, you have more time
        to get the job IDs for the jobs that you want to cancel, and
        to send a CancelJob request.

        :type id: string
        :param id: The identifier of the pipeline to update.

        :type status: string
        :param status:
        The desired status of the pipeline:


        + `Active`: The pipeline is processing jobs.
        + `Paused`: The pipeline is not currently processing jobs.

        """
        uri = '/2012-09-25/pipelines/{0}/status'.format(id)
        params = {}
        if id is not None:
            params['Id'] = id
        if status is not None:
            params['Status'] = status
        return self.make_request('POST', uri, expected_status=200,
                                 data=json.dumps(params))
Example no. 26
File: layer1.py Project: 10sr/hue
    def list_functions(self, marker=None, max_items=None):
        """
        Returns a list of your Lambda functions. For each function,
        the response includes the function configuration information.
        You must use GetFunction to retrieve the code for your
        function.

        This operation requires permission for the
        `lambda:ListFunctions` action.

        :type marker: string
        :param marker: Optional string. An opaque pagination token returned
            from a previous `ListFunctions` operation. If present, indicates
            where to continue the listing.

        :type max_items: integer
        :param max_items: Optional integer. Specifies the maximum number of AWS
            Lambda functions to return in response. This parameter value must
            be greater than 0.

        """

        uri = '/2014-11-13/functions/'
        params = {}
        headers = {}
        query_params = {}
        if marker is not None:
            query_params['Marker'] = marker
        if max_items is not None:
            query_params['MaxItems'] = max_items
        return self.make_request('GET', uri, expected_status=200,
                                 data=json.dumps(params), headers=headers,
                                 params=query_params)
Example no. 27
    def describe_services(self, service_code_list=None, language=None):
        """
        Returns the current list of AWS services and a list of service
        categories that applies to each one. You then use service
        names and categories in your CreateCase requests. Each AWS
        service has its own set of categories.

        The service codes and category codes correspond to the values
        that are displayed in the **Service** and **Category** drop-
        down lists on the AWS Support Center `Open a new case`_ page.
        The values in those fields, however, do not necessarily match
        the service codes and categories returned by the
        `DescribeServices` request. Always use the service codes and
        categories obtained programmatically. This practice ensures
        that you always have the most recent set of service and
        category codes.

        :type service_code_list: list
        :param service_code_list: A JSON-formatted list of service codes
            available for AWS services.

        :type language: string
        :param language: The ISO 639-1 code for the language in which AWS
            provides support. AWS Support currently supports English ("en") and
            Japanese ("ja"). Language parameters must be passed explicitly for
            operations that take them.

        """
        params = {}
        if service_code_list is not None:
            params['serviceCodeList'] = service_code_list
        if language is not None:
            params['language'] = language
        return self.make_request(action='DescribeServices',
                                 body=json.dumps(params))
Example no. 28
    def describe_log_groups(self, log_group_name_prefix=None,
                            next_token=None, limit=None):
        """
        Returns all the log groups that are associated with the AWS
        account making the request. The list returned in the response
        is ASCII-sorted by log group name.

        By default, this operation returns up to 50 log groups. If
        there are more log groups to list, the response would contain
        a `nextToken` value in the response body. You can also limit
        the number of log groups returned in the response by
        specifying the `limit` parameter in the request.

        :type log_group_name_prefix: string
        :param log_group_name_prefix:

        :type next_token: string
        :param next_token: A string token used for pagination that points to
            the next page of results. It must be a value obtained from the
            response of the previous `DescribeLogGroups` request.

        :type limit: integer
        :param limit: The maximum number of items returned in the response. If
            you don't specify a value, the request would return up to 50 items.

        """
        params = {}
        if log_group_name_prefix is not None:
            params['logGroupNamePrefix'] = log_group_name_prefix
        if next_token is not None:
            params['nextToken'] = next_token
        if limit is not None:
            params['limit'] = limit
        return self.make_request(action='DescribeLogGroups',
                                 body=json.dumps(params))
Example no. 29
File: layer1.py Project: Cajs/boto
    def report_task_progress(self, task_id):
        """
        Updates the AWS Data Pipeline service on the progress of the
        calling task runner. When the task runner is assigned a task,
        it should call ReportTaskProgress to acknowledge that it has
        the task within 2 minutes. If the web service does not receive
        this acknowledgement within the 2 minute window, it will
        assign the task in a subsequent PollForTask call. After this
        initial acknowledgement, the task runner only needs to report
        progress every 15 minutes to maintain its ownership of the
        task. You can change this reporting time from 15 minutes by
        specifying a `reportProgressTimeout` field in your pipeline.
        If a task runner does not report its status after 5 minutes,
        AWS Data Pipeline will assume that the task runner is unable
        to process the task and will reassign the task in a subsequent
        response to PollForTask. Task runners should call
        ReportTaskProgress every 60 seconds.

        :type task_id: string
        :param task_id: Identifier of the task assigned to the task runner.
            This value is provided in the TaskObject that the service returns
            with the response for the PollForTask action.

        """
        params = {'taskId': task_id, }
        return self.make_request(action='ReportTaskProgress',
                                 body=json.dumps(params))
Example no. 30
File: layer1.py Project: Cajs/boto
    def set_status(self, object_ids, status, pipeline_id):
        """
        Requests that the status of an array of physical or logical
        pipeline objects be updated in the pipeline. This update may
        not occur immediately, but is eventually consistent. The
        status that can be set depends on the type of object.

        :type pipeline_id: string
        :param pipeline_id: Identifies the pipeline that contains the objects.

        :type object_ids: list
        :param object_ids: Identifies an array of objects. The corresponding
            objects can be either physical or components, but not a mix of both
            types.

        :type status: string
        :param status: Specifies the status to be set on all the objects in
            `objectIds`. For components, this can be either `PAUSE` or
            `RESUME`. For instances, this can be either `CANCEL`, `RERUN`, or
            `MARK_FINISHED`.

        """
        params = {
            'pipelineId': pipeline_id,
            'objectIds': object_ids,
            'status': status,
        }
        return self.make_request(action='SetStatus',
                                 body=json.dumps(params))
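For instance, pausing two components of a pipeline might look like the following sketch; `dp` is assumed to be the Data Pipeline connection and the IDs are made up.

    dp.set_status(object_ids=['Schedule_1', 'CopyActivity_1'],
                  status='PAUSE',
                  pipeline_id='df-0123456789ABCDEF')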
Example no. 31
    def modify_hapg(self, hapg_arn, label=None, partition_serial_list=None):
        """
        Modifies an existing high-availability partition group.

        :type hapg_arn: string
        :param hapg_arn: The ARN of the high-availability partition group to
            modify.

        :type label: string
        :param label: The new label for the high-availability partition group.

        :type partition_serial_list: list
        :param partition_serial_list: The list of partition serial numbers to
            make members of the high-availability partition group.

        """
        params = {'HapgArn': hapg_arn, }
        if label is not None:
            params['Label'] = label
        if partition_serial_list is not None:
            params['PartitionSerialList'] = partition_serial_list
        return self.make_request(action='ModifyHapg',
                                 body=json.dumps(params))
    def put_key_policy(self, key_id, policy_name, policy):
        """
        Attaches a policy to the specified key.

        :type key_id: string
        :param key_id: Unique identifier of the key. This can be an ARN, an
            alias, or a globally unique identifier.

        :type policy_name: string
        :param policy_name: Name of the policy to be attached. Currently, the
            only supported name is "default".

        :type policy: string
        :param policy: The policy, in JSON format, to be attached to the key.

        """
        params = {
            'KeyId': key_id,
            'PolicyName': policy_name,
            'Policy': policy,
        }
        return self.make_request(action='PutKeyPolicy',
                                 body=json.dumps(params))
Example no. 33
    def get_config(self, client_arn, client_version, hapg_list):
        """
        Gets the configuration files necessary to connect to all high
        availability partition groups the client is associated with.

        :type client_arn: string
        :param client_arn: The ARN of the client.

        :type client_version: string
        :param client_version: The client version.

        :type hapg_list: list
        :param hapg_list: A list of ARNs that identify the high-availability
            partition groups that are associated with the client.

        """
        params = {
            'ClientArn': client_arn,
            'ClientVersion': client_version,
            'HapgList': hapg_list,
        }
        return self.make_request(action='GetConfig',
                                 body=json.dumps(params))
Example no. 34
    def evaluate_expression(self, pipeline_id, expression, object_id):
        """
        Evaluates a string in the context of a specified object. A
        task runner can use this action to evaluate SQL queries stored
        in Amazon S3.

        :type pipeline_id: string
        :param pipeline_id: The identifier of the pipeline.

        :type object_id: string
        :param object_id: The identifier of the object.

        :type expression: string
        :param expression: The expression to evaluate.

        """
        params = {
            'pipelineId': pipeline_id,
            'objectId': object_id,
            'expression': expression,
        }
        return self.make_request(action='EvaluateExpression',
                                 body=json.dumps(params))
    def list_keys(self, limit=None, marker=None):
        """
        Lists the customer master keys.

        :type limit: integer
        :param limit: Specify this parameter only when paginating results to
            indicate the maximum number of keys you want listed in the
            response. If there are additional keys beyond the maximum you
            specify, the `Truncated` response element will be set to `true.`

        :type marker: string
        :param marker: Use this parameter only when paginating results, and
            only in a subsequent request after you've received a response where
            the results are truncated. Set it to the value of the `NextMarker`
            in the response you just received.

        """
        params = {}
        if limit is not None:
            params['Limit'] = limit
        if marker is not None:
            params['Marker'] = marker
        return self.make_request(action='ListKeys', body=json.dumps(params))
Example no. 36
    def delete_pipeline(self, pipeline_id):
        """
        Permanently deletes a pipeline, its pipeline definition and
        its run history. You cannot query or restore a deleted
        pipeline. AWS Data Pipeline will attempt to cancel instances
        associated with the pipeline that are currently being
        processed by task runners. Deleting a pipeline cannot be
        undone.

        To temporarily pause a pipeline instead of deleting it, call
        SetStatus with the status set to Pause on individual
        components. Components that are paused by SetStatus can be
        resumed.

        :type pipeline_id: string
        :param pipeline_id: The identifier of the pipeline to be deleted.

        """
        params = {
            'pipelineId': pipeline_id,
        }
        return self.make_request(action='DeletePipeline',
                                 body=json.dumps(params))
Example no. 37
    def describe_pipelines(self, pipeline_ids):
        """
        Retrieve metadata about one or more pipelines. The information
        retrieved includes the name of the pipeline, the pipeline
        identifier, its current state, and the user account that owns
        the pipeline. Using account credentials, you can retrieve
        metadata about pipelines that you or your IAM users have
        created. If you are using an IAM user account, you can retrieve
        metadata about only those pipelines you have read permission
        for.

        :type pipeline_ids: list
        :param pipeline_ids: Identifiers of the pipelines to describe. You can
            pass as many as 25 identifiers in a single call to
            DescribePipelines. You can obtain pipeline identifiers by calling
            ListPipelines.

        """
        params = {
            'pipelineIds': pipeline_ids,
        }
        return self.make_request(action='DescribePipelines',
                                 body=json.dumps(params))
Example no. 38
    def refresh_trusted_advisor_check(self, check_id):
        """
        Requests a refresh of the Trusted Advisor check that has the
        specified check ID. Check IDs can be obtained by calling
        DescribeTrustedAdvisorChecks.

        The response contains a RefreshTrustedAdvisorCheckResult
        object, which contains these fields:


        + **Status.** The refresh status of the check: "none",
          "enqueued", "processing", "success", or "abandoned".
        + **MillisUntilNextRefreshable.** The amount of time, in
          milliseconds, until the check is eligible for refresh.
        + **CheckId.** The unique identifier for the check.

        :type check_id: string
        :param check_id: The unique identifier for the Trusted Advisor check.

        """
        params = {'checkId': check_id, }
        return self.make_request(action='RefreshTrustedAdvisorCheck',
                                 body=json.dumps(params))
Example no. 39
    def describe_log_groups(self,
                            log_group_name_prefix=None,
                            next_token=None,
                            limit=None):
        """
        Returns all the log groups that are associated with the AWS
        account making the request. The list returned in the response
        is ASCII-sorted by log group name.

        By default, this operation returns up to 50 log groups. If
        there are more log groups to list, the response would contain
        a `nextToken` value in the response body. You can also limit
        the number of log groups returned in the response by
        specifying the `limit` parameter in the request.

        :type log_group_name_prefix: string
        :param log_group_name_prefix:

        :type next_token: string
        :param next_token: A string token used for pagination that points to
            the next page of results. It must be a value obtained from the
            response of the previous `DescribeLogGroups` request.

        :type limit: integer
        :param limit: The maximum number of items returned in the response. If
            you don't specify a value, the request would return up to 50 items.

        """
        params = {}
        if log_group_name_prefix is not None:
            params['logGroupNamePrefix'] = log_group_name_prefix
        if next_token is not None:
            params['nextToken'] = next_token
        if limit is not None:
            params['limit'] = limit
        return self.make_request(action='DescribeLogGroups',
                                 body=json.dumps(params))
Example no. 40
    def create_job(self, pipeline_id, input_name, output):
        """
        To create a job, send a POST request to the `/2012-09-25/jobs`
        resource.

        When you create a job, Elastic Transcoder returns JSON data
        that includes the values that you specified plus information
        about the job that is created.

        :type pipeline_id: string
        :param pipeline_id: The `Id` of the pipeline that you want Elastic
            Transcoder to use for transcoding. The pipeline
            determines several settings, including the
            Amazon S3 bucket from which Elastic Transcoder
            gets the files to transcode and the bucket into
            which Elastic Transcoder puts the transcoded
            files.

        :type input_name: dict
        :param input_name: A section of the request body that provides
            information about the file that is being
            transcoded.

        :type output: dict
        :param output: A section of the request body that provides information
            about the transcoded (target) file.

        """
        uri = '/2012-09-25/jobs'
        params = {
            'PipelineId': pipeline_id,
            'Input': input_name,
            'Output': output,
        }
        return self.make_request('POST',
                                 uri,
                                 expected_status=201,
                                 data=json.dumps(params))
Example no. 41
    def get_item(self, table_name, key, attributes_to_get=None,
                 consistent_read=False, object_hook=None):
        """
        Return a set of attributes for an item that matches
        the supplied key.

        :type table_name: str
        :param table_name: The name of the table containing the item.

        :type key: dict
        :param key: A Python version of the Key data structure
            defined by DynamoDB.

        :type attributes_to_get: list
        :param attributes_to_get: A list of attribute names.
            If supplied, only the specified attribute names will
            be returned.  Otherwise, all attributes will be returned.

        :type consistent_read: bool
        :param consistent_read: If True, a consistent read
            request is issued.  Otherwise, an eventually consistent
            request is issued.
        """
        data = {'TableName': table_name,
                'Key': key}
        if attributes_to_get:
            data['AttributesToGet'] = attributes_to_get
        if consistent_read:
            data['ConsistentRead'] = True
        json_input = json.dumps(data)
        response = self.make_request('GetItem', json_input,
                                     object_hook=object_hook)
        if 'Item' not in response:
            raise dynamodb_exceptions.DynamoDBKeyNotFoundError(
                "Key does not exist."
            )
        return response
Example no. 42
    def list_operations(self, marker=None, max_items=None):
        """
        This operation returns the operation IDs of operations that
        are not yet complete.

        :type marker: string
        :param marker: For an initial request for a list of operations, omit
            this element. If the number of operations that are not yet complete
            is greater than the value that you specified for `MaxItems`, you
            can use `Marker` to return additional operations. Get the value of
            `NextPageMarker` from the previous response, and submit another
            request that includes the value of `NextPageMarker` in the `Marker`
            element.
        Type: String

        Default: None

        Required: No

        :type max_items: integer
        :param max_items: Number of domains to be returned.
        Type: Integer

        Default: 20

        Constraints: A value between 1 and 100.

        Required: No

        """
        params = {}
        if marker is not None:
            params['Marker'] = marker
        if max_items is not None:
            params['MaxItems'] = max_items
        return self.make_request(action='ListOperations',
                                 body=json.dumps(params))
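The Marker/NextPageMarker description implies the usual paging loop. A sketch, assuming `r53d` is the connection object and that the parsed response carries `Operations` and `NextPageMarker`.

    ops = []
    marker = None
    while True:
        page = r53d.list_operations(marker=marker, max_items=100)
        ops.extend(page['Operations'])
        marker = page.get('NextPageMarker')
        if not marker:
            break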
Example no. 43
    def list_streams(self, limit=None, exclusive_start_stream_name=None):
        """
        Lists your streams.

        The number of streams may be too large to return from a single
        call to `ListStreams`. You can limit the number of returned
        streams using the `Limit` parameter. If you do not specify a
        value for the `Limit` parameter, Amazon Kinesis uses the
        default limit, which is currently 10.

        You can detect if there are more streams available to list by
        using the `HasMoreStreams` flag from the returned output. If
        there are more streams available, you can request more streams
        by using the name of the last stream returned by the
        `ListStreams` request in the `ExclusiveStartStreamName`
        parameter in a subsequent request to `ListStreams`. The group
        of stream names returned by the subsequent request is then
        added to the list. You can continue this process until all the
        stream names have been collected in the list.

        `ListStreams` has a limit of 5 transactions per second per
        account.

        :type limit: integer
        :param limit: The maximum number of streams to list.

        :type exclusive_start_stream_name: string
        :param exclusive_start_stream_name: The name of the stream to start the
            list with.

        """
        params = {}
        if limit is not None:
            params['Limit'] = limit
        if exclusive_start_stream_name is not None:
            params['ExclusiveStartStreamName'] = exclusive_start_stream_name
        return self.make_request(action='ListStreams', body=json.dumps(params))
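The `HasMoreStreams` protocol described above can be driven with a loop like this sketch; `kinesis` is assumed to be the connection object and the response a parsed JSON dict with `StreamNames` and `HasMoreStreams`.

    streams = []
    start = None
    while True:
        page = kinesis.list_streams(limit=10, exclusive_start_stream_name=start)
        streams.extend(page['StreamNames'])
        if not page.get('HasMoreStreams'):
            break
        start = streams[-1]   # continue after the last stream name returned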
Example no. 44
    def put_pipeline_definition(self, pipeline_objects, pipeline_id):
        """
        Adds tasks, schedules, and preconditions that control the
        behavior of the pipeline. You can use PutPipelineDefinition to
        populate a new pipeline or to update an existing pipeline that
        has not yet been activated.

        PutPipelineDefinition also validates the configuration as it
        adds it to the pipeline. Changes to the pipeline are saved
        unless one of the following three validation errors exists in
        the pipeline.

        #. An object is missing a name or identifier field.
        #. A string or reference field is empty.
        #. The number of objects in the pipeline exceeds the maximum
           allowed objects.



        Pipeline object definitions are passed to the
        PutPipelineDefinition action and returned by the
        GetPipelineDefinition action.

        :type pipeline_id: string
        :param pipeline_id: The identifier of the pipeline to be configured.

        :type pipeline_objects: list
        :param pipeline_objects: The objects that define the pipeline. These
            will overwrite the existing pipeline definition.

        """
        params = {
            'pipelineId': pipeline_id,
            'pipelineObjects': pipeline_objects,
        }
        return self.make_request(action='PutPipelineDefinition',
                                 body=json.dumps(params))
Example no. 45
    def retrieve_domain_auth_code(self, domain_name):
        """
        This operation returns the AuthCode for the domain. To
        transfer a domain to another registrar, you provide this value
        to the new registrar.

        :type domain_name: string
        :param domain_name: The name of a domain.
        Type: String

        Default: None

        Constraints: The domain name can contain only the letters a through z,
            the numbers 0 through 9, and hyphen (-). Internationalized Domain
            Names are not supported.

        Required: Yes

        """
        params = {
            'DomainName': domain_name,
        }
        return self.make_request(action='RetrieveDomainAuthCode',
                                 body=json.dumps(params))
Example no. 46
    def get_domain_detail(self, domain_name):
        """
        This operation returns detailed information about the domain.
        The domain's contact information is also returned as part of
        the output.

        :type domain_name: string
        :param domain_name: The name of a domain.
        Type: String

        Default: None

        Constraints: The domain name can contain only the letters a through z,
            the numbers 0 through 9, and hyphen (-). Internationalized Domain
            Names are not supported.

        Required: Yes

        """
        params = {
            'DomainName': domain_name,
        }
        return self.make_request(action='GetDomainDetail',
                                 body=json.dumps(params))
Example no. 47
    def update_pipeline_notifications(self, id, notifications):
        """
        To update Amazon Simple Notification Service (Amazon SNS)
        notifications for a pipeline, send a POST request to the
        `/2012-09-25/pipelines/[pipelineId]/notifications` resource.

        When you update notifications for a pipeline, Elastic Transcoder
        returns the values that you specified in the request.

        :type id: string
        :param id: The identifier of the pipeline for which you want to change
            notification settings.

        :type notifications: dict
        :param notifications: The Amazon Simple Notification Service (Amazon
            SNS) topic that you want to notify to report job status.
        To receive notifications, you must also subscribe
            to the new topic in the console.

        + **Progressing**: The topic that you want to
              notify when Elastic Transcoder has started to process the job.
        + **Completed**: The topic that you want to notify
              when Elastic Transcoder has finished processing the job.
        + **Warning**: The topic that you want to notify
              when Elastic Transcoder encounters a warning condition.
        + **Error**: The topic that you want to notify
              when Elastic Transcoder encounters an error condition.

        """
        uri = '/2012-09-25/pipelines/{0}/notifications'.format(id)
        params = {
            'Notifications': notifications,
        }
        return self.make_request('POST',
                                 uri,
                                 expected_status=200,
                                 data=json.dumps(params))
Example no. 48
    def create_pipeline(self, name, unique_id, description=None):
        """
        Creates a new empty pipeline. When this action succeeds, you
        can then use the PutPipelineDefinition action to populate the
        pipeline.

        :type name: string
        :param name: The name of the new pipeline. You can use the same name
            for multiple pipelines associated with your AWS account, because
            AWS Data Pipeline assigns each new pipeline a unique pipeline
            identifier.

        :type unique_id: string
        :param unique_id: A unique identifier that you specify. This identifier
            is not the same as the pipeline identifier assigned by AWS Data
            Pipeline. You are responsible for defining the format and ensuring
            the uniqueness of this identifier. You use this parameter to ensure
            idempotency during repeated calls to CreatePipeline. For example,
            if the first call to CreatePipeline does not return a clear
            success, you can pass in the same unique identifier and pipeline
            name combination on a subsequent call to CreatePipeline.
            CreatePipeline ensures that if a pipeline already exists with the
            same name and unique identifier, a new pipeline will not be
            created. Instead, you'll receive the pipeline identifier from the
            previous attempt. The uniqueness of the name and unique identifier
            combination is scoped to the AWS account or IAM user credentials.

        :type description: string
        :param description: The description of the new pipeline.

        """
        params = {'name': name, 'uniqueId': unique_id, }
        if description is not None:
            params['description'] = description
        return self.make_request(action='CreatePipeline',
                                 body=json.dumps(params))
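A sketch of the idempotent-create pattern the docstring describes, assuming `dp` is the connection object and that the parsed response exposes `pipelineId`; the name and unique ID are placeholders.

    # Retrying with the same uniqueId returns the pipeline created by the first
    # attempt instead of creating a duplicate.
    result = dp.create_pipeline(name='nightly-etl',
                                unique_id='nightly-etl-2015-01-01',
                                description='Nightly ETL run')
    pipeline_id = result['pipelineId']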
Example no. 49
    def get_open_id_token(self, identity_id, logins=None):
        """
        Gets an OpenID token, using a known Cognito ID. This known
        Cognito ID is returned by GetId. You can optionally add
        additional logins for the identity. Supplying multiple logins
        creates an implicit link.

        The OpenId token is valid for 15 minutes.

        :type identity_id: string
        :param identity_id: A unique identifier in the format REGION:GUID.

        :type logins: map
        :param logins: A set of optional name-value pairs that map provider
            names to provider tokens.

        """
        params = {
            'IdentityId': identity_id,
        }
        if logins is not None:
            params['Logins'] = logins
        return self.make_request(action='GetOpenIdToken',
                                 body=json.dumps(params))
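
A minimal sketch of calling the method above with an identity obtained from a prior GetId call. The connection helper, identity ID, login token, and the 'Token' response field are assumptions based on the Cognito Identity API:

import boto.cognito.identity

# Assumed: boto's Cognito Identity connect_to_region helper; IDs and tokens are placeholders.
conn = boto.cognito.identity.connect_to_region('us-east-1')

resp = conn.get_open_id_token(
    'us-east-1:00000000-0000-0000-0000-000000000000',
    logins={'graph.facebook.com': 'FB_ACCESS_TOKEN'})
print(resp['Token'])  # OpenID token, valid for roughly 15 minutes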
Example no. 50
0
    def decrypt(self, ciphertext_blob, encryption_context=None,
                grant_tokens=None):
        """
        Decrypts ciphertext. Ciphertext is plaintext that has been
        previously encrypted by using the Encrypt function.

        :type ciphertext_blob: blob
        :param ciphertext_blob: Ciphertext including metadata.

        :type encryption_context: map
        :param encryption_context: The encryption context. If this was
            specified in the Encrypt function, it must be specified here or the
            decryption operation will fail. For more information, see
            `Encryption Context`_.

        :type grant_tokens: list
        :param grant_tokens: A list of grant tokens that represent grants which
            can be used to provide long term permissions to perform decryption.

        """
        if not isinstance(ciphertext_blob, six.binary_type):
            raise TypeError(
                "Value of argument ``ciphertext_blob`` "
                "must be of type %s." % six.binary_type)
        ciphertext_blob = base64.b64encode(ciphertext_blob)
        params = {'CiphertextBlob': ciphertext_blob, }
        if encryption_context is not None:
            params['EncryptionContext'] = encryption_context
        if grant_tokens is not None:
            params['GrantTokens'] = grant_tokens
        response = self.make_request(action='Decrypt',
                                     body=json.dumps(params))
        if response.get('Plaintext') is not None:
            response['Plaintext'] = base64.b64decode(
                response['Plaintext'].encode('utf-8'))
        return response
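
A usage sketch for the decrypt wrapper above: pass raw ciphertext bytes and let the method handle the base64 round trip, as its body shows. The connection helper, file name, and encryption context are assumptions, not values from the source:

import boto.kms

# Assumed: boto's KMS connect_to_region helper; region is a placeholder.
conn = boto.kms.connect_to_region('us-east-1')

# ciphertext_blob must be raw bytes; the method base64-encodes it for the request
# and base64-decodes the returned Plaintext.
with open('secret.bin', 'rb') as f:
    ciphertext = f.read()

# The context must match whatever was supplied to Encrypt, per the docstring.
resp = conn.decrypt(ciphertext, encryption_context={'purpose': 'db-password'})
plaintext = resp['Plaintext']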
Example no. 51
0
    def list_identity_pool_usage(self, next_token=None, max_results=None):
        """
        Gets a list of identity pools registered with Cognito.

        :type next_token: string
        :param next_token: A pagination token for obtaining the next page of
            results.

        :type max_results: integer
        :param max_results: The maximum number of results to be returned.

        """

        uri = '/identitypools'
        params = {}
        headers = {}
        query_params = {}
        if next_token is not None:
            query_params['nextToken'] = next_token
        if max_results is not None:
            query_params['maxResults'] = max_results
        return self.make_request('GET', uri, expected_status=200,
                                 data=json.dumps(params), headers=headers,
                                 params=query_params)
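
A pagination sketch for the method above, looping on the next_token it documents. The connection helper and the 'IdentityPoolUsages'/'NextToken' response field names are assumptions based on the Cognito Sync API:

import boto.cognito.sync

# Assumed: boto's Cognito Sync connect_to_region helper; region is a placeholder.
conn = boto.cognito.sync.connect_to_region('us-east-1')

next_token = None
while True:
    page = conn.list_identity_pool_usage(next_token=next_token, max_results=10)
    for usage in page.get('IdentityPoolUsages', []):
        print(usage)
    next_token = page.get('NextToken')
    if not next_token:
        break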
Example no. 52
0
    def update_pipeline_status(self, id=None, status=None):
        """
        To pause or reactivate a pipeline, so the pipeline stops or
        restarts processing jobs, update the status for the pipeline.
        Send a POST request to the `/2012-09-25/pipelines/
        [pipelineId] /status` resource.

        Changing the pipeline status is useful if you want to cancel
        one or more jobs. You can't cancel jobs after Elastic
        Transcoder has started processing them; if you pause the
        pipeline to which you submitted the jobs, you have more time
        to get the job IDs for the jobs that you want to cancel, and
        to send a CancelJob request.

        :type id: string
        :param id: The identifier of the pipeline to update.

        :type status: string
        :param status:
        The desired status of the pipeline:


        + `Active`: The pipeline is processing jobs.
        + `Paused`: The pipeline is not currently processing jobs.

        """
        uri = '/2012-09-25/pipelines/{0}/status'.format(id)
        params = {}
        if id is not None:
            params['Id'] = id
        if status is not None:
            params['Status'] = status
        return self.make_request('POST',
                                 uri,
                                 expected_status=200,
                                 data=json.dumps(params))
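
A sketch of the pause/reactivate flow the docstring describes, using only the two documented status values. The connection helper and pipeline ID are placeholders:

import boto.elastictranscoder

# Assumed: boto's Elastic Transcoder connect_to_region helper; the ID is a placeholder.
conn = boto.elastictranscoder.connect_to_region('us-east-1')

pipeline_id = '1111111111111-abcde1'
# Pause the pipeline to buy time for CancelJob requests ...
conn.update_pipeline_status(id=pipeline_id, status='Paused')
# ... cancel the offending jobs here, then resume processing.
conn.update_pipeline_status(id=pipeline_id, status='Active')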
Example no. 53
0
    def list_functions(self, marker=None, max_items=None):
        """
        Returns a list of your Lambda functions. For each function,
        the response includes the function configuration information.
        You must use GetFunction to retrieve the code for your
        function.

        This operation requires permission for the
        `lambda:ListFunctions` action.

        :type marker: string
        :param marker: Optional string. An opaque pagination token returned
            from a previous `ListFunctions` operation. If present, indicates
            where to continue the listing.

        :type max_items: integer
        :param max_items: Optional integer. Specifies the maximum number of AWS
            Lambda functions to return in response. This parameter value must
            be greater than 0.

        """

        uri = '/2014-11-13/functions/'
        params = {}
        headers = {}
        query_params = {}
        if marker is not None:
            query_params['Marker'] = marker
        if max_items is not None:
            query_params['MaxItems'] = max_items
        return self.make_request('GET',
                                 uri,
                                 expected_status=200,
                                 data=json.dumps(params),
                                 headers=headers,
                                 params=query_params)
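
A pagination sketch for list_functions using the marker it documents. The connection helper and the 'Functions'/'NextMarker' response field names are assumptions based on the Lambda ListFunctions API:

import boto.awslambda

# Assumed: boto's Lambda connect_to_region helper; region is a placeholder.
conn = boto.awslambda.connect_to_region('us-east-1')

marker = None
while True:
    page = conn.list_functions(marker=marker, max_items=50)
    for fn in page.get('Functions', []):
        print(fn['FunctionName'])
    marker = page.get('NextMarker')
    if not marker:
        break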
Example no. 54
0
    def create_deployment_config(self, deployment_config_name,
                                 minimum_healthy_hosts=None):
        """
        Creates a new deployment configuration.

        :type deployment_config_name: string
        :param deployment_config_name: The name of the deployment configuration
            to create.

        :type minimum_healthy_hosts: dict
        :param minimum_healthy_hosts: The minimum number of healthy instances
            that should be available at any time during the deployment. There
            are two parameters expected in the input: type and value.
        The type parameter takes either of the following values:


        + HOST_COUNT: The value parameter represents the minimum number of
              healthy instances, as an absolute value.
        + FLEET_PERCENT: The value parameter represents the minimum number of
              healthy instances, as a percentage of the total number of instances
              in the deployment. If you specify FLEET_PERCENT, then at the start
              of the deployment AWS CodeDeploy converts the percentage to the
              equivalent number of instances and rounds fractional instances up.


        The value parameter takes an integer.

        For example, to set a minimum of 95% healthy instances, specify a type
            of FLEET_PERCENT and a value of 95.

        """
        params = {'deploymentConfigName': deployment_config_name, }
        if minimum_healthy_hosts is not None:
            params['minimumHealthyHosts'] = minimum_healthy_hosts
        return self.make_request(action='CreateDeploymentConfig',
                                 body=json.dumps(params))
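
A sketch of the 95% FLEET_PERCENT case the docstring itself uses as an example. The connection helper, config name, and the 'deploymentConfigId' response field are assumptions:

import boto.codedeploy

# Assumed: boto's CodeDeploy connect_to_region helper; region is a placeholder.
conn = boto.codedeploy.connect_to_region('us-east-1')

# Require at least 95% of instances to stay healthy during a deployment.
result = conn.create_deployment_config(
    'at-least-95-percent-healthy',
    minimum_healthy_hosts={'type': 'FLEET_PERCENT', 'value': 95})
print(result['deploymentConfigId'])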
Example no. 55
0
    def deliver_config_snapshot(self, delivery_channel_name):
        """
        Schedules delivery of a configuration snapshot to the Amazon
        S3 bucket in the specified delivery channel. After the
        delivery has started, AWS Config sends following notifications
        using an Amazon SNS topic that you have specified.


        + Notification of starting the delivery.
        + Notification of delivery completed, if the delivery was
          successfully completed.
        + Notification of delivery failure, if the delivery failed to
          complete.

        :type delivery_channel_name: string
        :param delivery_channel_name: The name of the delivery channel through
            which the snapshot is delivered.

        """
        params = {
            'deliveryChannelName': delivery_channel_name,
        }
        return self.make_request(action='DeliverConfigSnapshot',
                                 body=json.dumps(params))
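
A minimal sketch of triggering a snapshot delivery. The boto.configservice connect_to_region helper, the 'default' channel name, and the 'configSnapshotId' response field are assumptions based on the AWS Config API:

import boto.configservice

# Assumed: boto's AWS Config connect_to_region helper; channel name is a placeholder.
conn = boto.configservice.connect_to_region('us-east-1')

resp = conn.deliver_config_snapshot('default')
print(resp['configSnapshotId'])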
Example no. 56
0
    def get_pipeline_definition(self, pipeline_id, version=None):
        """
        Returns the definition of the specified pipeline. You can call
        GetPipelineDefinition to retrieve the pipeline definition you
        provided using PutPipelineDefinition.

        :type pipeline_id: string
        :param pipeline_id: The identifier of the pipeline.

        :type version: string
        :param version: The version of the pipeline definition to retrieve.
            This parameter accepts the values `latest` (default) and `active`.
            Where `latest` indicates the last definition saved to the pipeline
            and `active` indicates the last definition of the pipeline that was
            activated.

        """
        params = {
            'pipelineId': pipeline_id,
        }
        if version is not None:
            params['version'] = version
        return self.make_request(action='GetPipelineDefinition',
                                 body=json.dumps(params))
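
A sketch that fetches the active (rather than latest saved) definition, per the version values documented above. The connection helper, pipeline ID, and the 'pipelineObjects' response field are assumptions based on the Data Pipeline API:

import boto.datapipeline

# Assumed: boto's Data Pipeline connect_to_region helper; the pipeline ID is a placeholder.
conn = boto.datapipeline.connect_to_region('us-east-1')

definition = conn.get_pipeline_definition('df-00000000AAAAAAAAAAAA', version='active')
for obj in definition.get('pipelineObjects', []):
    print(obj['id'], obj['name'])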
Example no. 57
0
    def query(self,
              table_name,
              hash_key_value,
              range_key_conditions=None,
              attributes_to_get=None,
              limit=None,
              consistent_read=False,
              scan_index_forward=True,
              exclusive_start_key=None,
              object_hook=None,
              count=False):
        """
        Perform a query of DynamoDB.  This version is currently punting
        and expecting you to provide a full and correct JSON body
        which is passed as is to DynamoDB.

        :type table_name: str
        :param table_name: The name of the table to query.

        :type hash_key_value: dict
        :param hash_key_value: A DynamoDB-style HashKeyValue.

        :type range_key_conditions: dict
        :param range_key_conditions: A Python version of the
            RangeKeyConditions data structure.

        :type attributes_to_get: list
        :param attributes_to_get: A list of attribute names.
            If supplied, only the specified attribute names will
            be returned.  Otherwise, all attributes will be returned.

        :type limit: int
        :param limit: The maximum number of items to return.

        :type count: bool
        :param count: If True, Amazon DynamoDB returns a total
            number of items for the Query operation, even if the
            operation has no matching items for the assigned filter.

        :type consistent_read: bool
        :param consistent_read: If True, a consistent read
            request is issued.  Otherwise, an eventually consistent
            request is issued.

        :type scan_index_forward: bool
        :param scan_index_forward: Specified forward or backward
            traversal of the index.  Default is forward (True).

        :type exclusive_start_key: list or tuple
        :param exclusive_start_key: Primary key of the item from
            which to continue an earlier query.  This would be
            provided as the LastEvaluatedKey in that query.
        """
        data = {'TableName': table_name, 'HashKeyValue': hash_key_value}
        if range_key_conditions:
            data['RangeKeyCondition'] = range_key_conditions
        if attributes_to_get:
            data['AttributesToGet'] = attributes_to_get
        if limit:
            data['Limit'] = limit
        if count:
            data['Count'] = True
        if consistent_read:
            data['ConsistentRead'] = True
        if scan_index_forward:
            data['ScanIndexForward'] = True
        else:
            data['ScanIndexForward'] = False
        if exclusive_start_key:
            data['ExclusiveStartKey'] = exclusive_start_key
        json_input = json.dumps(data)
        return self.make_request('Query', json_input, object_hook=object_hook)
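
A usage sketch for this low-level query: because it passes the body straight to DynamoDB, the hash key must already be a DynamoDB-typed AttributeValue dict. The table name, key, and attribute names are placeholders; credentials are assumed to come from the environment or boto config:

from boto.dynamodb.layer1 import Layer1

# Assumed: credentials resolved from the environment/boto config.
conn = Layer1()

result = conn.query(
    'Comments',
    hash_key_value={'S': 'post-42'},   # raw DynamoDB wire format, not a plain string
    attributes_to_get=['author', 'body'],
    limit=25,
    consistent_read=True)
print(result['Count'])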
Example no. 58
0
    def create_case(self,
                    subject,
                    communication_body,
                    service_code=None,
                    severity_code=None,
                    category_code=None,
                    cc_email_addresses=None,
                    language=None,
                    issue_type=None,
                    attachment_set_id=None):
        """
        Creates a new case in the AWS Support Center. This operation
        is modeled on the behavior of the AWS Support Center `Open a
        new case`_ page. Its parameters require you to specify the
        following information:


        #. **IssueType.** The type of issue for the case. You can
           specify either "customer-service" or "technical." If you do
           not indicate a value, the default is "technical."
        #. **ServiceCode.** The code for an AWS service. You obtain
           the `ServiceCode` by calling DescribeServices.
        #. **CategoryCode.** The category for the service defined for
           the `ServiceCode` value. You also obtain the category code for
           a service by calling DescribeServices. Each AWS service
           defines its own set of category codes.
        #. **SeverityCode.** A value that indicates the urgency of the
           case, which in turn determines the response time according to
           your service level agreement with AWS Support. You obtain the
           SeverityCode by calling DescribeSeverityLevels.
        #. **Subject.** The **Subject** field on the AWS Support
           Center `Open a new case`_ page.
        #. **CommunicationBody.** The **Description** field on the AWS
           Support Center `Open a new case`_ page.
        #. **AttachmentSetId.** The ID of a set of attachments that
           has been created by using AddAttachmentsToSet.
        #. **Language.** The human language in which AWS Support
           handles the case. English and Japanese are currently
           supported.
        #. **CcEmailAddresses.** The AWS Support Center **CC** field
           on the `Open a new case`_ page. You can list email addresses
           to be copied on any correspondence about the case. The account
           that opens the case is already identified by passing the AWS
           Credentials in the HTTP POST method or in a method or function
           call from one of the programming languages supported by an
           `AWS SDK`_.


        A successful CreateCase request returns an AWS Support case
        number. Case numbers are used by the DescribeCases operation
        to retrieve existing AWS Support cases.

        :type subject: string
        :param subject: The title of the AWS Support case.

        :type service_code: string
        :param service_code: The code for the AWS service returned by the call
            to DescribeServices.

        :type severity_code: string
        :param severity_code: The code for the severity level returned by the
            call to DescribeSeverityLevels.

        :type category_code: string
        :param category_code: The category of problem for the AWS Support case.

        :type communication_body: string
        :param communication_body: The communication body text when you create
            an AWS Support case by calling CreateCase.

        :type cc_email_addresses: list
        :param cc_email_addresses: A list of email addresses that AWS Support
            copies on case correspondence.

        :type language: string
        :param language: The ISO 639-1 code for the language in which AWS
            provides support. AWS Support currently supports English ("en") and
            Japanese ("ja"). Language parameters must be passed explicitly for
            operations that take them.

        :type issue_type: string
        :param issue_type: The type of issue for the case. You can specify
            either "customer-service" or "technical." If you do not indicate a
            value, the default is "technical."

        :type attachment_set_id: string
        :param attachment_set_id: The ID of a set of one or more attachments
            for the case. Create the set by using AddAttachmentsToSet.

        """
        params = {
            'subject': subject,
            'communicationBody': communication_body,
        }
        if service_code is not None:
            params['serviceCode'] = service_code
        if severity_code is not None:
            params['severityCode'] = severity_code
        if category_code is not None:
            params['categoryCode'] = category_code
        if cc_email_addresses is not None:
            params['ccEmailAddresses'] = cc_email_addresses
        if language is not None:
            params['language'] = language
        if issue_type is not None:
            params['issueType'] = issue_type
        if attachment_set_id is not None:
            params['attachmentSetId'] = attachment_set_id
        return self.make_request(action='CreateCase', body=json.dumps(params))
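
A sketch of opening a technical case with the fields the docstring walks through. The service, severity, and category codes shown here are placeholders (in practice they come from DescribeServices and DescribeSeverityLevels), and the connection helper and 'caseId' response field are assumptions:

import boto.support

# Assumed: boto's Support connect_to_region helper; the Support API lives in us-east-1.
conn = boto.support.connect_to_region('us-east-1')

case = conn.create_case(
    subject='Instance i-0abc123 unreachable',
    communication_body='The instance stopped responding after the last reboot.',
    service_code='amazon-elastic-compute-cloud-linux',  # placeholder from DescribeServices
    severity_code='high',                               # placeholder from DescribeSeverityLevels
    category_code='instance-issue',                     # placeholder
    cc_email_addresses=['oncall@example.com'],
    language='en',
    issue_type='technical')
print(case['caseId'])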
Example no. 59
0
    def to_json(self):
        """
        Return the JSON representation of the options as a string.
        """
        return json.dumps(self)
Example no. 60
0
    def describe_cases(self,
                       case_id_list=None,
                       display_id=None,
                       after_time=None,
                       before_time=None,
                       include_resolved_cases=None,
                       next_token=None,
                       max_results=None,
                       language=None,
                       include_communications=None):
        """
        Returns a list of cases that you specify by passing one or
        more case IDs. In addition, you can filter the cases by date
        by setting values for the `AfterTime` and `BeforeTime` request
        parameters.

        Case data is available for 12 months after creation. If a case
        was created more than 12 months ago, a request for data might
        cause an error.

        The response returns the following in JSON format:


        #. One or more CaseDetails data types.
        #. One or more `NextToken` values, which specify where to
           paginate the returned records represented by the `CaseDetails`
           objects.

        :type case_id_list: list
        :param case_id_list: A list of ID numbers of the support cases you want
            returned. The maximum number of cases is 100.

        :type display_id: string
        :param display_id: The ID displayed for a case in the AWS Support
            Center user interface.

        :type after_time: string
        :param after_time: The start date for a filtered date search on support
            case communications. Case communications are available for 12
            months after creation.

        :type before_time: string
        :param before_time: The end date for a filtered date search on support
            case communications. Case communications are available for 12
            months after creation.

        :type include_resolved_cases: boolean
        :param include_resolved_cases: Specifies whether resolved support cases
            should be included in the DescribeCases results. The default is
            false .

        :type next_token: string
        :param next_token: A resumption point for pagination.

        :type max_results: integer
        :param max_results: The maximum number of results to return before
            paginating.

        :type language: string
        :param language: The ISO 639-1 code for the language in which AWS
            provides support. AWS Support currently supports English ("en") and
            Japanese ("ja"). Language parameters must be passed explicitly for
            operations that take them.

        :type include_communications: boolean
        :param include_communications: Specifies whether communications should
            be included in the DescribeCases results. The default is true .

        """
        params = {}
        if case_id_list is not None:
            params['caseIdList'] = case_id_list
        if display_id is not None:
            params['displayId'] = display_id
        if after_time is not None:
            params['afterTime'] = after_time
        if before_time is not None:
            params['beforeTime'] = before_time
        if include_resolved_cases is not None:
            params['includeResolvedCases'] = include_resolved_cases
        if next_token is not None:
            params['nextToken'] = next_token
        if max_results is not None:
            params['maxResults'] = max_results
        if language is not None:
            params['language'] = language
        if include_communications is not None:
            params['includeCommunications'] = include_communications
        return self.make_request(action='DescribeCases',
                                 body=json.dumps(params))
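
A pagination sketch for the method above, filtering by date and looping on the next_token it documents. The connection helper and the 'cases'/'nextToken' response field names are assumptions based on the Support DescribeCases API:

import boto.support

# Assumed: boto's Support connect_to_region helper; the Support API lives in us-east-1.
conn = boto.support.connect_to_region('us-east-1')

token = None
while True:
    page = conn.describe_cases(after_time='2015-01-01T00:00:00Z',
                               include_resolved_cases=True,
                               next_token=token,
                               max_results=20,
                               language='en')
    for case in page.get('cases', []):
        print(case['caseId'], case['subject'])
    token = page.get('nextToken')
    if not token:
        break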