Example 1
    def __setitem__(self, key, value):
        # Do some safety checking so we don't save a malformed state
        if key == self._STATE_KEY and not getattr(self.States,
                                                  str(value).upper(), None):
            LOGGER.error('Current state cannot be saved with value \'%s\'',
                         value)
            return

        dict.__setitem__(self, key, value)

        # If this is a key related to the state config, save the state in parameter store
        if key in self._state_keys():
            self._save_state()
Example 2
    def _check_http_response(self, response):
        """Method for checking for a valid HTTP response code

        Returns:
            bool: Indicator of whether or not this request was successful
        """
        success = response is not None and (200 <= response.status_code <= 299)

        if response is not None and not success:
            LOGGER.error('HTTP request failed for service \'%s\': [%d] %s',
                         self.type(), response.status_code,
                         response.json()['message'])

        return success
Example 3
    def _request_token(self):
        """Request OAuth token from salesforce

        Meanwhile, it will also get instance url which will be used in future
        requests. The instance url identifies the Salesforce instance to which
        API calls should be sent.

        Returns:
            bool: Returns True if update auth headers and instance url successfully.
        """
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        # Credentials required when requesting a token
        data = {
            'grant_type': 'password',
            'client_id': self._config.auth['client_id'],
            'client_secret': self._config.auth['client_secret'],
            'username': self._config.auth['username'],
            # Salesforce expects the password concatenated with the security token
            'password': '{}{}'.format(self._config.auth['password'],
                                      self._config.auth['security_token']),
            'response_type': 'code',
            'redirect_uri': self._SALESFORCE_TOKEN_URL
        }
        success, response = self._make_post_request(self._SALESFORCE_TOKEN_URL,
                                                    headers, data, False)

        if not (success and response):
            return False

        if not (response.get('access_token') and response.get('instance_url')):
            LOGGER.error(
                'Response invalid generating headers for service \'%s\'',
                self._type())
            return False

        bearer = 'Bearer {}'.format(response.get('access_token'))
        self._auth_headers = {
            'Content-Type': 'application/json',
            'Authorization': bearer
        }
        self._instance_url = response.get('instance_url')
        LOGGER.debug('Successfully obtained OAuth token and instance URL')
        return True
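
A `_make_post_request` helper is assumed above but not included in this snippet. A minimal sketch of what it might look like, assuming the `requests` library and that the final positional argument toggles JSON versus form encoding of the body (both assumptions, not confirmed by the snippet):

    def _make_post_request(self, full_url, headers, data, is_json=True):
        """Hypothetical POST helper; assumes `requests` is imported at module level"""
        try:
            if is_json:
                resp = requests.post(full_url, headers=headers, json=data, timeout=10)
            else:
                # Form-encoded body, as used by the OAuth token request above
                resp = requests.post(full_url, headers=headers, data=data, timeout=10)
        except requests.RequestException:
            LOGGER.exception('POST request to %s failed', full_url)
            return False, None

        # Reuse the response check shown in Example 2
        if not self._check_http_response(resp):
            return False, None

        return True, resp.json()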
Example 4
    def _finalize(self):
        """Method for performing any final steps, like saving applicable state"""
        self._config.mark_success()

        if not self._last_timestamp:
            LOGGER.error('Ending last timestamp is 0. This should not happen and is likely '
                         'due to the subclass not setting this value.')

        if self._last_timestamp == self._config.start_last_timestamp:
            LOGGER.error('Ending last timestamp is the same as the beginning last timestamp')

        LOGGER.info('App complete for service \'%s\'. Gathered %d logs in %d polls.',
                    self.type(), self._gathered_log_count, self._poll_count)

        self._config.last_timestamp = self._last_timestamp
Example 5
    def _send_logs_to_stream_alert(self, source_function, logs):
        """Protected method for sending logs to the rule processor lambda
        function for processing. This performs some size checks before sending.

        Args:
            source_function (str): The app function name from which the logs came
            logs (list): List of the logs that have been gathered

        Returns:
            bool: True if the logs were sent successfully, or False if the
                payload exceeded the maximum Lambda payload size
        """
        # Create a payload to be sent to the rule processor that contains the
        # service these logs were collected from and the list of logs
        payload = {
            'Records': [{
                'stream_alert_app': source_function,
                'logs': logs
            }]
        }
        payload_json = json.dumps(payload, separators=(',', ':'))
        if len(payload_json) > MAX_LAMBDA_PAYLOAD_SIZE:
            LOGGER.debug(
                'Log payload size for %d logs exceeds limit and will be '
                'segmented (%d > %d max).', len(logs), len(payload_json),
                MAX_LAMBDA_PAYLOAD_SIZE)
            return False

        LOGGER.debug('Sending %d logs to rule processor with payload size %d',
                     len(logs), len(payload_json))

        try:
            response = Batcher.LAMBDA_CLIENT.invoke(
                FunctionName=self.rp_function,
                InvocationType='Event',
                Payload=payload_json,
                Qualifier='production')

        except ClientError as err:
            LOGGER.error(
                'An error occurred while sending logs to '
                '\'%s:production\'. Error is: %s', self.rp_function,
                err.response)
            raise

        LOGGER.info('Sent %d logs to \'%s\' with Lambda request ID \'%s\'',
                    len(logs), self.rp_function,
                    response['ResponseMetadata']['RequestId'])

        return True
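
The debug message above notes that an oversized payload "will be segmented"; that segmentation would happen in the caller. A sketch of how a caller on the same Batcher class might do it, assuming a hypothetical `_segment_and_send` method and a simple halving strategy (neither is confirmed by the snippet):

    def _segment_and_send(self, source_function, logs):
        """Hypothetical segmentation: halve the batch until each payload fits
        under MAX_LAMBDA_PAYLOAD_SIZE"""
        if self._send_logs_to_stream_alert(source_function, logs):
            return True

        if len(logs) == 1:
            LOGGER.error('A single log exceeds the maximum payload size')
            return False

        midpoint = len(logs) // 2
        return (self._segment_and_send(source_function, logs[:midpoint]) and
                self._segment_and_send(source_function, logs[midpoint:]))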
Example 6
    def _initialize(self):
        """Method for performing any startup steps, like setting state to running"""
        # Perform another safety check to make sure this is not being invoked already
        if self._config.is_running:
            LOGGER.error('App already running for service \'%s\'.', self.type())
            return False

        LOGGER.info('App starting for service \'%s\'.', self.type())

        # Validate the auth in the config. This raises an exception upon failure
        self._validate_auth()

        self._last_timestamp = self._config.last_timestamp

        # Mark this app as running, which updates the parameter store
        self._config.mark_running()

        return True
Example 7
    def _gather_logs(self):
        """Gather the Box Admin Events

        The ideal way to do this would be to use the boxsdk.events.Events class
        and the `get_events` method to retrieve these events. However, that
        method does not allow you to pass keyword arguments (such as params),
        which are needed for specifying the 'created_after' parameter.

        Returns:
            bool or list: If the execution fails for some reason, return False.
                Otherwise, return a list of box admin event entries.
        """
        if not self._create_client():
            LOGGER.error('Could not create box client for %s', self.type())
            return False

        result, response = self._make_request()

        # If the result is False, errors will have already been logged up the
        # stack, so just return False
        if not result:
            return False

        if not response:
            LOGGER.error('No results received from the Box API request for %s',
                         self.type())
            return False

        self._more_to_poll = int(
            response['chunk_size']) >= self._MAX_CHUNK_SIZE

        events = response.get('entries', [])
        if not events:
            LOGGER.info(
                'No events in response from the Box API request for %s',
                self.type())
            return False

        self._next_stream_position = response['next_stream_position']

        self._last_timestamp = events[-1]['created_at']

        return events
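
The `_create_client` helper referenced above is not part of this snippet. A hedged sketch of how it might build a Box client with JWT (service account) credentials, assuming the boxsdk package, a hypothetical `keyfile` auth config key, and `from boxsdk import Client, JWTAuth` at module level (all assumptions):

    def _create_client(self):
        """Hypothetical sketch of Box client creation"""
        if self._client:
            return True

        try:
            # from_settings_dictionary expects the JSON app settings downloaded
            # from the Box developer console
            auth = JWTAuth.from_settings_dictionary(self._config.auth['keyfile'])
        except (KeyError, TypeError, ValueError):
            LOGGER.exception('Could not load Box JWT settings for %s', self.type())
            return False

        self._client = Client(auth)
        return True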
Example 8
    def _get_latest_api_version(self):
        """GET request to fetch supported API versions and find the latest API version

        An example response JSON body:
        [
            {
                "version": "20.0",
                "label": "Winter '11",
                "url": "/services/data/v20.0"
            },
            {
                "version": "21.0",
                "label": "Spring '11",
                "url": "/services/data/v21.0"
            },
            {
                "version": "26.0",
                "label": "Winter '13",
                "url": "/services/data/v26.0"
            }
        ]

        Returns:
            bool: True if the latest API version was fetched successfully.
        """
        url = '{}/services/data/'.format(self._instance_url)
        success, response = self._make_get_request(url, self._auth_headers)

        if not (success and response):
            LOGGER.error('Failed to fetch latest API version')
            return False

        versions = [float(version.get('version', 0)) for version in response]
        if not versions:
            LOGGER.error('Failed to obtain latest API version')
            return False

        self._latest_api_version = str(sorted(versions)[-1])
        if self._latest_api_version == '0.0':
            LOGGER.error('Failed to obtain latest API version')
            return False

        LOGGER.debug('Successfully obtained latest API version %s',
                     self._latest_api_version)
        return True
Example 9
    def __setitem__(self, key, value):
        # Do some safety checking so we don't save a malformed state
        if key == self._STATE_KEY and not getattr(self.States, str(value).upper(), None):
            LOGGER.error('Current state cannot be saved with value \'%s\'', value)
            return

        # Cache the old value to see if the new value differs
        current_value = self.get(key)

        dict.__setitem__(self, key, value)

        # If this is a key related to the state config, save the state in parameter store
        if key in self._state_keys() and current_value != value:
            state_name = '_'.join([self['function_name'], self.STATE_CONFIG_SUFFIX])
            param_value = json.dumps({key: self[key] for key in self._state_keys()})
            try:
                self._save_state(state_name, param_value)
            except ClientError as err:
                raise AppIntegrationStateError('Could not save current state to parameter '
                                               'store with name \'{}\'. Response: '
                                               '{}'.format(state_name, err.response))
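
The `_save_state` helper called above is not shown. A minimal sketch of how it might write the state to AWS Systems Manager Parameter Store, assuming `boto3` is imported at module level and a SecureString parameter is used (both assumptions):

    @staticmethod
    def _save_state(state_name, param_value):
        """Hypothetical Parameter Store write; put_parameter raises ClientError
        on failure, which the caller above wraps in AppIntegrationStateError"""
        ssm_client = boto3.client('ssm')
        ssm_client.put_parameter(
            Name=state_name,
            Value=param_value,
            Type='SecureString',
            Overwrite=True)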
Example 10
    def _gather_logs(self):
        """Gather the G Suite Admin Report logs for this application type

        Returns:
            bool or list: If the execution fails for some reason, return False.
                Otherwise, return a list of activities for this application type.
        """
        if not self._create_service():
            return False

        activities_list = self._activities_service.list(
            userKey='all',
            applicationName=self._type(),
            startTime=self._last_timestamp,
            pageToken=self._next_page_token if self._next_page_token else None)

        try:
            results = activities_list.execute()
        except errors.HttpError:
            LOGGER.exception('Failed to execute activities listing')
            return False

        if not results:
            LOGGER.error(
                'No results received from the G Suite API request for %s',
                self.type())
            return False

        self._next_page_token = results.get('nextPageToken')
        self._more_to_poll = bool(self._next_page_token)

        activities = results.get('items', [])
        if not activities:
            LOGGER.info('No logs in response from G Suite API request for %s',
                        self.type())
            return False

        self._last_timestamp = activities[-1]['id']['time']

        return activities
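
The `_create_service` helper is also outside this snippet. A sketch of how the Admin SDK Reports service might be built with google-auth and google-api-python-client, assuming `from google.oauth2 import service_account` and `from googleapiclient import discovery` at module level, plus hypothetical `keyfile` and `delegation_email` auth config keys:

    def _create_service(self):
        """Hypothetical sketch of building the delegated Reports service"""
        if self._activities_service:
            return True

        try:
            creds = service_account.Credentials.from_service_account_info(
                self._config.auth['keyfile'],
                scopes=['https://www.googleapis.com/auth/admin.reports.audit.readonly'])
            delegated = creds.with_subject(self._config.auth['delegation_email'])
            reports = discovery.build('admin', 'reports_v1', credentials=delegated)
        except (ValueError, errors.Error):
            LOGGER.exception('Failed to build the G Suite reports service')
            return False

        self._activities_service = reports.activities()
        return True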
Example 11
    def _fetch_event_logs(self, log_file_path):
        """Get event logs by sending GET request to each log file location.

        Args:
            log_file_path (str): log file location.

        Returns:
            list: a list of event logs or None.
        """
        url = '{}/{}'.format(self._instance_url, log_file_path)
        try:
            success, resp = self._make_get_request(url, self._auth_headers)
        except SalesforceAppError:
            LOGGER.exception('Failed to get event logs')
            return

        if not (success and resp):
            LOGGER.error('Failed to get event logs')
            return

        # skip header line before passing to rule processor
        return resp.splitlines()[1:]
Example 12
    def _generate_headers(self):
        """Each request will request a new token to call the resources APIs.

        More details to be found here:
            https://developers.onelogin.com/api-docs/1/oauth20-tokens/generate-tokens-2

        Returns:
            bool: True if the Bearer token was obtained and the auth headers were set
        """
        if self._auth_headers:
            return True

        authorization = 'client_id: {}, client_secret: {}'.format(
            self._config.auth['client_id'], self._config.auth['client_secret'])

        headers_token = {
            'Authorization': authorization,
            'Content-Type': 'application/json'
        }

        result, response = self._make_post_request(
            self._token_endpoint(), headers_token,
            {'grant_type': 'client_credentials'})

        if not result:
            return False

        if not response:
            LOGGER.error(
                'Response invalid generating headers for service \'%s\'',
                self.type())
            return False

        bearer = 'bearer:{}'.format(response.get('access_token'))
        self._auth_headers = {'Authorization': bearer}

        return True
Example 13
        def do_gather():
            """Perform the gather using this scoped method so we can time it"""
            logs = self._gather_logs()

            # Make sure there are logs; this can be False if there was an issue
            # polling or if there are no new logs to be polled
            if not logs:
                LOGGER.error('Gather process for service \'%s\' was not able to poll any logs',
                             self.type())
                return

            # Increment the count of logs gathered
            self._gathered_log_count += len(logs)

            # Utilize the batcher to send logs to the rule processor
            self._batcher.send_logs(self._config['function_name'], logs)

            LOGGER.debug('Updating config last timestamp from %d to %d',
                         self._config.last_timestamp, self._last_timestamp)

            # Save the config's last timestamp after each function run
            self._config.last_timestamp = self._last_timestamp

            self._poll_count += 1
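
The docstring says `do_gather` is scoped so the gather can be timed. One way the enclosing method might time a single poll, shown only as a sketch and assuming `timeit` is imported at module level, is `timeit.timeit` with `number=1`:

        # timeit.timeit accepts any zero-argument callable and returns the
        # elapsed time in seconds for the requested number of executions
        total_time = timeit.timeit(do_gather, number=1)
        LOGGER.debug('Gather process for \'%s\' executed in %f seconds',
                     self.type(), total_time)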
Example 14
    def _validate_status_code(self, resp):
        """Validate status code from get response

        Args:
            resp (Response): The response object from get request

        Returns:
            bool: Returns True if status_code is 200. Otherwise returns False,
                except
        """
        if resp.status_code == 200:
            return True
        if resp.status_code == 401:
            # The OAuth token used has expired or is invalid.
            # Retry to renew OAuth token
            LOGGER.error(
                'The OAuth token used has expired or is invalid. '
                'Error code %s, error message %s',
                resp.json().get('errorCode', None),
                resp.json().get('message', None))
            self._request_token()

            # Get request will retry when SalesforceAppError exception raised
            raise SalesforceAppError
        elif resp.status_code == 403 and resp.json().get(
                'errorCode') == 'REQUEST_LIMIT_EXCEEDED':
            # Exceeded API request limits in your org. Log this information for
            # future reference.
            LOGGER.error('Exceeded API request limits')
            return False
        elif resp.status_code == 500:
            # Server internal error. Get request will retry.
            raise SalesforceAppError
        else:
            LOGGER.error(
                'Unexpected status code %d detected, error message %s',
                resp.status_code, resp.json())
            return False
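
The comments above state that the GET request retries when SalesforceAppError is raised. One way that retry could be wired up, purely as a sketch, is with the third-party `backoff` library around a hypothetical `_make_get_request` helper; the library, the signature, and the content-type branching are all assumptions:

    @backoff.on_exception(backoff.expo, SalesforceAppError, max_tries=3)
    def _make_get_request(self, full_url, headers, params=None):
        """Hypothetical GET helper; assumes `requests` and `backoff` imports"""
        resp = requests.get(full_url, headers=headers, params=params, timeout=10)

        # A SalesforceAppError raised here propagates to the decorator, which
        # retries the request with exponential backoff before re-raising
        if not self._validate_status_code(resp):
            return False, None

        # Event log file downloads are CSV text; other endpoints return JSON
        if resp.headers.get('Content-Type', '').startswith('application/json'):
            return True, resp.json()
        return True, resp.text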
Example 15
    def _get_onelogin_events(self):
        """Get all events from the endpoint for this timeframe

        Returns:
            bool or list: False if the request fails or returns no events;
                otherwise a list of event dicts of the form:
            [
                {
                    'id': <int:id>,
                    'created_at': <str:created_at>,
                    'account_id': <int:account_id>,
                    'user_id': <int:user_id>,
                    'event_type_id': <int:event_type_id>,
                    'notes': <str:notes>,
                    'ipaddr': <str:ipaddr>,
                    'actor_user_id': <int:actor_user_id>,
                    'assuming_acting_user_id': null,
                    'role_id': <int:role_id>,
                    'app_id': <int:app_id>,
                    'group_id': <int:group_id>,
                    'otp_device_id': <int:otp_device_id>,
                    'policy_id': <int:policy_id>,
                    'actor_system': <str:actor_system>,
                    'custom_message': <str:custom_message>,
                    'role_name': <str:role_name>,
                    'app_name': <str:app_name>,
                    'group_name': <str:group_name>,
                    'actor_user_name': <str:actor_user_name>,
                    'user_name': <str:user_name>,
                    'policy_name': <str:policy_name>,
                    'otp_device_name': <str:otp_device_name>,
                    'operation_name': <str:operation_name>,
                    'directory_sync_run_id': <int:directory_sync_run_id>,
                    'directory_id': <int:directory_id>,
                    'resolution': <str:resolution>,
                    'client_id': <int:client_id>,
                    'resource_type_id': <int:resource_type_id>,
                    'error_description': <str:error_description>
                }
            ]
        """
        # Make sure we have authentication headers
        if not self._auth_headers:
            LOGGER.error('No authentication headers for service \'%s\'',
                         self.type())
            return False

        # Are we just getting events or getting paginated events?
        if self._next_page_url:
            params = None
            request_url = self._next_page_url
        else:
            params = {'since': self._last_timestamp}
            request_url = self._events_endpoint()

        result, response = self._make_get_request(request_url,
                                                  self._auth_headers, params)

        if not result:
            # If we hit the rate limit, update the sleep time
            r_status = response.get('status') if response else None
            if (r_status and r_status['code'] == 400
                    and r_status['message'] == 'rate_limit_exceeded'):
                self._set_rate_limit_sleep()

            return False

        # Fail if response is invalid
        if not response:
            LOGGER.error('Response is invalid for service \'%s\'', self.type())
            return False

        # Set pagination link, if there is any
        self._next_page_url = response['pagination']['next_link']
        self._more_to_poll = bool(self._next_page_url)

        # Adjust the last seen event, if the events list is not empty
        if not response['data']:
            LOGGER.error('Empty list of events for service \'%s\'',
                         self.type())
            return False

        self._last_timestamp = response['data'][-1]['created_at']

        # Return the list of events to the caller so they can be sent to the batcher
        return response['data']
Example 16
    def _list_log_files(self):
        """Fetch a list of available log files by event types.

        An event generates log data in real time. However, log files are generated
        the day after an event takes place, during nonpeak hours. Therefore, log
        file data is unavailable for at least one day after an event.

        Returns:
            list: A list of log file locations (possibly empty), or None if the
                request fails.

        An example log files response JSON body:
        {
            "totalSize": 2,
            "done": True,
            "records": [
                {
                    "attributes": {
                        "type": "EventLogFile",
                        "url": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001bROAQ"
                    },
                    "Id": "0ATD000000001bROAQ",
                    "EventType": "Console",
                    "LogFile": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001bROAQ/LogFile",
                    "LogDate": "2014-03-14T00:00:00.000+0000",
                    "LogFileLength": 2692.0
                },
                {
                    "attributes": {
                        "type": "EventLogFile",
                        "url": "/services/data/v32.0/sobjects/EventLogFile/0ATD000000001SdOAI"
                    },
                    "Id": "0ATD000000001SdOAI",
                    "EventType": "Console",
                    "LogFile": "/services/data/v32.0/sobjects/EventLogFile/0ATD00001SdOAI/LogFile",
                    "LogDate": "2014-03-13T00:00:00.000+0000",
                    "LogFileLength": 1345.0
                }
            ]
        }
        """
        url = self._SALESFORCE_QUERY_URL.format(
            instance_url=self._instance_url,
            api_version=self._latest_api_version,
            query=self._SALESFORCE_QUERY_FILTERS,
            start_time=self._SALESFORCE_CREATE_AFTER.format(
                self._last_timestamp),
            event_type='AND+EventType+=+\'{}\''.format(self._type()))
        success, response = self._make_get_request(url, self._auth_headers)
        if not success:
            LOGGER.error('Failed to get a list of log files.')
            return

        log_files = []
        if response.get('records'):
            log_files.extend([
                record['LogFile'] for record in response['records']
                if record.get('LogFile')
            ])

        LOGGER.debug('Retrieved %d log files', len(log_files))
        return log_files
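
The class-level query constants used above are not included in this snippet. Purely as an illustration of their likely shape (the values below are hypothetical, not the project's real definitions), they might resemble the Salesforce REST query endpoint with a SOQL filter over EventLogFile:

    # Hypothetical values only; the real constants are not part of this snippet
    _SALESFORCE_QUERY_URL = ('{instance_url}/services/data/v{api_version}/query?'
                             'q=SELECT+{query}+FROM+EventLogFile+'
                             'WHERE+{start_time}+{event_type}')
    _SALESFORCE_QUERY_FILTERS = 'Id,EventType,LogFile,LogDate,LogFileLength'
    _SALESFORCE_CREATE_AFTER = 'LogDate+>=+{}'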