def datalake_requests(self,
                      url: str,
                      method: str,
                      headers: dict,
                      post_body: dict = None):
    """
    Wrapper around the new datalake_requests to keep compatibility with old scripts
    """
    try:
        response = self.endpoint.datalake_requests(url, method, headers,
                                                   post_body)
    except ValueError:
        logger.error('Request failed: Will return nothing for this request')
        return {}
    if 'Content-Type' in response.headers and 'text/csv' in response.headers[
            'Content-Type']:
        return response.text
    try:
        return self._load_response(response)
    except JSONDecodeError:
        logger.error(
            'Request unexpectedly returned a non-json value. Returning nothing')
        return {}
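
# A minimal usage sketch for the wrapper above: callers must handle both return
# types, raw csv text or a parsed dict ({} when the request failed). The helper
# name below is hypothetical:
def handle_wrapper_result(result):
    if isinstance(result, str):
        return result  # csv payload, pass it through as-is
    if not result:
        raise RuntimeError('request failed, empty response')
    return result  # parsed json dict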
    def handle_bulk_task(self, task_uuid, retrieve_bulk_result_url, *, timeout, additional_checks: List[Check] = None) \
            -> Json:
        """
        Handle a generic bulk task, blocking until the task is done or the timeout is up

        :param task_uuid: uuid of the bulk task
        :param retrieve_bulk_result_url: endpoint to query; must contain a {task_uuid} placeholder
        :param timeout: timeout after which a TimeoutError is raised
        :param additional_checks: functions called on a candidate json; the json is only returned if every check returns True
        :return: a Json returned on HTTP 200 validating all additional_checks
        """
        retrieve_bulk_result_url = retrieve_bulk_result_url.format(
            task_uuid=task_uuid)

        spinner = None
        if logger.isEnabledFor(logging.INFO):
            spinner = Halo(text=f'Waiting for bulk task {task_uuid} response',
                           spinner='dots')
            spinner.start()

        start_time = time()
        back_off_time = 1

        json_response = None
        while not json_response:
            headers = {'Authorization': self.token_manager.access_token}
            response = requests.get(url=retrieve_bulk_result_url,
                                    headers=headers,
                                    verify=self.requests_ssl_verify)
            if response.status_code == 200:
                potential_json_response = response.json()
                if additional_checks and not all(
                        check(potential_json_response)
                        for check in additional_checks):
                    continue  # the json isn't valid yet, poll again immediately
                if spinner:
                    spinner.succeed(f'bulk task {task_uuid} done')
                json_response = potential_json_response
            elif response.status_code == 401:
                logger.debug('Refreshing expired Token')
                self.token_manager.process_auth_error(
                    response.json().get('messages'))
            elif time() - start_time + back_off_time < timeout:
                sleep(back_off_time)
                back_off_time = min(back_off_time * 2,
                                    self.OCD_DTL_MAX_BACK_OFF_TIME)
            else:
                if spinner:
                    spinner.fail(f'bulk task {task_uuid} timeout')
                logger.error(f'Bulk task {task_uuid} timed out after {timeout / 60:.0f} mins')
                raise TimeoutError(
                    f'No bulk result after waiting {timeout / 60:.0f} mins\n'
                    f'task_uuid: "{task_uuid}"')

        if spinner:
            spinner.stop()
        return json_response
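
# additional_checks above receives the candidate json of every HTTP 200 poll and
# the result is only accepted once all checks return True. A minimal sketch of
# such a check, assuming the endpoint reports progress through a top-level
# 'state' field (the field name is an assumption, not confirmed by this code):
def bulk_task_is_finished(candidate_json: dict) -> bool:
    # reject partial payloads while the task is still running
    return candidate_json.get('state') not in ('NEW', 'IN_PROGRESS')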
    def get_threats(self, query_hash: str = None, query_body: BaseEngine.Json = None, query_fields: List[str] = None) \
            -> dict:
        body = {"query_fields": query_fields} if query_fields else {}
        if query_body:
            body['query_body'] = self.build_full_query_body(query_body)
        else:
            body['query_hash'] = query_hash

        response = self.datalake_requests(self.url,
                                          'post',
                                          post_body=body,
                                          headers=self._post_headers())
        if not response:
            logger.error(
                'No bulk search created, please check that the query_hash and query_fields are valid'
            )
            return {}
        return self._handle_bulk_search_task(task_uuid=response['task_uuid'])
    def fetch_new_token(self):
        logger.debug('Token will be refreshed')
        headers = {'Authorization': self.refresh_token}
        response = requests.post(url=self.url_refresh, headers=headers)

        json_response = response.json()
        if response.status_code == 401 and json_response.get(
                'messages') == 'Token has expired':
            logger.debug('Refreshing the refresh token')
            # Refresh token is also expired, we need to restart the authentication from scratch
            self.get_token()
        elif 'access_token' in json_response:
            self.access_token = f'Token {json_response["access_token"]}'
        else:  # an error occurred
            logger.error(
                f'An error occurred while refreshing the token, for URL: {self.url_refresh}\n'
                f'response of the API: {response.text}')
            raise ValueError(f'Could not refresh the token: {response.text}')
    def load_config(self,
                    args,
                    username=None,
                    password=None) -> Tuple[dict, TokenManager]:
        """Load correct config and generate first tokens"""
        configure_logging(args.loglevel)
        endpoint_config = Config().load_config()
        token_manager = TokenManager(
            endpoint_config,
            username=username,
            password=password,
            environment=args.env,
        )
        try:
            token_manager.get_token()
        except ValueError:
            logger.error(
                "Couldn't generate Tokens, please check the login/password provided"
            )
            exit()
        return endpoint_config, token_manager
    def get_token(self):
        """
        Generate token from user input, with email and password
        """
        self.username = self.username or os.getenv(
            'OCD_DTL_USERNAME') or input('Email: ')
        self.password = self.password or os.getenv(
            'OCD_DTL_PASSWORD') or getpass()
        print()
        data = {'email': self.username, 'password': self.password}

        response = requests.post(url=self.url_token, json=data)
        json_response = json.loads(response.text)
        try:
            self.access_token = f'Token {json_response["access_token"]}'
            self.refresh_token = f'Token {json_response["refresh_token"]}'
        except KeyError:
            logger.error(
                f'An error occurred while retrieving an access token, for URL: {self.url_token}\n'
                f'response of the API: {response.text}')
            raise ValueError(f'Could not login: {response.text}')
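
# get_token falls back to the OCD_DTL_USERNAME and OCD_DTL_PASSWORD environment
# variables before prompting, so it can run non-interactively. A minimal sketch
# (the credential values are placeholders):
import os

os.environ.setdefault('OCD_DTL_USERNAME', 'analyst@example.com')
os.environ.setdefault('OCD_DTL_PASSWORD', 'not-a-real-password')
# token_manager.get_token() would now pick these up without prompting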
    def datalake_requests(
        self,
        url: str,
        method: str,
        headers: dict,
        post_body: dict = None,
    ) -> Response:
        """
        Use it to request the API
        """
        tries_left = self.SET_MAX_RETRY

        while tries_left > 0:
            headers['Authorization'] = self.token_manager.access_token
            logger.debug(
                self._pretty_debug_request(url, method, post_body, headers))

            response = self._send_request(url, method, headers, post_body)

            logger.debug(f'API response:\n{str(response.text)}')
            if response.status_code == 401:
                logger.warning(
                    'Token expired or Missing authorization header. Updating token'
                )
                self.token_manager.process_auth_error(
                    response.json().get('messages'))
            elif response.status_code == 422:
                logger.warning('Bad authorization header. Updating token')
                logger.debug(f'422 HTTP code: {response.text}')
                self.token_manager.process_auth_error(
                    response.json().get('messages'))
            elif response.status_code < 200 or response.status_code > 299:
                logger.error(
                    f'API returned a non-2xx response code: {response.status_code}\n{response.text}\nRetrying'
                )
            else:
                return response
            tries_left -= 1
        logger.error('Request failed')
        raise ValueError(f'{response.status_code}: {response.text.strip()}')
def main(override_args=None):
    logger.debug(f'START: get_query_hash.py')

    # Load initial args
    parser = BaseScripts.start(
        'Retrieve a query hash from a query body (a json used for the Advanced Search).'
    )
    required_named = parser.add_argument_group('required arguments')
    required_named.add_argument(
        'query_body_path',
        help='path to the json file containing the query body',
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()

    # Load the query body
    with open(args.query_body_path, 'r') as query_body_file:
        query_body = json.load(query_body_file)
    logger.debug(f'Retrieving query hash for query body: {query_body}')

    dtl = Datalake(env=args.env, log_level=args.loglevel)

    resp = dtl.AdvancedSearch.advanced_search_from_query_body(
        query_body, limit=0, offset=0, output=Output.JSON)
    if not resp or 'query_hash' not in resp:
        logger.error(
            "Couldn't retrieve a query hash, is the query body valid?")
        exit(1)
    query_hash = resp['query_hash']
    if args.output:
        with open(args.output, 'w') as output:
            output.write(query_hash)
        logger.info(f'Query hash saved in {args.output}')
    else:
        logger.info(f'Query hash associated: {query_hash}')
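
# A sketch of what the query body json file could look like. The exact schema is
# defined by the Advanced Search API; the fields below are illustrative
# assumptions, not a confirmed contract:
#
# {
#     "AND": [
#         {
#             "field": "atom_type",
#             "multi_values": ["ip"],
#             "type": "filter"
#         }
#     ]
# }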
def main(override_args=None):
    """Method to start the script"""
    # Load initial args
    parser = BaseScripts.start('Submit a new threat to Datalake from a file')
    required_named = parser.add_argument_group('required arguments')
    csv_control = parser.add_argument_group('CSV control arguments')
    required_named.add_argument(
        '-i',
        '--input',
        help='read threats to add from FILE',
        required=True,
    )
    required_named.add_argument(
        '-a',
        '--atom_type',
        help='set it to define the atom type',
        required=True,
    )
    csv_control.add_argument(
        '--is_csv',
        help='set if the file input is a CSV',
        action='store_true',
    )
    csv_control.add_argument(
        '-d',
        '--delimiter',
        help='set the delimiter of the CSV file',
        default=',',
    )
    csv_control.add_argument(
        '-c',
        '--column',
        help='select column of the CSV file, starting at 1',
        type=int,
        default=1,
    )
    parser.add_argument(
        '-p',
        '--public',
        help='set the visibility to public',
        action='store_true',
    )
    parser.add_argument(
        '-w',
        '--whitelist',
        help='set it to define the added threats as whitelist',
        action='store_true',
    )
    parser.add_argument(
        '-t',
        '--threat_types',
        nargs='+',
        help=
        'choose specific threat types and their score, like: ddos 50 scam 15',
        default=[],
        action='append',
    )
    parser.add_argument(
        '--tag',
        nargs='+',
        help='add a list of tags',
        default=[],
    )
    parser.add_argument(
        '--link',
        help='add link as external_analysis_link',
        nargs='+',
    )
    parser.add_argument(
        '--permanent',
        help=
        'sets override_type to permanent. Scores won\'t be updated by the algorithm. Default is temporary',
        action='store_true',
    )
    parser.add_argument(
        '--lock',
        help=
        'sets override_type to lock. Scores won\'t be updated by the algorithm for three months. Default is '
        'temporary',
        action='store_true',
    )
    parser.add_argument(
        '--no-bulk',
        help=
        'force an api call for each threat, useful to retrieve the details of the threats created',
        action='store_true',
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    logger.debug(f'START: add_new_threats.py')

    if not args.threat_types and not args.whitelist:
        parser.error(
            "threat_types is required if the atom is not for whitelisting")

    if args.permanent and args.lock:
        parser.error("Only one override type is authorized")

    if args.permanent:
        override_type = OverrideType.PERMANENT
    elif args.lock:
        override_type = OverrideType.LOCK
    else:
        override_type = OverrideType.TEMPORARY

    if args.is_csv:
        try:
            list_new_threats = load_csv(args.input, args.delimiter,
                                        args.column - 1)
        except ValueError as ve:
            logger.error(ve)
            exit()
    else:
        list_new_threats = load_list(args.input)
        if not list_new_threats:
            parser.error('No atom found in the input file.')
    list_new_threats = defang_threats(list_new_threats, args.atom_type)
    list_new_threats = list(OrderedDict.fromkeys(
        list_new_threats))  # removing duplicates while preserving order
    args.threat_types = flatten_list(args.threat_types)
    threat_types = parse_threat_types(args.threat_types)
    atom_type = AtomType[args.atom_type.upper()]
    dtl = Datalake(env=args.env, log_level=args.loglevel)

    spinner = Halo(text=f'Creating threats', spinner='dots')
    spinner.start()

    threat_response = dtl.Threats.add_threats(list_new_threats, atom_type,
                                              threat_types, override_type,
                                              args.whitelist, args.public,
                                              args.tag, args.link,
                                              args.no_bulk)

    spinner.stop()
    terminal_size = Endpoint._get_terminal_size()
    if args.no_bulk:
        for threat in threat_response:
            logger.info(
                f'{threat["hashkey"].ljust(terminal_size - 6, " ")} \x1b[0;30;42m  OK  \x1b[0m'
            )
    else:
        failed = []
        failed_counter = 0
        created_counter = 0
        for batch_res in threat_response:
            failed.extend(batch_res['failed'])
            for success in batch_res['success']:
                for val_created in success['created_atom_values']:
                    created_counter += 1
                    logger.info(
                        f'{val_created.ljust(terminal_size - 6, " ")} \x1b[0;30;42m  OK  \x1b[0m'
                    )
        for failed_obj in failed:
            for failed_atom_val in failed_obj['failed_atom_values']:
                failed_counter += 1
                logger.info(
                    f'Creation failed for value {failed_atom_val.ljust(terminal_size - 6, " ")} \x1b[0;30;41m  KO  \x1b[0m'
                )
        logger.info(
            f'Number of batches: {len(threat_response)}\nCreated threats: {created_counter}\nFailed threat creation: {failed_counter}'
        )

    if args.output:
        save_output(args.output, threat_response)
        logger.debug(f'Results saved in {args.output}\n')
    logger.debug(f'END: add_new_threats.py')
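
# The override_args parameter lets other Python code invoke this script without
# touching sys.argv. A minimal invocation sketch (file name, atom type, scores
# and tag are illustrative values):
#
# main(override_args=[
#     '-i', 'threats.txt',
#     '-a', 'ip',
#     '-t', 'ddos', '50',
#     '--tag', 'my_campaign',
# ])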
def main(override_args=None):
    # Load initial args
    parser = BaseScripts.start('Lookup threats in Datalake')
    required_named = parser.add_argument_group('required arguments')
    csv_control = parser.add_argument_group('CSV control arguments')

    parser.add_argument(
        'threats',
        help='threats to lookup',
        nargs='*',
    )
    parser.add_argument(
        '-i',
        '--input',
        help='read threats to add from FILE',
    )
    parser.add_argument(
        '-td',
        '--threat_details',
        action='store_true',
        help='set if you also want to have access to the threat details',
    )
    parser.add_argument(
        '-ot',
        '--output_type',
        default='json',
        help=
        'set to the output type desired {json,csv}. Default is json if not specified',
    )
    required_named.add_argument(
        '-a',
        '--atom_type',
        help='set it to define the atom type',
        required=True,
    )
    csv_control.add_argument(
        '--is_csv',
        help='set if the file input is a CSV',
        action='store_true',
    )
    csv_control.add_argument(
        '-d',
        '--delimiter',
        help='set the delimiter of the CSV file',
        default=',',
    )
    csv_control.add_argument(
        '-c',
        '--column',
        help='select column of the CSV file, starting at 1',
        type=int,
        default=1,
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    logger.debug(f'START: lookup_threats.py')

    if not args.threats and not args.input:
        parser.error("either a threat or an input_file is required")

    if args.output_type:
        try:
            args.output_type = BaseEngine.output_type2header(args.output_type)
        except ParserError as e:
            logger.exception(
                f'Exception raised while getting output type from headers # {str(e)}',
                exc_info=False)
            exit(1)

    hashkey_only = not args.threat_details
    dtl = Datalake(env=args.env, log_level=args.loglevel)
    list_threats = list(args.threats) if args.threats else []
    if args.input:
        if args.is_csv:
            try:
                list_threats = list_threats + load_csv(
                    args.input, args.delimiter, args.column - 1)
            except ValueError as ve:
                logger.error(ve)
                exit()
        else:
            list_threats = list_threats + load_list(args.input)

    full_response = {}
    atom_type = parse_atom_type_or_exit(args.atom_type)
    list_threats = list(OrderedDict.fromkeys(
        list_threats))  # removing duplicates while preserving order
    for threat in list_threats:
        response = dtl.Threats.lookup(threat,
                                      atom_type=atom_type,
                                      hashkey_only=hashkey_only)
        found = response.get('threat_found', True)
        text, color = boolean_to_text_and_color[found]
        logger.info('{}{} hashkey:{} {}\x1b[0m'.format(color, threat,
                                                       response['hashkey'],
                                                       text))
        full_response[threat] = response

    if args.output:
        if args.output_type == 'text/csv':
            full_response = CsvBuilder.create_look_up_csv(
                full_response,
                args.atom_type,
                has_details=args.threat_details,
            )
        save_output(args.output, full_response)
        logger.debug(f'Results saved in {args.output}\n')
    logger.debug(f'END: lookup_threats.py')
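
# boolean_to_text_and_color is an external helper of the snippet above; a sketch
# of a compatible definition (the labels and ANSI colors are assumptions):
boolean_to_text_and_color = {
    True: ('FOUND', '\x1b[0;30;42m'),  # black text on a green background
    False: ('NOT FOUND', '\x1b[0;30;41m'),  # black text on a red background
}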
def main(override_args=None):
    """Method to start the script"""
    logger.debug(f'START: get_threats_from_query_hash.py')

    # Load initial args
    parser = BaseScripts.start(
        'Retrieve a list of responses from a given query hash.')
    parser.add_argument(
        '--query_fields',
        help=
        'fields to be retrieved from the threat (default: only the hashkey)\n'
        'If an atom detail isn\'t present in a particular atom, an empty string is returned.',
        nargs='+',
        default=['threat_hashkey'],
    )
    parser.add_argument(
        '--list',
        help=
        'Turn the output into a list (requires query_fields to be a single element)',
        action='store_true',
    )
    required_named = parser.add_argument_group('required arguments')
    required_named.add_argument(
        'query_hash',
        help=
        'the query hash from which to retrieve the response hashkeys or a path to the query body json file',
    )
    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    configure_logging(args.loglevel)

    if len(args.query_fields) > 1 and args.list:
        parser.error(
            "List output format is only available if a single element is queried (via query_fields)"
        )

    query_body = {}
    query_hash = args.query_hash
    if len(query_hash) != 32 or os.path.exists(query_hash):
        try:
            with open(query_hash, 'r') as query_body_file:
                query_body = json.load(query_body_file)
        except FileNotFoundError:
            logger.error(
                f"Couldn't understand the given value as a query hash or path to query body: {query_hash}"
            )
            exit(1)

    # Load api_endpoints and tokens
    dtl = Datalake(env=args.env, log_level=args.loglevel)
    logger.debug(
        f'Start to search for threats from the query hash: {query_hash}')
    spinner = None
    if logger.isEnabledFor(logging.INFO):
        spinner = Halo(text=f'Creating bulk task', spinner='dots')
        spinner.start()

    task = dtl.BulkSearch.create_task(query_body=query_body,
                                      query_hash=query_hash,
                                      query_fields=args.query_fields)
    if spinner:
        spinner.text = f'Waiting for bulk task {task.uuid} response'
    response = task.download_sync()
    original_count = response.get('count', 0)
    if spinner:
        spinner.succeed()
        spinner.info(
            f'Number of threats retrieved: {original_count}')

    formatted_output = format_output(response, args.list)
    if args.output:
        with open(args.output, 'w') as output:
            output.write(formatted_output)
        logger.info(f'Threats saved in {args.output}')
    else:
        logger.info(formatted_output)
        logger.info('Done')
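
# format_output is an external helper of the snippet above; a sketch of a
# compatible implementation (the 'results' key and the json layout are
# assumptions based on how the response is used above):
import json

def format_output(response: dict, as_list: bool) -> str:
    if as_list:
        # one value per line, for when a single query_field was requested
        return '\n'.join(str(result[0]) for result in response.get('results', []))
    return json.dumps(response, indent=2, sort_keys=True)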
def main(override_args=None):
    parser = BaseScripts.start(
        'Gets threats from given query body or query hash.',
        output_file_required=True)
    parser.add_argument('-i',
                        '--input',
                        help='read query body from a json file')
    parser.add_argument('--query-hash',
                        help='sets the query hash for the advanced search')
    parser.add_argument(
        '-l',
        '--limit',
        help=
        'defines how many items will be returned in one page slice. Accepted values: 0 to 5000, default is 20',
        type=int,
        default=20)
    parser.add_argument(
        '--offset',
        help=
        'defines an index of the first requested item. Accepted values: 0 and bigger, default is 0.',
        type=int,
        default=0)
    parser.add_argument(
        '-ot',
        '--output-type',
        help=
        'sets the output type desired {json, csv, stix, misp}. Default is json',
        default='json')
    parser.add_argument(
        '--ordering',
        help=
        'threat field to filter on. To sort the results by relevance (if any "search" is applied), just skip '
        'this field. To use the reversed order, use minus, i.e. --ordering="-last_updated" in your command line.'
    )

    if override_args:
        args = parser.parse_args(override_args)
    else:
        args = parser.parse_args()
    logger.debug(f'START: advanced_search.py')
    if bool(args.input) == bool(args.query_hash):
        raise ValueError(
            'Either an input file with a query body or a query hash needs to be provided.'
        )
    try:
        output_type = Output[args.output_type.upper()]
    except KeyError:
        logger.error(
            'Unsupported output type, please use json, csv, stix or misp')
        exit(1)

    dtl = Datalake(env=args.env, log_level=args.loglevel)
    if args.input:
        query_body = load_json(args.input)
        resp = dtl.AdvancedSearch.advanced_search_from_query_body(
            query_body,
            limit=args.limit,
            offset=args.offset,
            output=output_type,
            ordering=args.ordering)
    else:
        resp = dtl.AdvancedSearch.advanced_search_from_query_hash(
            args.query_hash,
            limit=args.limit,
            offset=args.offset,
            output=output_type,
            ordering=args.ordering)
    save_output(args.output, resp)
    logger.info(
        f'\x1b[0;30;42m OK: MATCHING THREATS SAVED IN {args.output} \x1b[0m')
    logger.debug(f'END: advanced_search.py')
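
# A minimal invocation sketch for this script with a query hash (the hash value
# is a placeholder; '-o' assumes BaseScripts.start registers an output flag
# under that name, which is not shown above):
#
# main(override_args=[
#     '--query-hash', '00000000000000000000000000000000',
#     '-ot', 'csv',
#     '-l', '100',
#     '-o', 'results.csv',
# ])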