Example #1
    def __init__(self, config_file_path, debug_mode=False, import_dir='', export_dir=''):

        #
        # parse config file and save off the information we need
        #
        config_dict = parse_config(config_file_path)

        self.server_url = config_dict.get('server_url', 'https://127.0.0.1')
        self.api_token = config_dict.get('api_token', '')
        self.sites = config_dict.get('sites', [])
        self.debug = config_dict.get('debug', False)
        self.export_dir = export_dir
        self.import_dir = import_dir
        self.integration_name = 'Cb Taxii Connector 1.6.5'

        self.http_proxy_url = config_dict.get('http_proxy_url', None)
        self.https_proxy_url = config_dict.get('https_proxy_url', None)

        if self.export_dir and not os.path.exists(self.export_dir):
            os.mkdir(self.export_dir)

        #
        # Test Cb Response connectivity
        #
        try:
            self.cb = CbResponseAPI(url=self.server_url,
                                    token=self.api_token,
                                    ssl_verify=False,
                                    integration_name=self.integration_name)
            self.cb.info()
        except Exception:
            logger.error(traceback.format_exc())
            sys.exit(-1)
Example #2
    def start(self):
        """Starts the internal thread (see base class documentation) and
        start the connection to the CB server.
        """
        super().start()
        self._cb = CbResponseAPI(profile=self._profile_name)

        return self
Example #3
    def _dispatch(self, alert, descriptor):
        """Send ban hash command to CarbonBlack

        Publishing:
            There is currently no method to control carbonblack's behavior with publishers.

        Args:
            alert (Alert): Alert instance which triggered a rule
            descriptor (str): Output descriptor

        Returns:
            bool: True if alert was sent successfully, False otherwise
        """
        if not alert.context:
            LOGGER.error('[%s] Alert must contain context to run actions',
                         self.__service__)
            return False

        creds = self._load_creds(descriptor)
        if not creds:
            return False

        client = CbResponseAPI(**creds)
        carbonblack_context = alert.context.get('carbonblack', {})

        # Get md5 hash 'value' passed from the rules engine function
        action = carbonblack_context.get('action')
        if action == 'ban':
            binary_hash = carbonblack_context.get('value')
            # The binary should already exist in CarbonBlack
            binary = client.select(Binary, binary_hash)
            # Determine if the binary is currently listed as banned
            if binary.banned:
                # Determine if the banned action is enabled, if true exit
                if binary.banned.enabled:
                    return True
                # If the binary is banned and disabled, begin the banning hash operation
                banned_hash = client.select(BannedHash, binary_hash)
                banned_hash.enabled = True
                banned_hash.save()
            else:
                # Create a new BannedHash object to be saved
                banned_hash = client.create(BannedHash)
                # Begin the banning hash operation
                banned_hash.md5hash = binary.md5
                banned_hash.text = "Banned from StreamAlert"
                banned_hash.enabled = True
                banned_hash.save()

            return banned_hash.enabled is True
        else:
            LOGGER.error('[%s] Action not supported: %s', self.__service__,
                         action)
            return False
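
A minimal sketch, not from the source, of the alert context shape this handler reads (only 'action' and 'value' under the 'carbonblack' key are consumed above; the md5 is a placeholder):

# Hypothetical context passed by the rules engine; only these keys are read above.
example_context = {
    'carbonblack': {
        'action': 'ban',                              # only 'ban' is supported
        'value': 'd41d8cd98f00b204e9800998ecf8427e',  # md5 of the binary to ban
    }
}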
Example #4
def query_watchlists(cb: CbResponseAPI, query: str) -> SimpleQuery:
    """perform watchlist query"""
    try:
        return cb.select(Watchlist).where(query)
    except Exception as e:
        LOGGER.error(f"problem querying watchlists: {e}")
        return []
Example #5
def get_command_result(cb: CbResponseAPI, session_id: str, command_id: str):
    """Get results of a LR session command."""
    try:
        return cb.get_object(f"{CBLR_BASE}/session/{session_id}/command/{command_id}")
    except ObjectNotFoundError:
        LOGGER.warning(f"no live resonse session and/or command combination for {session_id}:{command_id}")
        return None
Example #6
def get_session_commands(cb: CbResponseAPI, session_id: str):
    """List commands for this session."""
    try:
        return cb.get_object(f"{CBLR_BASE}/session/{session_id}/command")
    except ObjectNotFoundError:
        LOGGER.warning(f"no live resonse session by ID={session_id}")
        return None
Example #7
def get_session_by_id(cb: CbResponseAPI, session_id):
    """Get a LR session object by id."""
    try:
        return cb.get_object(f"{CBLR_BASE}/session/{session_id}")
    except ObjectNotFoundError:
        LOGGER.warning(f"no live resonse session by ID={session_id}")
        return None
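
A usage sketch chaining the three live response helpers above; the session/command IDs are placeholders and the shape of the returned JSON is an assumption:

# Hypothetical usage; assumes command listings come back under a 'results' key.
cb = CbResponseAPI(profile="default")
session = get_session_by_id(cb, "123")
if session is not None:
    commands = get_session_commands(cb, "123") or {}
    for command in commands.get("results", []):
        print(get_command_result(cb, "123", str(command["id"])))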
Example #8
def make_process_query(
    cb: CbResponseAPI,
    query: str,
    start_time: datetime.datetime = None,
    last_time: datetime.datetime = None,
    raise_exceptions=True,
) -> ProcessQuery:
    """Query the CbResponse environment and interface results.

    Args:
        cb: A CbResponseAPI object to use
        query: The correctly formatted query
        start_time: Set the minimum last update time (relative to server) for this query.
        last_time: Set the maximum last update time (relative to server) for this query.
        XXX no_warnings: Do not warn before printing large query result sets.
    Returns: cbapi.response.models.ProcessQuery or empty list.
    """

    processes = []
    LOGGER.debug(
        f"buiding query: {query} between '{start_time}' and '{last_time}'")
    try:
        processes = cb.select(Process).where(query).group_by("id")
        processes = processes.min_last_server_update(
            start_time) if start_time else processes
        processes = processes.max_last_server_update(
            last_time) if last_time else processes
        LOGGER.info(f"got {len(processes)} process results grouped by id.")
    except Exception as e:
        if raise_exceptions:
            raise
        LOGGER.error(f"problem querying carbonblack with '{query}' : {e}")

    return processes
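
An illustrative call, assuming a profile named "default"; the query string and time window are placeholders:

# Hypothetical usage of make_process_query with a bounded time window.
import datetime

cb = CbResponseAPI(profile="default")
start = datetime.datetime(2021, 1, 1)
end = datetime.datetime(2021, 1, 2)
procs = make_process_query(cb, "process_name:powershell.exe",
                           start_time=start, last_time=end,
                           raise_exceptions=False)
for proc in procs:
    print(proc.start, proc.hostname, proc.process_name)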
Example #9
def listAlerts(q):
    cb = CbResponseAPI(profile=args.instance)
    alerts = cb.select(Alert).where('hostname:' + args.hostname + ' AND (' +
                                    q + ') AND created_time:[' + starttime +
                                    ' TO ' + endtime + ']')
    for alert in alerts:
        if 'binary' in alert.alert_type:
            print(
                "{0} - SCORE: \033[32m{1:d}\033[m - HOST: \033[32m{2:s}\033[m - \033[33mBINARY\033[m: {3:s} - REPORT: {4:s}"
                .format(alert.created_time, alert.report_score, alert.hostname,
                        alert.md5, alert.watchlist_name))
        else:
            print(
                "{0} - SCORE: \033[32m{1:d}\033[m - HOST: \033[32m{2:s}\033[m - \033[31mPROCESS\033[m: {3:s} - REPORT: {4:s}"
                .format(alert.created_time, alert.report_score, alert.hostname,
                        alert.process_name, alert.watchlist_name))
            print("\033[1;30;40m{0:s}\033[m".format(alert.process.webui_link))
Example #10
def search():
    
    global watchlist
    
    save_path = 'C:/Users/SMaiorino/Documents/My_Scripts/Master'
    f_name = os.path.join(save_path, 'Mal_Files_Found_{0}.csv'.format(watchlist))
    my_file = open(f_name,'w',newline='')
    writer = csv.writer(my_file)
    
    with open(os.path.join(save_path, 'vulnerable-names.csv'),'r') as n:
        names = n.readlines()

    with open(os.path.join(save_path, 'vulnerable-files.csv'), 'r') as f:
        files = f.readlines()
    
    print('\n---------------RESULTS---------------\n')
            
    for name in names:
        name = name.replace('\n', '')
        api = CbResponseAPI()
        try:
            sensor = api.select(Sensor).where('hostname:{0}'.format(name)).first()
            if sensor.status == 'Offline':
                writer.writerow([name, 'OFFLINE'])
                continue
            with sensor.lr_session() as session:
                for file in files:
                    file = file.replace('\n', '')
                    try:
                        test_file = session.get_file(r'{0}'.format(file))
                        if test_file is not None:
                            writer.writerow([name,file])
                            print('File: {0} \nComputer: {1} \n'.format(file,name))
                            continue               
                    except (TimeoutError, ObjectNotFoundError, LiveResponseError, 
                            ApiError, ServerError, AttributeError, TypeError):
                        pass
        except TimeoutError:
            continue
        except AttributeError:
            if sensor is None:
                continue
            break

        
    my_file.close() 
Example #11
    def __init__(self, config_file_path: str, debug_mode: bool = False, import_dir: str = '',
                 export_dir: Optional[str] = None, strict_mode: bool = False):
        """
        Parse config file and save off the information we need.

        NOTE: At present, import path is unused

        :param config_file_path: configuration file location
        :param debug_mode: If True, operate in debug mode
        :param import_dir: feed import directory
        :param export_dir: export directory (optional)
        :param strict_mode: If True, be stricter with the config
        """
        try:
            config_dict = parse_config(config_file_path, strict_mode=strict_mode)
        except TaxiiConfigurationException as err:
            _logger.error(f"{err}", exc_info=False)
            sys.exit(-1)

        if debug_mode:
            _logger.debug(f"Config: {config_dict}")

        self.server_url = config_dict.get('server_url', 'https://127.0.0.1')
        self.api_token = config_dict.get('api_token', '')
        self.sites = config_dict.get('sites', [])
        self.debug = config_dict.get('debug', False)
        self.export_dir = export_dir
        self.import_dir = import_dir
        self.integration_name = 'Cb Taxii Connector 1.6.5'

        self.http_proxy_url = config_dict.get('http_proxy_url', None)
        self.https_proxy_url = config_dict.get('https_proxy_url', None)

        # if exporting, make sure the directory exists
        if self.export_dir and not os.path.exists(self.export_dir):
            os.mkdir(self.export_dir)

        # Test Cb Response connectivity
        try:
            self.cb = CbResponseAPI(url=self.server_url, token=self.api_token,
                                    ssl_verify=False, integration_name=self.integration_name)
            self.cb.info()
        except Exception as err:
            _logger.error(f"Failed to make connection: {err}", exc_info=True)
            sys.exit(-1)
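
For reference, a sketch of the dictionary parse_config would need to return for this initializer; the keys are exactly the ones read above, the values are placeholders, and the on-disk config format is not shown in the source:

# Placeholder values; only the keys read by __init__ above are listed.
example_config_dict = {
    'server_url': 'https://127.0.0.1',
    'api_token': '<api token>',
    'sites': [],
    'debug': False,
    'http_proxy_url': None,
    'https_proxy_url': None,
}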
Example #12
def tempAll(q, instance, hits_bool):
    instance = instance.strip()
    tempCSV = []
    # Debug prints
    #print(instance)
    #print(q)
    cb = CbResponseAPI(profile=instance)
    query = cb.select(Process).where('hostname:' + args.hostname + ' AND (' +
                                     q + ') AND start:[' + starttime + ' TO ' +
                                     endtime +
                                     ']').sort("start asc").max_children(
                                         args.c)
    try:
        print(colorize(instance + " - Total hits: " + str(len(query)),
                       'green'))
    except OSError as e:
        print(e)
    finally:
        if hits_bool:
            sys.exit()
        else:
            sleep(3)
            for proc in query:
                print("{0} {1} {2} {3} {4} \n\033[1;30;40m{5}\033[m".format(
                    proc.start, instance, proc.hostname, proc.username,
                    proc.cmdline, proc.webui_link))
                # Show netconns switch
                if args.n is True:
                    # Iterate the CB netconns object
                    for conns in proc.netconns:
                        print("\033[32m{0}\033[m".format(conns))
                # Show child processes switch
                elif int(args.c) > 0:
                    # Iterate the child processes
                    proc.walk_children(visitor)
                elif args.csv is True:
                    tempCSV.append({
                        'proc.start': proc.start,
                        'proc.hostname': proc.hostname,
                        'proc.username': proc.username,
                        'proc.cmdline': proc.cmdline,
                        'proc.webui_link': proc.webui_link
                    })
            if tempCSV != []:
                outputCSV(instance, tempCSV)
Example #13
def cbResponse():
    # Set attributes for csv file
    save_path = 'C:/Users/SMaiorino/Documents/My_Scripts/Master/Computer Lists'
    f_name = os.path.join(save_path, 'List_Comps_Response.csv')
    file = open(f_name, 'w', newline='')
    f_write = csv.writer(file)
    #f_write.writerow(['NAME'])

    # Initialize API var and query parameters
    api = CbResponseAPI()
    query = "ip:172"
    sensor = api.select(Sensor).where(query)

    # Iterate through each object the sensor reads and
    # output the name of each workstation in response that
    # is currently installed.
    for obj in sensor:
        names = obj.hostname
        os_name = obj.os_environment_display_string
        status = obj.status
        uninstall = obj.uninstall
        uninstalled = obj.uninstalled
        group = obj.group_id
        lastComm = str(obj.last_checkin_time)[0:10]
        if 'Server' not in os_name and 'Windows' in os_name \
        and not uninstall and not uninstalled \
        and 'Uninstall' not in status and group != 12:
            f_write.writerow([names])

    file.close()

    # Re-open the file to sort the names in ascending
    # alphabetical order
    new_file = csv.reader(
        open(os.path.join(save_path, 'List_Comps_Response.csv')))
    sorted_file = sorted(new_file)

    # Re-write the sorted names into the file
    with open(os.path.join(save_path, 'List_Comps_Response.csv'),
              'w',
              newline='') as f:
        f_write = csv.writer(f)
        for row in sorted_file:
            f_write.writerow(row)
Example #14
def CbSensor_search(profile, hostname):
    cb = CbResponseAPI(profile=profile)
    sensor = None
    logger.info("Getting the sensor object from carbonblack")
    try:
        result = make_sensor_query(cb, f"hostname:{hostname}")
        if len(result) == 1:
            return result[0]
        if isinstance(result[0], Sensor):
            print()
            logger.warning(
                "MoreThanOneResult searching for {0:s}".format(hostname))
            print("\nResult breakdown:")
            sensor_ids = []
            for s in result:
                sensor_ids.append(int(s.id))
                if int(s.id) == max(sensor_ids):
                    sensor = s
                print()
                print("Sensor object - {}".format(s.webui_link))
                print(
                    "-------------------------------------------------------------------------------\n"
                )
                print("\tos_environment_display_string: {}".format(
                    s.os_environment_display_string))
                print()
                print("\tstatus: {}".format(s.status))
                print("\tsensor_id: {}".format(s.id))
                print("\tlast_checkin_time: {}".format(s.last_checkin_time))
                print("\tnext_checkin_time: {}".format(s.next_checkin_time))
                print("\tsensor_health_message: {}".format(
                    s.sensor_health_message))
                print("\tsensor_health_status: {}".format(
                    s.sensor_health_status))
                print("\tnetwork_interfaces:")
            print()
            default_sid = max(sensor_ids)
            choice_string = "Which sensor do you want to use?\n"
            for sid in sensor_ids:
                choice_string += "\t- {}\n".format(sid)
            choice_string += "\nEnter one of the sensor ids above. Default: [{}]".format(
                default_sid)
            user_choice = int(input(choice_string) or default_sid)
            for s in result:
                if user_choice == int(s.id):
                    logger.info("Returning {} sensor".format(s))
                    return s
    except Exception as e:
        if sensor is None:
            logger.warning(
                "A sensor by hostname '{}' wasn't found in this environment".
                format(hostname))
            #return False
        logger.error("{}".format(str(e)))
        return False
Example #15
def fake_cb_response_api(monkeypatch):
    """Fake CbResponseAPI object."""
    def fake_info(self):
        server_info = {}
        with open(f"{HOME_PATH}/test_data/cb_response_server_info.json",
                  "r") as fp:
            server_info = json.load(fp)
        server_info["version"] = "5.0.0"
        return server_info

    monkeypatch.setattr(CbResponseAPI, "info", fake_info)
    return CbResponseAPI(url="https://fakehost", token="N/A", ssl_verify=False)
Example #16
def these_watchlists_to_list_dict(cb: CbResponseAPI, watchlist_names=[], watchlist_ids=[]) -> List[Dict]:
    """Convert the listed watchlists to a list of their dictionary representations."""
    wl_data = []
    for wl_name in watchlist_names:
        wl = cb.select(Watchlist).where(f"name:{wl_name}")
        if wl:
            if len(wl) > 1:
                LOGGER.warning(f"got {len(wl)} watchlists with name matching {wl_name}. Using first result")
            wl = wl[0]
            wl_data.append(watchlist_to_dict(wl))

    return wl_data
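
A usage sketch; the watchlist name is a placeholder and watchlist_to_dict is assumed to be defined alongside this helper:

# Hypothetical usage; prints the dictionary form of each matched watchlist.
cb = CbResponseAPI(profile="default")
for wl in these_watchlists_to_list_dict(cb, watchlist_names=["Suspicious PowerShell"]):
    print(wl)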
Example #17
def main():
	init()
	parser = argparse.ArgumentParser(description=banner)
	parser.add_argument("-u", "--url", dest="server_url", help="Cb Response Server URL [e.g. \"http://cb.example.com\"]")
	parser.add_argument("-t", "--token", dest="api_token", help="Cb Response API Token")
	parser.add_argument("-m", "--md5hash", dest="md5hash", help="MD5 hash of the binary to be banned")
	parser.add_argument("-n", "--notes", dest="text", help="Notes for banning the binary")
	parser.add_argument("-l", "--list", dest="list_file", help="List file of binaries to be banned. Also accepts csv files.")
	
	opts = parser.parse_args()
	note = "Banned from API"
	global cb
	
	if not opts.server_url and not opts.api_token:
		cb = CbResponseAPI(server_url, token=api_token, ssl_verify=False)
	else:
		cb = CbResponseAPI(opts.server_url, token=opts.api_token, ssl_verify=False)
	
	if opts.text:
		note = opts.text
	
	if opts.md5hash:
		ban_hash(opts.md5hash, note)
	elif opts.list_file:
		# open in text mode so csv.DictReader and line iteration work in Python 3
		hash_list = open(opts.list_file, 'r')
		ban_text = "Multiple hash ban mode. Reading from list file."
		print(ban_text)
		root.info(ban_text)
		if os.path.splitext(hash_list.name)[1] == '.csv':
			csv_reader = csv.DictReader(hash_list)
			for h in csv_reader:
				ban_hash(h['md5'], h['Note'])
		else:
			lines = [line.rstrip('\n') for line in hash_list]
			for h in lines:
				ban_hash(h, note)
		hash_list.close()
	else:
		print("Please provide either a hash value or a list of hashes")
		parser.parse_args(['-h'])
Example #18
def main():

    global watchlist, cb

    cb = CbResponseAPI()

    vuln_names = open("vulnerable-names-dupes.csv", 'w', newline='')
    write_names = csv.writer(vuln_names)

    vuln_files = open('vulnerable-files-dupes.csv', 'w', newline='')
    write_files = csv.writer(vuln_files)

    watchlist = input('Watchlist to Search Through: ')

    # NOTE: need to go into the Response Console and click
    # on the WatchList of interest - the watchlist ordinal
    # will appear in the URL field https://172.16.95.214:8443/#watchlist/190/?filterBy=all&sortBy=name
    # pass this ordinal as a command line parameter when invoking this script

    binary_query = cb.select(Binary).where("watchlist_{0}:*".format(watchlist))

    #find all instances of the binary watchlist hits including historical instances
    for binary in binary_query:
        for filename in binary.observed_filename:
            for endpoint in binary.endpoint:
                write_names.writerow([endpoint.split("|")[0]])
                write_files.writerow([filename])

    vuln_names.close()
    vuln_files.close()

    remove_dupes()

    os.remove('vulnerable-names-dupes.csv')
    os.remove('vulnerable-files-dupes.csv')

    #call search() to clean the list of files by verifying their presence or absence on the end point
    search()
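
A sketch of the underlying binary-watchlist query, assuming the ordinal 190 from the console URL in the NOTE above:

# Hypothetical standalone query for watchlist ordinal 190.
api = CbResponseAPI()
for binary in api.select(Binary).where("watchlist_190:*"):
    for endpoint in binary.endpoint:
        print(endpoint.split("|")[0], binary.observed_filename)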
Example #19
def init():
	global script_message
	global cb
	global banner
	global root
	root = logging.getLogger("cbapi")
	logging.basicConfig(filename=datetime.now().strftime('ban_hash_%H_%M_%d_%m_%Y.log'), level=logging.INFO)
	#~ global creds
	#~ global server_url
	#~ global token
	banner = "Script for banning hashes v1"
	#~ creds = CredentialStore("response").get_credentials()
	#~ server_url = creds['url']
	#~ token = creds['token']
	#~ cb = CbApi(server_url, token=token, ssl_verify=False)
	cb = CbResponseAPI()	
Example #20
def make_sensor_query(cb: CbResponseAPI, sensor_query: str) -> SensorQuery:
    """Construct a SensorQuery object."""
    try:
        if ":" not in sensor_query:
            LOGGER.warning(
                "No field specification passed. Fields: ip, hostname, groupid")
            LOGGER.info(
                f"Making assumption and updating query to: 'hostname:{sensor_query}'"
            )
            sensor_query = f"hostname:{sensor_query}"
        sensors = cb.select(Sensor).where(sensor_query)
    except ValueError as e:
        LOGGER.error(f"{e}")
        return False
    LOGGER.info(f"got {len(sensors)} sensor results.")
    return sensors
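
A usage sketch; the hostname is a placeholder, and a bare term is auto-prefixed with 'hostname:' by the helper above:

# Hypothetical usage of make_sensor_query.
cb = CbResponseAPI(profile="default")
sensors = make_sensor_query(cb, "WORKSTATION-01")
if sensors:
    for sensor in sensors:
        print(sensor.id, sensor.hostname, sensor.status)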
Example #21
def cb_connect(server_url, api_token):
    message = "Connected to Cb Response server"
    global cb
    try:
        if server_url and api_token:
            if not server_url.startswith(("http://", "https://")):
                raise InvalidApiTokenError(
                    sys.exc_info(), "Invalid server URL {}".format(server_url))
            elif re.match(r"([a-fA-F\d]{40})", api_token) is None:
                raise InvalidApiTokenError(
                    sys.exc_info(), "Invalid API Token {}".format(api_token))
            cb = CbResponseAPI(server_url, token=api_token, ssl_verify=False)
    except InvalidApiTokenError as iate:
        message = iate.message
    return message
Example #22
def __init__():
    global root
    global script_message
    global cb
    global banner
    global path
    root = logging.getLogger("cbapi")
    logging.basicConfig(
        filename=datetime.now().strftime('dl_bin_%H_%M_%d_%m_%Y.log'),
        level=logging.INFO)
    #~ logging.basicConfig(level=logging.DEBUG)
    banner = "Script for downloading hashes v1"
    cb = CbResponseAPI()
    path = "files"
    if not os.path.exists(path):
        os.makedirs(path)
Example #23
def get_file_content(cb: CbResponseAPI, session_id: str, file_id: str):
    """Get file content stored in LR session and write the file locally."""
    from cbinterface.helpers import get_os_independent_filepath

    try:
        # fall back to a generic name in case no file metadata is returned
        filename = f"{session_id}_{file_id}"
        file_metadata = cb.get_object(f"{CBLR_BASE}/session/{session_id}/file/{file_id}")
        if file_metadata:
            filepath = get_os_independent_filepath(file_metadata["file_name"])
            filename = f"{session_id}_{filepath.name}"
        result = cb.session.get(f"{CBLR_BASE}/session/{session_id}/file/{file_id}/content", stream=True)
        if result.status_code != 200:
            LOGGER.error(
                f"got {result.status_code} from server getting file {file_id} content for session {session_id}"
            )
            return
        with open(filename, "wb") as fp:
            for chunk in result.iter_content(io.DEFAULT_BUFFER_SIZE):
                fp.write(chunk)
        if os.path.exists(filename):
            LOGGER.info(f"wrote: {filename}")
        return os.path.exists(filename)
    except ObjectNotFoundError:
        LOGGER.warning(f"no file {file_id} content with session {session_id}")
        return
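
A usage sketch; the session and file IDs are placeholders:

# Hypothetical usage; writes the collected file to the current working directory.
cb = CbResponseAPI(profile="default")
if get_file_content(cb, "123", "456"):
    print("collected file written")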
Example #24
#!/usr/bin/python

from cbapi.response import CbResponseAPI, Process, Binary, Sensor
#
# Create our CbAPI object
#
c = CbResponseAPI()

query = c.select(Process).first()
print(query)
#
# take the first process that ran notepad.exe, download the binary and read the first two bytes
#
#c.select(Process).where('process_name:notepad.exe').first().binary.file.read(2)  # -> b'MZ'
#
# if you want a specific ID, you can put it straight into the .select() call:
#
#binary = c.select(Binary, "24DA05ADE2A978E199875DA0D859E7EB")
#
# select all sensors that have run evil.exe
#
#sensors = set()
#for proc in c.select(Process).where('process_name:evil.exe'):
#    sensors.add(proc.sensor)
#
# iterate over all sensors and isolate
#
#for s in sensors:
#    s.network_isolation_enabled = True
#    s.save()
Example #25
def main():
    init()
    parser = argparse.ArgumentParser(description=banner)
    parser.add_argument(
        "-u",
        "--url",
        dest="server_url",
        help="Cb Response Server URL [e.g. \"http://cb.example.com\"]")
    parser.add_argument("-t",
                        "--token",
                        dest="api_token",
                        help="Cb Response API Token")
    parser.add_argument("-m",
                        "--md5hash",
                        dest="md5hash",
                        help="MD5 hash of the binary to be banned")
    parser.add_argument("-n",
                        "--notes",
                        dest="text",
                        help="Notes for banning the binary")
    parser.add_argument(
        "-l",
        "--list",
        dest="list_file",
        help="List file of binaries to be banned. Also accepts csv files.")
    parser.add_argument("-x",
                        "--export",
                        help="Export ban list to csv",
                        action='store_true')

    opts = parser.parse_args()
    note = "Banned from API"
    global cb

    try:
        #~ cb = CbResponseAPI(server_url, token=api_token, ssl_verify=False)
        if not opts.server_url or (not opts.server_url.startswith("http://")
                                   and
                                   not opts.server_url.startswith("https://")):
            raise InvalidApiTokenError(
                sys.exc_info(),
                "Invalid server URL {}".format(opts.server_url))
        elif not opts.api_token or (re.match(r"([a-fA-F\d]{40})",
                                             opts.api_token) is None):
            raise InvalidApiTokenError(
                sys.exc_info(), "Invalid API Token {}".format(opts.api_token))
        else:
            cb = CbResponseAPI(opts.server_url,
                               token=opts.api_token,
                               ssl_verify=False)
    except InvalidApiTokenError as iate:
        root.exception(iate)
        sys.exit(iate.exit_code)

    if opts.export:
        export_mode_msg = "Export mode. Fetching banned list from {}".format(
            opts.server_url)
        print(export_mode_msg)
        root.info(export_mode_msg)
        export_to_csv()
        sys.exit()

    if opts.text:
        note = opts.text

    if opts.md5hash:
        single_ban_mode_msg = "Single hash ban mode."
        print(single_ban_mode_msg)
        root.info(single_ban_mode_msg)
        ban_hash(opts.md5hash, note)
    elif opts.list_file:
        list_ban_mode = "Multiple hash ban mode. Reading list file"
        print(list_ban_mode)
        root.info(list_ban_mode)
        # open in text mode so csv.DictReader and line iteration work in Python 3
        hash_list = open(opts.list_file, 'r')
        if os.path.splitext(hash_list.name)[1] == '.csv':
            csv_reader = csv.DictReader(hash_list)
            rows = list(csv_reader)
            found_msg = "Found {0} hashes in {1}".format(
                len(rows), hash_list.name)
            print(found_msg)
            root.info(found_msg)
            for h in rows:
                ban_hash(h['md5'], h['Note'])
        else:
            lines = [line.rstrip('\n') for line in hash_list]
            found_msg = "Found {0} hashes in {1}".format(
                len(lines), hash_list.name)
            print(found_msg)
            root.info(found_msg)
            for h in lines:
                ban_hash(h, note)
        hash_list.close()
        if md5_error_found:
            sys.exit(100)
    else:
        parser.parse_args(['-h'])
Example #26
def get_all_watchlists(cb: CbResponseAPI) -> SimpleQuery:
    """Return a list of all watchlists."""
    return cb.select(Watchlist)
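
A minimal usage sketch, assuming a profile named "default":

# Hypothetical usage; prints every watchlist object.
cb = CbResponseAPI(profile="default")
for wl in get_all_watchlists(cb):
    print(wl)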
Example #27
def search():

    global watchlist, bin_file, api

    save_path = 'C:/Users/SMaiorino/Documents/My_Scripts/Master'
    f_name = os.path.join(save_path,
                          'Mal_Files_Found_{0}.csv'.format(watchlist))
    my_file = open(f_name, 'w', newline='')
    writer = csv.writer(my_file)

    # Retrieve the necessary filenames and workstations by reading each csv file
    with open(os.path.join(save_path, 'vulnerable-names.csv'), 'r') as n:
        names = n.readlines()

    with open(os.path.join(save_path, 'vulnerable-files.csv'), 'r') as f:
        files = f.readlines()

    print('\n---------------RESULTS---------------\n')

    for name in names:
        name = name.replace('\n', '')
        api = CbResponseAPI()
        try:
            sensor = api.select(Sensor).where(
                'hostname:{0}'.format(name)).first()

            # Record Workstations with an Offline Sensor
            if sensor.status == 'Offline':
                writer.writerow([name, 'OFFLINE'])
                continue

            # We use string manipulation for proper communication between the
            # live response session and the format of each file / workstation.
            # We only use 'bin_file' in the copy_encrypt_binary() function below.
            #
            # We then copy the contents of the file on the current end point to
            # a specified quarantine folder. The file is then deleted from the
            # current end point and we write the contents to a csv as:
            #
            #   HOSTNAME            FILE FOUND ON COMPUTER (or OFFLINE)
            #
            # We then return to the top of the loop and move on to the next
            # end point in our list.

            # Start the live response session
            with sensor.lr_session() as session:
                for file in files:
                    bin_file = file.replace('\\', '\\\\')
                    file = file.replace('\n', '')
                    try:
                        # Obtain the contents of each file
                        test_file = session.get_file(r'{0}'.format(file))
                        if test_file is not None:
                            copy_encrypt_binary()
                            session.delete_file(r'{0}'.format(file))
                            writer.writerow([name, file])
                            print('File: {0} \nComputer: {1} \n'.format(
                                file, name))
                            continue
                    except (TimeoutError, ObjectNotFoundError,
                            LiveResponseError, ApiError, ServerError,
                            AttributeError, TypeError):
                        pass
        except TimeoutError:
            continue
        except AttributeError:
            if sensor is None:
                continue
            break

    my_file.close()
Example #28
def main():

    parser = argparse.ArgumentParser(
        description="SIP Indicator CbR Search and ACE Alert.")
    parser.add_argument('-d',
                        '--debug',
                        action="store_true",
                        help="set logging to DEBUG",
                        default=False)
    args = parser.parse_args()

    # load config
    config = ConfigParser()
    config.read('etc/config.ini')

    # load SIP indicator specs so we know how to get the indicators we want
    indicator_specs = {}
    with open(config['SIP']['indicator_specifications'], 'r') as stream:
        try:
            indicator_specs = yaml.safe_load(stream)
            logging.info(
                "Successfully loaded indicator specifications: {}".format(
                    indicator_specs))
        except yaml.YAMLError as e:
            logging.error("Couldn't load indicator specs : {}".format(e))
            return

    # Load ACE API
    ace_api.set_default_remote_host(config['ACE']['ace_address'])
    ace_api.set_default_ssl_ca_path(config['ACE']['ca_chain_path'])

    # Create SIP Client and load indicators
    sip_ssl = config['SIP'].getboolean('ssl_verify')
    sc = pysip.Client(config['SIP']['sip_address'],
                      config['SIP']['sip_api_key'],
                      verify=sip_ssl)
    status = indicator_specs.get('status', 'Analyzed')
    indicators = {}
    for i_type in indicator_specs['type']:
        handle_proxy(config['SIP'])
        indicators[i_type] = sc.get('/indicators?type={}&status={}'.format(
            i_type, status))

    # load field mappings
    field_map = ConfigParser()
    field_map.read(config['GLOBAL']['field_mappings'])
    sip_cbr_map = field_map['SIP-TO-CBR']
    sip_ace_map = field_map['SIP-TO-ACE']
    cbr_ace_map = field_map['CBR-TO-ACE']

    submitted_alerts = []

    # Query Carbon Black Response for our indicators
    #cbq = CBquery(profile=config['CbR']['profile'])
    handle_proxy(config['CbR'])
    cb = CbResponseAPI(profile=config['CbR']['profile'])
    for i_type in indicator_specs['type']:
        for i in indicators[i_type]:
            query = '{}:"{}"'.format(sip_cbr_map[i_type], i['value'])
            logging.debug('Querying CbR for indicator:{} query:{}'.format(
                i['id'], query))
            procs = cb.select(Process).where(query).group_by('id')
            if procs:
                # alert ACE
                Alert = ace_api.Analysis(description='CbR - SIP:{}'.format(
                    i['value']),
                                         analysis_mode='correlation',
                                         tool='SipCbrAce')
                print(Alert.description)
                Alert.add_indicator(i['id'])
                # get sip tags and tag Alert
                handle_proxy(config['SIP'])
                i_details = sc.get('/indicators/{}'.format(i['id']))
                handle_proxy(config['CbR'])
                for tag in i_details['tags']:
                    Alert.add_tag(tag)
                alert_details = {}
                alert_details['total_results'] = len(procs)
                max_results = config['GLOBAL'].getint('alert_max_results')
                alert_details['included_results'] = 0
                alert_details['process_details'] = []
                for proc in procs:
                    if alert_details['included_results'] > max_results:
                        break
                    alert_details['process_details'].append(str(proc))
                    alert_details['included_results'] += 1
                    Alert.add_hostname(proc.hostname)
                    Alert.add_md5(proc.process_md5)
                    Alert.add_ipv4(proc.comms_ip)
                    Alert.add_ipv4(proc.interface_ip)
                    Alert.add_process_guid(proc.id)
                    Alert.add_user(proc.username)
                    Alert.add_file_name(proc.process_name)
                    Alert.add_file_path(proc.path)
                    #Alert.add_file_location('{}@{}'.format(proc.hostname, proc.path))
                #Alert.submit_kwargs['details'] = alert_details
                handle_proxy(config['ACE'])
                print(Alert.description)
                submitted_alerts.append(Alert.submit())
                logging.info(
                    "Submitted alert to ACE: {UUID} - URL=https://{HOST}/ace/analysis?direct={UUID}"
                    .format(UUID=Alert.uuid, HOST=Alert.remote_host))

    print(submitted_alerts)
Example #29
    def __init__(self,
                 access_id="",
                 secret_key="",
                 default_org="",
                 base_url="",
                 out_file="tc.json",
                 sources="",
                 ioc_types="",
                 custom_ioc_key="",
                 feed_url="",
                 cb_server_token="",
                 cb_server_url="https://127.0.0.1",
                 cb_server_ssl_verify=False,
                 ioc_min=None,
                 niceness=None,
                 debug=False,
                 log_file=None,
                 max_iocs=5000):

        logger.info("ThreatConnect Base URL: {0}".format(base_url))

        self.tcapi = ThreatConnect(api_aid=access_id,
                                   api_sec=secret_key,
                                   api_url=base_url,
                                   api_org=default_org)

        self.sources = sources

        self.ioc_min = ioc_min

        self.ioc_types = ioc_types

        logger.info("Configured IOC Types are : {0}".format(self.ioc_types))
        logger.info("Configured IOC Min is  : {0}".format(self.ioc_min))

        self.custom_ioc_key = custom_ioc_key

        self.max_iocs = max_iocs

        if self.sources[0] == "*":
            owners = self.tcapi.owners()
            try:
                # retrieve the Owners
                owners.retrieve()
            except RuntimeError as e:
                logger.error(traceback.format_exc())
                sys.exit(1)
            # iterate through the Owners
            self.sources = [owner.name for owner in owners]

        logger.info("Sources = {0}".format(self.sources))

        self.niceness = niceness
        if self.niceness is not None:
            os.nice(self.niceness)

        self.debug = debug
        if self.debug:
            logger.setLevel(logging.DEBUG)

        self.log_file = log_file

        self.out_file = out_file

        self.feed = None

        self.cb = CbResponseAPI(url=cb_server_url,
                                token=cb_server_token,
                                ssl_verify=cb_server_ssl_verify)

        self.feed_url = feed_url
Example #30
def main():
    """Main entry point for cbinterface."""

    # configure logging #
    logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - [%(levelname)s] %(message)s")
    coloredlogs.install(level="INFO", logger=logging.getLogger())

    # set clean exit signal
    signal.signal(signal.SIGINT, clean_exit)

    # load carbonblack environment profiles #
    configured_environments = load_configured_environments()
    environments = []
    # create human friendly options for the CLI
    for product, profiles in configured_environments.items():
        for profile in profiles:
            environments.append(f"{product}:{profile}")

    # chose the default environment
    default_product_name = get_default_cbapi_product()
    default_profile_name = get_default_cbapi_profile()
    default_environments = [env for env in environments if env.startswith(default_product_name)]
    default_environment = f"{default_product_name}:{default_profile_name}"
    default_environment = (
        default_environment if default_environments and default_environment in default_environments else environments[0]
    )

    parser = argparse.ArgumentParser(description="Interface to Carbon Black for IDR teams.")
    parser.add_argument("-d", "--debug", action="store_true", help="Turn on debug logging.")
    parser.add_argument(
        "-e",
        "--environment",
        action="store",
        choices=environments,
        default=default_environment,
        help=f"specify an environment to work with. Default={default_environment}",
    )
    parser.add_argument(
        "-sde",
        "--set-default-environment",
        action="store",
        choices=environments,
        help="configure your default Cb environment",
    )
    parser.add_argument(
        "-tz",
        "--time-zone",
        action="store",
        help='specify the timezone to override defaults. ex. "US/Eastern" or "Europe/Rome"',
    )
    parser.add_argument(
        "--set-default-timezone",
        action="store",
        help='configure your default timezone. ex. "US/Eastern" or "Europe/Rome"',
    )

    subparsers = parser.add_subparsers(dest="command")

    # query parser
    parser_query = subparsers.add_parser(
        "query", aliases=["pq", "q"], help="Execute a process search query. 'query -h' for more"
    )
    parser_query.add_argument("query", help="the process search query you'd like to execute")
    parser_query.add_argument(
        "-s",
        "--start-time",
        action="store",
        help="Start time of the process.  Format:'Y-m-d H:M:S' UTC",
    )
    parser_query.add_argument(
        "-e",
        "--last-time",
        action="store",
        help="Narrow to processes with start times BEFORE this end/last time. Format:'Y-m-d H:M:S' UTC",
    )
    parser_query.add_argument(
        "-nw",
        "--no-warnings",
        action="store_true",
        default=False,
        help="Don't warn before printing large query results",
    )
    parser_query.add_argument(
        "-ad",
        "--all-details",
        action="store_true",
        default=False,
        help="Print all available process info (all fields).",
    )
    parser_query.add_argument(
        "--facets", action="store_true", default=None, help="Retrieve statistical facets for this query."
    )

    # process inspection/investigation parser
    parser_inspect = subparsers.add_parser(
        "investigate", aliases=["proc", "i"], help="Investigate process events and metadata."
    )
    parser_inspect.add_argument(
        "process_guid_options", help="the process GUID/segment to inspect. Segment is optional."
    )
    parser_inspect.add_argument(
        "-i", "--proc-info", dest="inspect_proc_info", action="store_true", help="show binary and process information"
    )
    parser_inspect.add_argument(
        "-w",
        "--walk-tree",
        dest="walk_and_inspect_tree",
        action="store_true",
        help="Recursively walk, print, and inspect the process tree. Specified arguments (ex. filemods) applied at every process in tree. WARNING: can pull large datasets.",
    )
    parser_inspect.add_argument(
        "-t",
        "--process-tree",
        dest="inspect_process_tree",
        action="store_true",
        help="print the process tree with this process as the root.",
    )
    parser_inspect.add_argument(
        "-a",
        "--process-ancestry",
        dest="inspect_process_ancestry",
        action="store_true",
        help="print the the process ancestry",
    )
    parser_inspect.add_argument(
        "-c",
        "--show-children",
        dest="inspect_children",
        action="store_true",
        help="only print process children event details",
    )
    parser_inspect.add_argument(
        "-nc", "--netconns", dest="inspect_netconns", action="store_true", help="print network connections"
    )
    parser_inspect.add_argument(
        "-fm", "--filemods", dest="inspect_filemods", action="store_true", help="print file modifications"
    )
    parser_inspect.add_argument(
        "-rm", "--regmods", dest="inspect_regmods", action="store_true", help="print registry modifications"
    )
    parser_inspect.add_argument(
        "-ml", "--modloads", dest="inspect_modloads", action="store_true", help="print modloads"
    )
    parser_inspect.add_argument(
        "-sl", "--scriptloads", dest="inspect_scriptloads", action="store_true", help="print scriptloads (PSC)"
    )
    parser_inspect.add_argument(
        "-cp", "--crossprocs", dest="inspect_crossprocs", action="store_true", help="print crossprocs"
    )
    parser_inspect.add_argument(
        "-rpe",
        "--raw-print-events",
        action="store_true",
        default=False,
        help="do not format Cb events onto a single line. Print them the way Cb does by default.",
    )
    # parser_inspect.add_argument('-warn', '--give-warnings', action='store_true', default=False,
    #                         help="Warn before printing large datasets/results")
    parser_inspect.add_argument(
        "--json", action="store_true", help="Combine all results into json document and print the result."
    )
    parser_inspect.add_argument(
        "--segment-limit",
        action="store",
        type=int,
        default=None,
        help="stop processing events into json after this many process segments",
    )
    parser_inspect.add_argument("-es", "--event-search", action="store", help="Search process events.")
    parser_inspect.add_argument(
        "-st",
        "--start-time",
        action="store",
        help="Return events that occurred AFTER this start time.  Format:'Y-m-d H:M:S' UTC",
    )
    parser_inspect.add_argument(
        "-et",
        "--end-time",
        action="store",
        help="Return events that occurred BEFORE this end time. Format:'Y-m-d H:M:S' UTC",
    )

    # live response parser
    parser_lr = subparsers.add_parser(
        "live-response", aliases=["lr"], help="Perform live response actions on a device/sensor."
    )
    parser_lr.add_argument("name_or_id", help="the hostname or sensor/device id to go live with.")
    parser_lr.add_argument(
        "-e", "--execute-command", action="store", help="Execute this command on the sensor. NOTE: waits for output."
    )
    parser_lr.add_argument("-cr", "--create-regkey", action="store", help="Create this regkey.")
    parser_lr.add_argument("-sr", "--set-regkey-value", action="append", help="Set this regkey value.")
    if configured_environments["response"]:
        parser_lr.add_argument(
            "-i",
            "--sensor-isolation-toggle",
            action="store_true",
            help="Sensor hostname/ID to isolation/unisolate (on/off). (CB Response)",
        )
    if configured_environments["psc"]:
        parser_lr.add_argument(
            "-q",
            "--quarantine",
            action="store_true",
            default=False,
            help="Quarantine the devices returned by the query. (PSC)",
        )
        parser_lr.add_argument(
            "-uq",
            "--un_quarantine",
            action="store_true",
            default=False,
            help="UN-Quarantine the devices returned by the query. (PSC)",
        )

    # live response subparser
    lr_subparsers = parser_lr.add_subparsers(dest="live_response_command")

    # live response put file parser
    parser_put_file = lr_subparsers.add_parser("put", help="Put a file on the device/sensor.")
    parser_put_file.add_argument("local_filepath", action="store", help="Path to the file.")
    parser_put_file.add_argument("sensor_write_filepath", action="store", help="Path to write the file on the sensor.")

    # live response playbook parser
    parser_playbook = lr_subparsers.add_parser(
        "playbook", aliases=["pb", "play"], help="Execute a live response playbook script."
    )
    parser_playbook.add_argument(
        "-f", "--playbook-configpath", action="store", help="Path to a playbook config file to execute."
    )
    playbook_map = get_playbook_map()
    playbook_names = [p["name"] for _, p in playbook_map.items()]
    parser_playbook.add_argument(
        "-p",
        "--playbook-name",
        action="store",
        choices=playbook_names,
        help="The name of a configured playbook to execute.",
    )
    parser_playbook.add_argument("-l", "--list-playbooks", action="store_true", help="List configured playbooks.")
    parser_playbook.add_argument(
        "--write-template", action="store_true", help="write a playbook template file to use as example."
    )

    # live response collect parser
    parser_collect = lr_subparsers.add_parser("collect", help="Collect artifacts from hosts.")
    parser_collect.add_argument(
        "-i", "--sensor-info", dest="sensor_info", action="store_true", help="print default sensor information"
    )
    parser_collect.add_argument("-p", "--process-list", action="store_true", help="show processes running on sensor")
    parser_collect.add_argument("-f", "--file", action="store", help="collect file at this path on sensor")
    parser_collect.add_argument(
        "-lr", "--regkeypath", action="store", help="List all registry values from the specified registry key."
    )
    parser_collect.add_argument(
        "-r", "--regkeyvalue", action="store", help="Returns the associated value of the specified registry key."
    )
    parser_collect.add_argument(
        "-ld", "--list-directory", action="store", help="List the contents of a directory on the sensor."
    )
    parser_collect.add_argument(
        "-wd", "--walk-directory", action="store", help="List the contents of a directory on the sensor."
    )
    parser_collect.add_argument("--drives", action="store_true", help="Get logical drives on this sensor.")
    parser_collect.add_argument(
        "--memdump", action="store_true", help="Use Cb to dump sensor memory and collect the memdump."
    )

    # live response remediation parser
    parser_remediate = lr_subparsers.add_parser(
        "remediate", help="Perform remdiation (delete/kill) actions on device/sensor."
    )
    parser_remediate.add_argument(
        "-f", "--delete-file-path", action="store", help="delete the file at this path on the sensor"
    )
    parser_remediate.add_argument(
        "-kpname", "--kill-process-name", action="store", help="kill all processes with this name"
    )
    parser_remediate.add_argument("-kpid", "--kill-process-id", action="store", help="kill the process with this ID")
    parser_remediate.add_argument("-drv", "--delete-regkeyvalue", action="store", help="Delete the regkey value.")
    parser_remediate.add_argument(
        "--delete-entire-regkey", action="store", help="Delete the registry key and all values. BE CAREFUL."
    )
    parser_remediate.add_argument("-rs", "--remediation-script", action="store", help="Path to a remediaiton script.")
    parser_remediate.add_argument("--write-template", action="store_true", help="write a remediation template.")

    # session parser - NOTE: functionality is limited on the PSC side; notably, we cannot
    # get a list of active PSC LR sessions... or at least I haven't figured out how to do that.
    parser_session = subparsers.add_parser("session", help="Interact with Cb live response server sessions.")
    if configured_environments["response"]:
        parser_session.add_argument(
            "-lss",
            "--list-sensor-sessions",
            action="store",
            help="list all CbLR sessions associated to this sensor ID (Response only).",
        )
    parser_session.add_argument(
        "-gsc", "--get-session-command-list", action="store", help="list commands associated to this session"
    )
    if configured_environments["response"]:
        parser_session.add_argument(
            "-a", "--list-all-sessions", action="store_true", help="list all CbLR sessions (Response only)."
        )
    parser_session.add_argument("-g", "--get-session", action="store", help="get live response session by id.")
    parser_session.add_argument("-c", "--close-session", action="store", help="close live response session by id.")
    parser_session.add_argument(
        "-gcr", "--get-command-result", action="store", help="get any results for this command."
    )
    parser_session.add_argument(
        "-f", "--get-file-content", action="store", help="byte stream any file content to stdout. (use a pipe)"
    )

    # enumeration parser
    parser_enumeration = subparsers.add_parser(
        "enumerate", aliases=["e"], help="Data enumerations for answering common questions."
    )
    parser_enumeration.add_argument(
        "-lh",
        "--logon-history",
        action="store",
        help="Given process username or device name, roughly enumerate logon history (Windows OS).",
    )

    # only add independent product args if product is a configured option
    if configured_environments["response"]:
        add_response_arguments_to_parser(subparsers)
    if configured_environments["psc"]:
        add_psc_arguments_to_parser(subparsers)

    argcomplete.autocomplete(parser)
    args = parser.parse_args()

    if args.debug:
        logging.getLogger("urllib3.connectionpool").setLevel(logging.INFO)
        coloredlogs.install(level="DEBUG", logger=logging.getLogger())

    if args.time_zone:
        set_timezone(args.time_zone)

    if args.set_default_timezone:
        set_timezone(args.set_default_timezone)
        save_configuration()

    if args.set_default_environment:
        product, profile = args.set_default_environment.split(":", 1)
        set_default_cbapi_product(product)
        set_default_cbapi_profile(profile)
        save_configuration()

    # Functionality that doesn't require a Cb connection.
    if args.command and (args.command.lower() == "lr" or args.command.lower().startswith("live")):
        if args.live_response_command and (
            args.live_response_command.startswith("play") or args.live_response_command == "pb"
        ):
            if args.list_playbooks:
                print(f"\nConfigured Playbooks:")
                for pb_key, pb_metadata in playbook_map.items():
                    print(f"\t{pb_metadata['name']} : {pb_metadata['description']}")
                print()
                return True
            if args.write_template:
                template_path = write_playbook_template()
                if os.path.exists(template_path):
                    LOGGER.info(f" + wrote {template_path}")
                return True
        if args.live_response_command and args.live_response_command.startswith("r"):
            if args.write_template:
                template_path = write_remediation_template()
                if os.path.exists(template_path):
                    LOGGER.info(f" + wrote {template_path}")
                return True

    # Connect and execute
    product, profile = args.environment.split(":", 1)
    LOGGER.debug(f"using '{profile}' profile via the configured '{product}' product.")
    try:
        if product == "response":
            cb = CbResponseAPI(profile=profile)
            execute_response_arguments(cb, args)

        elif product == "psc":
            cb = CbThreatHunterAPI(profile=profile)
            execute_threathunter_arguments(cb, args)
    except ConnectionError as e:
        LOGGER.critical(f"Couldn't connect to {product} {profile}: {e}")
    except UnauthorizedError as e:
        LOGGER.critical(f"{e}")
    except ServerError as e:
        LOGGER.critical(f"CB ServerError 😒 (try again) : {e}")
    except TimeoutError as e:
        LOGGER.critical(f"TimeoutError waiting for CB server 🙄 (try again) : {e}")
Example #31
class CbTaxiiFeedConverter(object):
    """
    Class to convert TAXII feeds into EDR feeds.
    """

    def __init__(self, config_file_path: str, debug_mode: bool = False, import_dir: str = '',
                 export_dir: Optional[str] = None, strict_mode: bool = False):
        """
        Parse config file and save off the information we need.

        NOTE: At present, import path is unused

        :param config_file_path: configuration file location
        :param debug_mode: If True, operate in debug mode
        :param import_dir: feed import directory
        :param export_dir: export directory (optional)
        :param strict_mode: If True, be stricter with the config
        """
        try:
            config_dict = parse_config(config_file_path, strict_mode=strict_mode)
        except TaxiiConfigurationException as err:
            _logger.error(f"{err}", exc_info=False)
            sys.exit(-1)

        if debug_mode:
            _logger.debug(f"Config: {config_dict}")

        self.server_url = config_dict.get('server_url', 'https://127.0.0.1')
        self.api_token = config_dict.get('api_token', '')
        self.sites = config_dict.get('sites', [])
        self.debug = config_dict.get('debug', False)
        self.export_dir = export_dir
        self.import_dir = import_dir
        self.integration_name = 'Cb Taxii Connector 1.6.5'

        self.http_proxy_url = config_dict.get('http_proxy_url', None)
        self.https_proxy_url = config_dict.get('https_proxy_url', None)

        # if exporting, make sure the directory exists
        if self.export_dir and not os.path.exists(self.export_dir):
            os.mkdir(self.export_dir)

        # Test Cb Response connectivity
        try:
            self.cb = CbResponseAPI(url=self.server_url, token=self.api_token,
                                    ssl_verify=False, integration_name=self.integration_name)
            self.cb.info()
        except Exception as err:
            _logger.error(f"Failed to make connection: {err}", exc_info=True)
            sys.exit(-1)

    @staticmethod
    def write_to_temp_file(message: AnyStr) -> Tuple[tempfile.NamedTemporaryFile, str]:
        """
        Write text to a temp file for later use.

        :param message: text to be saved
        :return: Tuple of (NamedTemporaryFile, tempfile name)
        """
        temp_file = tempfile.NamedTemporaryFile()
        temp_file.write(message)
        temp_file.flush()
        return temp_file, temp_file.name
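
    # Usage sketch: the caller owns the returned handle, and closing the
    # NamedTemporaryFile deletes the file, so keep it open while the path is
    # in use (as _import_collection does below):
    #
    #     handle, path = CbTaxiiFeedConverter.write_to_temp_file(b"<stix/>")
    #     stix_package = STIXPackage.from_xml(path)
    #     handle.close()  # removes the temp file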

    # NOTE: currently unused; retained for future need
    # noinspection PyUnusedFunction
    def read_from_xml(self) -> List[str]:
        """
        Walk the import directory and return all filenames; all files are assumed to be XML.

        :return: List of filenames
        """
        the_list = []
        if not self.import_dir:  # possible if input dir is not specified
            _logger.warning("Input directory was not specified -- skipping xml read")
            return the_list

        for (dirpath, dirnames, filenames) in os.walk(self.import_dir):
            the_list.extend(filenames)
            break
        return the_list

    def export_xml(self, feed_name: str, start_time: str, end_time: str, block_num: int, message: AnyStr) -> None:
        """
        Write a feed content block to the export directory.

        :param feed_name: name of the feed, for the holding directory name
        :param start_time: start time
        :param end_time: end time
        :param block_num: write block number (for uniqueness)
        :param message: feed text
        """
        # create a directory to store all content blocks
        dir_name = f"{feed_name}".replace(' ', '_')
        full_dir_name = os.path.join(self.export_dir, dir_name)

        # Make sure the directory exists
        if not os.path.exists(full_dir_name):
            os.mkdir(full_dir_name)

        # Actually write the file
        file_name = f"{start_time}-{end_time}-{block_num}".replace(' ', "_")
        full_file_name = os.path.join(full_dir_name, file_name)

        with open(full_file_name, 'wb') as file_handle:
            file_handle.write(message)
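
    # Layout sketch (hypothetical values): with export_dir='/tmp/export',
    # feed_name='My Site Feed' and block_num=0, the block is written to
    # '/tmp/export/My_Site_Feed/<start_time>-<end_time>-0' (spaces become
    # underscores in both path components).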

    def _import_collection(self, client: Union[Client10, Client11], site: dict, collection: CabbyCollection,
                           data_set: bool = False) -> int:
        """
        Import a TAXII client collection into a feed.

        :param client: Taxii spec client v1.0 or v1.1
        :param site: site definition
        :param collection: cabby collection
        :param data_set: True if DATA_SET, False otherwise
        :return: the EDR feed id, or -1 if not available
        """

        collection_name = collection.name
        display_name = f"{site.get('site')} {collection_name}"
        sanitized_feed_name = cleanup_string(site.get('site') + collection_name)
        feed_summary = display_name
        available = collection.available
        collection_type = collection.type
        default_score = site.get('default_score')
        _logger.info(f"Working on SITE {site.get('site')}, NAME {collection_name}, FEED {sanitized_feed_name}, "
                     f"AVAIL {available}, TYPE {collection_type}")
        _logger.info('-' * 80)

        # if not available, nothing else to do
        if not available:
            return -1

        # Sanity check on start date; provide a bare minimum
        start_date_str = site.get('start_date')
        if not start_date_str:
            start_date_str = "2019-01-01 00:00:00"

        # Create a feed helper object
        feed_helper = FeedHelper(site.get('output_path'), sanitized_feed_name, site.get('minutes_to_advance'),
                                 start_date_str, reset_start_date=site.get('reset_start_date', False))

        if not data_set:
            _logger.info("Feed start time %s" % feed_helper.start_date)

        #
        # Build up the URI for polling
        #

        if not site.get('poll_path', ''):
            uri: Optional[str] = None
        else:
            uri = 'https://' if site.get('use_https') else 'http://'
            uri += site.get('site')
            uri += site.get('poll_path')
            _logger.info(f'Poll path: {uri}')
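            # Example (hypothetical values): site='taxii.example.com',
            # poll_path='/taxii-data' and use_https=True yield
            # uri == 'https://taxii.example.com/taxii-data'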

        # build up all the reports for the feed
        reports: List[Dict[str, Any]] = []
        while True:
            num_times_empty_content_blocks = 0
            try:
                try:
                    _logger.info(f"Polling Collection: {collection.name} ...")
                    content_blocks = client.poll(collection_name=collection.name, begin_date=feed_helper.start_date,
                                                 end_date=feed_helper.end_date, content_bindings=BINDING_CHOICES,
                                                 uri=uri)
                except Exception as e:
                    _logger.info(f"{e}")
                    content_blocks = []

                #
                # Iterate through all content_blocks
                #
                num_blocks = 0

                if not data_set:
                    _logger.info(f" ... start_date: {feed_helper.start_date}, end_date: {feed_helper.end_date}")
                for block in content_blocks:
                    _logger.debug(block.content)

                    #
                    # if in export mode then save off this content block
                    #
                    if self.export_dir:
                        self.export_xml(collection_name,
                                        feed_helper.start_date,
                                        feed_helper.end_date,
                                        num_blocks,
                                        block.content)

                    #
                    # This code accounts for a case found with ThreatCentral.io where the content is url encoded.
                    # etree.fromstring can parse this data.
                    #
                    try:
                        root = etree.fromstring(block.content)
                        content = root.find('.//{http://taxii.mitre.org/messages/taxii_xml_binding-1.1}Content')
                        if content is not None and len(content) == 0:
                            #
                            # Content has no children, so parse the element's text as XML
                            # and re-add it as a child so it can be parsed as valid STIX
                            #
                            new_stix_package = etree.fromstring(root.find(
                                "{http://taxii.mitre.org/messages/taxii_xml_binding-1.1}"
                                "Content_Block/{http://taxii.mitre.org/messages/taxii_xml_binding-1.1}Content").text)
                            content.append(new_stix_package)

                        #
                        # Since we modified the XML, we need to create a new XML message string to parse
                        #
                        message = etree.tostring(root)

                        #
                        # Write the content block to disk so we can parse with python stix
                        #
                        file_handle, file_path = self.write_to_temp_file(message)

                        #
                        # Parse STIX data
                        #
                        stix_package = STIXPackage.from_xml(file_path)

                        #
                        # If it is a DATA_SET, build feed_summary from the stix_header description.
                        # NOTE: this is for RecordedFuture; to date, RecordedFuture is the only
                        #       source observed using data sets.
                        #
                        if data_set and stix_package.stix_header and stix_package.stix_header.descriptions:
                            for desc in stix_package.stix_header.descriptions:
                                feed_summary = f"{desc.value}: {collection_name}"
                                break

                        #
                        # Get the timestamp of the STIX Package so we can use this in our feed
                        #
                        timestamp = dt_to_seconds(stix_package.timestamp)

                        # check for empty content in this block; we stop after more than 10 consecutive empty blocks
                        if not stix_package.indicators and not stix_package.observables:
                            num_times_empty_content_blocks += 1
                            if num_times_empty_content_blocks > 10:
                                break

                        # Go through all STIX indicators
                        if stix_package.indicators:
                            for indicator in stix_package.indicators:

                                if not indicator or not indicator.observable:
                                    continue

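                                # Score mapping: a numeric confidence is used directly;
                                # HIGH/MEDIUM/LOW map to 75/50/25; anything else falls
                                # back to the site's default_score.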
                                if indicator.confidence:
                                    if str(indicator.confidence.value).isdigit():
                                        #
                                        # Get the confidence score and use it for our score
                                        #
                                        score = int(indicator.confidence.to_dict().get("value", default_score))
                                    else:
                                        if str(indicator.confidence.value).lower() == "high":
                                            score = 75
                                        elif str(indicator.confidence.value).lower() == "medium":
                                            score = 50
                                        elif str(indicator.confidence.value).lower() == "low":
                                            score = 25
                                        else:
                                            score = default_score
                                else:
                                    score = default_score

                                if not indicator.timestamp:
                                    timestamp = 0
                                else:
                                    timestamp = int((indicator.timestamp - EPOCH).total_seconds())

                                # Cybox observable returns a list
                                reports.extend(cybox_parse_observable(indicator.observable, indicator, timestamp,
                                                                      score))

                        #
                        # Now lets find some data.  Iterate through all observables and parse
                        #
                        if stix_package.observables:
                            for observable in stix_package.observables:
                                if not observable:
                                    continue

                                # Cybox observable returns a list
                                reports.extend(cybox_parse_observable(observable, None, timestamp, default_score))

                        #
                        # Delete our temporary file
                        #
                        file_handle.close()

                        # increase block count
                        num_blocks += 1
                    except Exception as e:
                        _logger.info(f"{e}")
                        continue

                _logger.info(f"content blocks read: {num_blocks}")
                _logger.info(f"current number of reports: {len(reports)}")

                if len(reports) > site.get('reports_limit'):
                    _logger.info(f"We have reached the reports limit of {site.get('reports_limit')}")
                    break
            except Exception as e:
                _logger.info(f"{e}")

            # If it is just a data_set, the data is unordered, so we can just break out of the while loop
            if data_set:
                break

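            # Advance the polling window; when the feed helper cannot advance
            # any further we have caught up, so stop.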
            if feed_helper.advance():
                continue
            else:
                break

        _logger.info(f"Found {len(reports)} new reports.")

        if not data_set:
            # We only want to concatenate if we are NOT a data set, otherwise we want to refresh all the reports
            _logger.info("Adding existing reports...")
            reports = feed_helper.load_existing_feed_data() + reports

        _logger.info(f"Total number of reports: {len(reports)}")

        if site.get('reports_limit') < len(reports):
            _logger.info("Truncating reports to length {0}".format(site.get('reports_limit')))
            reports = reports[:site.get('reports_limit')]

        try:
            use_icon = site.get('icon_link')
            if not use_icon or not os.path.exists(use_icon):
                _logger.warning(f"Unable to find feed icon at path {use_icon}")
                use_icon = None
            data = build_feed_data(sanitized_feed_name,
                                   display_name,
                                   feed_summary,
                                   site.get('site'),
                                   use_icon,
                                   reports)
        except Exception as err:
            _logger.warning(f"Failed to create feed data for {sanitized_feed_name}: {err}")
            return -1

        if feed_helper.write_feed(data):
            feed_helper.save_details()

        #
        # Create Cb Response Feed if necessary
        #

        feed_id = None

        try:
            feeds = get_object_by_name_or_id(self.cb, Feed, name=sanitized_feed_name)

            if not feeds:
                _logger.info(f"Feed {sanitized_feed_name} was not found, so we are going to create it...")

            elif len(feeds) > 1:
                _logger.warning(f"Multiple feeds found, selecting Feed id {feeds[0].id}")
                feed_id = feeds[0].id

            elif feeds:
                feed_id = feeds[0].id
                _logger.info(f"Feed `{sanitized_feed_name}` was found as Feed ID {feed_id}")

        except Exception as e:
            _logger.info(f"{e}")

        if not feed_id:
            _logger.info(f" ... creating {sanitized_feed_name} feed for the first time")

            f = self.cb.create(Feed)
            f.feed_url = "file://" + feed_helper.path
            f.enabled = site.get('feeds_enable')
            f.use_proxy = False
            f.validate_server_cert = False
            try:
                f.save()
            except ServerError as se:
                if se.error_code == 500:
                    _logger.warning(" ... could not add feed:")
                    _logger.warning("   Received error code 500 from server. This is usually because "
                                    "the server cannot retrieve the feed.")
                    _logger.warning("   Check to ensure the Cb server has network connectivity "
                                    "and the credentials are correct.")
                    _logger.warning("!" * 80 + "\n")
                else:
                    info = feed_helper.dump_feedinfo()
                    _logger.info(f" ... Could not add feed: {se}\n >> {info}")
            except Exception as e:
                info = feed_helper.dump_feedinfo()
                _logger.warning(f" ... Could not add feed: {e}\n >> {info}")
                _logger.warning("!" * 80 + "\n")
            else:
                _logger.info(f"Feed data: {f}")
                _logger.info(f"Added feed. New feed ID is {f.id}")
                feed_id = f.id

        return feed_id

    def perform(self) -> None:
        """
        Perform the taxii hailing service.
        """
        for site in self.sites:
            client: Union[Client10, Client11] = create_client(site.get('site'),
                                                              use_https=site.get('use_https'),
                                                              discovery_path=site.get('discovery_path'))

            #
            # Set verify_ssl and ca_cert inside the client
            #
            client.set_auth(verify_ssl=site.get('ssl_verify'), ca_cert=site.get('ca_cert'))

            #
            # Proxy Settings
            #
            proxy_dict = dict()

            if self.http_proxy_url:
                _logger.info(f"Found HTTP Proxy: {self.http_proxy_url}")
                proxy_dict['http'] = self.http_proxy_url

            if self.https_proxy_url:
                _logger.info(f"Found HTTPS Proxy: {self.https_proxy_url}")
                proxy_dict['https'] = self.https_proxy_url

            if proxy_dict:
                client.set_proxies(proxy_dict)

            # If a username is supplied use basic authentication
            if site.get('username') or site.get('cert_file'):
                _logger.info("Found Username in config, using basic auth...")
                client.set_auth(username=site.get('username'),
                                password=site.get('password'),
                                verify_ssl=site.get('ssl_verify'),
                                ca_cert=site.get('ca_cert'),
                                cert_file=site.get('cert_file'),
                                key_file=site.get('key_file'))

            if not site.get('collection_management_path', ''):
                collections = client.get_collections()
            else:
                uri = 'https://' if site.get('use_https') else 'http://'
                uri += site.get('site')
                uri += site.get('collection_management_path')
                _logger.info(f'Collection Management Path: {uri}')

                collections: List[CabbyCollection] = client.get_collections(uri=uri)

            if not collections:
                _logger.info('Unable to find any collections. Exiting...')
                sys.exit(0)

            _logger.info("=" * 80)
            for collection in collections:
                _logger.info(f'Collection Name: {collection.name}, Collection Type: {collection.type}')
            _logger.info("=" * 80 + "\n")

            desired_collections = [x.strip() for x in site.get('collections').lower().split(',')]

            want_all = '*' in desired_collections

            for collection in collections:
                if collection.type not in ('DATA_FEED', 'DATA_SET'):
                    continue

                data_set = collection.type == 'DATA_SET'

                if want_all or collection.name.lower() in desired_collections:
                    self._import_collection(client, site, collection, data_set)
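

# Usage sketch (illustrative entry point; assumes a local 'config.yaml' that
# defines server_url, api_token and sites -- not part of the original example):
if __name__ == '__main__':
    converter = CbTaxiiFeedConverter('config.yaml', debug_mode=False, export_dir=None)
    converter.perform()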