def __init__(self, config_file_path, debug_mode=False, import_dir='', export_dir=''):
        """
        Parse the connector config file, store connection settings, and verify
        Cb Response connectivity before doing anything else.

        :param config_file_path: path to the connector configuration file
        :param debug_mode: presumably enables debug behavior elsewhere — unused here (TODO confirm)
        :param import_dir: feed import directory
        :param export_dir: feed export directory; created if missing
        """
        config_dict = parse_config(config_file_path)

        self.server_url = config_dict.get('server_url', 'https://127.0.0.1')
        self.api_token = config_dict.get('api_token', '')
        self.sites = config_dict.get('sites', [])
        self.debug = config_dict.get('debug', False)
        self.export_dir = export_dir
        self.import_dir = import_dir
        self.integration_name = 'Cb Taxii Connector 1.6.5'

        self.http_proxy_url = config_dict.get('http_proxy_url', None)
        self.https_proxy_url = config_dict.get('https_proxy_url', None)

        # if exporting, make sure the directory exists
        if self.export_dir and not os.path.exists(self.export_dir):
            os.mkdir(self.export_dir)

        # Test Cb Response connectivity; nothing else can work without it,
        # so any failure is fatal.
        try:
            self.cb = CbResponseAPI(url=self.server_url,
                                    token=self.api_token,
                                    ssl_verify=False,
                                    integration_name=self.integration_name)
            self.cb.info()
        # BUG FIX: was a bare "except:", which also trapped SystemExit and
        # KeyboardInterrupt; narrow to Exception.
        except Exception:
            logger.error(traceback.format_exc())
            sys.exit(-1)
Exemple #2
0
    def start(self):
        """Start the internal worker thread (see the base class docs) and open
        the connection to the Cb Response server.

        Returns self so that the call can be chained.
        """
        super().start()
        self._cb = CbResponseAPI(profile=self._profile_name)
        return self
Exemple #3
0
def CbSensor_search(profile, hostname):
    """Look up a Cb Response Sensor object by hostname.

    If exactly one sensor matches, it is returned directly. If several match,
    per-sensor details are printed and the user is prompted to choose a
    sensor id (defaulting to the highest id).

    :param profile: cbapi credential profile name
    :param hostname: hostname to search for
    :return: the matching/chosen Sensor, or False when nothing was found
             or an error occurred
    """
    cb = CbResponseAPI(profile=profile)
    sensor = None
    logger.info("Getting the sensor object from carbonblack")
    try:
        result = make_sensor_query(cb, f"hostname:{hostname}")
        # BUG FIX: guard the empty case explicitly; the original fell through
        # to result[0] and relied on an IndexError being caught below.
        if not result:
            logger.warning(
                "A sensor by hostname '{}' wasn't found in this environment".
                format(hostname))
            return False
        if len(result) == 1:
            return result[0]
        if isinstance(result[0], Sensor):
            print()
            # FIX: logger.warn() is a deprecated alias for logger.warning()
            logger.warning(
                "MoreThanOneResult searching for {0:s}".format(hostname))
            print("\nResult breakdown:")
            sensor_ids = []
            for s in result:
                sensor_ids.append(int(s.id))
                # track the sensor with the highest id as the default pick
                if int(s.id) == max(sensor_ids):
                    sensor = s
                print()
                print("Sensor object - {}".format(s.webui_link))
                print(
                    "-------------------------------------------------------------------------------\n"
                )
                print("\tos_environment_display_string: {}".format(
                    s.os_environment_display_string))
                print()
                print("\tstatus: {}".format(s.status))
                print("\tsensor_id: {}".format(s.id))
                print("\tlast_checkin_time: {}".format(s.last_checkin_time))
                print("\tnext_checkin_time: {}".format(s.next_checkin_time))
                print("\tsensor_health_message: {}".format(
                    s.sensor_health_message))
                print("\tsensor_health_status: {}".format(
                    s.sensor_health_status))
                print("\tnetwork_interfaces:")
            print()
            default_sid = max(sensor_ids)
            choice_string = "Which sensor do you want to use?\n"
            for sid in sensor_ids:
                choice_string += "\t- {}\n".format(sid)
            choice_string += "\nEnter one of the sensor ids above. Default: [{}]".format(
                default_sid)
            user_choice = int(input(choice_string) or default_sid)
            for s in result:
                if user_choice == int(s.id):
                    logger.info("Returning {} sensor".format(s))
                    return s
    except Exception as e:
        if sensor is None:
            logger.warning(
                "A sensor by hostname '{}' wasn't found in this environment".
                format(hostname))
            #return False
        logger.error("{}".format(str(e)))
        return False
Exemple #4
0
    def _dispatch(self, alert, descriptor):
        """Send ban hash command to CarbonBlack

        Publishing:
            There is currently no method to control carbonblack's behavior with publishers.

        Args:
            alert (Alert): Alert instance which triggered a rule
            descriptor (str): Output descriptor

        Returns:
            bool: True if alert was sent successfully, False otherwise
        """
        # context is mandatory — it carries the action and its value
        if not alert.context:
            LOGGER.error('[%s] Alert must contain context to run actions',
                         self.__service__)
            return False

        credentials = self._load_creds(descriptor)
        if not credentials:
            return False

        api = CbResponseAPI(**credentials)
        cb_context = alert.context.get('carbonblack', {})
        action = cb_context.get('action')

        # 'ban' is the only supported action
        if action != 'ban':
            LOGGER.error('[%s] Action not supported: %s', self.__service__,
                         action)
            return False

        # md5 hash 'value' passed from the rules engine function; the binary
        # is expected to already exist in CarbonBlack
        target_hash = cb_context.get('value')
        binary = api.select(Binary, target_hash)

        if binary.banned:
            # Already banned with enforcement enabled: nothing more to do
            if binary.banned.enabled:
                return True
            # Banned but disabled: re-enable the existing banned-hash record
            ban_entry = api.select(BannedHash, target_hash)
        else:
            # No ban exists yet: create a fresh BannedHash record
            ban_entry = api.create(BannedHash)
            ban_entry.md5hash = binary.md5
            ban_entry.text = "Banned from StreamAlert"

        ban_entry.enabled = True
        ban_entry.save()
        return ban_entry.enabled is True
def fake_cb_response_api(monkeypatch):
    """Fake CbResponseAPI object."""
    def fake_info(self):
        # serve canned server info from test data, pinned to version 5.0.0
        info_path = f"{HOME_PATH}/test_data/cb_response_server_info.json"
        with open(info_path, "r") as fp:
            server_info = json.load(fp)
        server_info["version"] = "5.0.0"
        return server_info

    # replace the real network call with the canned response
    monkeypatch.setattr(CbResponseAPI, "info", fake_info)
    return CbResponseAPI(url="https://fakehost", token="N/A", ssl_verify=False)
Exemple #6
0
def main():
    """CLI entry point: ban a single md5 hash, or a file/CSV list of hashes,
    on the configured Cb Response server.

    Uses the module globals set up by init() (banner, root logger, cb) and
    calls ban_hash() for each hash.
    """
    init()
    parser = argparse.ArgumentParser(description=banner)
    parser.add_argument("-u", "--url", dest="server_url", help="Cb Response Server URL [e.g. \"http://cb.example.com\"]")
    parser.add_argument("-t", "--token", dest="api_token", help="Cb Response API Token")
    parser.add_argument("-m", "--md5hash", dest="md5hash", help="MD5 hash of the binary to be banned")
    parser.add_argument("-n", "--notes", dest="text", help="Notes for banning the binary")
    parser.add_argument("-l", "--list", dest="list_file", help="List file of binaries to be banned. Also accepts csv files.")

    opts = parser.parse_args()
    note = "Banned from API"
    global cb

    # fall back to the module-level server_url/api_token when neither CLI
    # credential was supplied (presumably set elsewhere — TODO confirm)
    if not opts.server_url and not opts.api_token:
        cb = CbResponseAPI(server_url, token=api_token, ssl_verify=False)
    else:
        cb = CbResponseAPI(opts.server_url, token=opts.api_token, ssl_verify=False)

    if opts.text:
        note = opts.text

    if opts.md5hash:
        ban_hash(opts.md5hash, note)
    elif opts.list_file:
        # BUG FIX: open in text mode — csv.DictReader and str.rstrip('\n')
        # fail on a binary-mode handle in Python 3; also use a context
        # manager so the handle is always closed.
        with open(opts.list_file, 'r') as hash_list:
            if os.path.splitext(hash_list.name)[1] == '.csv':
                csv_reader = csv.DictReader(hash_list)
                for h in csv_reader:
                    ban_hash(h['md5'], h['Note'])
            else:
                lines = [line.rstrip('\n') for line in hash_list]
                # BUG FIX: the original formatted len(lines) before 'lines'
                # existed, raising NameError on every list-file run.
                ban_text = "Banning {} hashes. Reading from list file.".format(len(lines))
                print(ban_text)
                root.info(ban_text)
                for h in lines:
                    ban_hash(h, note)
    else:
        # print the hint first: parse_args(['-h']) exits the process,
        # so anything after it is unreachable (original had it reversed)
        print("Please provide either a hash value or a list of hashes")
        parser.parse_args(['-h'])
def __init__():
    """Initialise module-level state for the download script: the cbapi
    logger, log file, banner text, a default-profile Cb Response connection,
    and the local 'files' download directory."""
    global root, script_message, cb, banner, path

    root = logging.getLogger("cbapi")
    log_name = datetime.now().strftime('dl_bin_%H_%M_%d_%m_%Y.log')
    logging.basicConfig(filename=log_name, level=logging.INFO)

    banner = "Script for downloading hashes v1"
    cb = CbResponseAPI()

    # make sure the download target directory exists
    path = "files"
    if not os.path.exists(path):
        os.makedirs(path)
def init():
    """Initialise module-level state for the hash-banning script: the cbapi
    logger, a timestamped log file, the banner text, and a default-profile
    Cb Response connection."""
    global script_message, cb, banner, root

    root = logging.getLogger("cbapi")
    log_name = datetime.now().strftime('ban_hash_%H_%M_%d_%m_%Y.log')
    logging.basicConfig(filename=log_name, level=logging.INFO)

    banner = "Script for banning hashes v1"
    # credentials come from the default cbapi profile
    cb = CbResponseAPI()
def cb_connect(server_url, api_token):
    """Validate credentials and open a global Cb Response API connection.

    :param server_url: server URL; must use an http:// or https:// scheme
    :param api_token: 40-character hex API token
    :return: a status message — "Connected to Cb Response server" on
             success, otherwise the validation error's message
    """
    message = "Connected to Cb Response server"
    global cb
    try:
        if server_url and api_token:
            # BUG FIX: the original checked startswith("http") OR
            # startswith("https") — the second test was dead code (anything
            # starting with "https" starts with "http"), and strings like
            # "httpfoo" were accepted. Require a real scheme prefix.
            if not server_url.startswith(("http://", "https://")):
                raise InvalidApiTokenError(
                    sys.exc_info(), "Invalid server URL {}".format(server_url))
            elif re.match(r"([a-fA-F\d]{40})", api_token) is None:
                raise InvalidApiTokenError(
                    sys.exc_info(), "Invalid API Token {}".format(api_token))
            cb = CbResponseAPI(server_url, token=api_token, ssl_verify=False)
    except InvalidApiTokenError as iate:
        message = iate.message
    return message
Exemple #10
0
def listAlerts(q):
    """Print Cb Response alerts matching the extra query clause *q* for the
    global CLI hostname within the global starttime/endtime window. Binary
    alerts and process alerts get different colourised output lines."""
    api = CbResponseAPI(profile=args.instance)
    criteria = ('hostname:' + args.hostname + ' AND (' + q +
                ') AND created_time:[' + starttime + ' TO ' + endtime + ']')
    for hit in api.select(Alert).where(criteria):
        if 'binary' in hit.alert_type:
            print(
                "{0} - SCORE: \033[32m{1:d}\033[m - HOST: \033[32m{2:s}\033[m - \033[33mBINARY\033[m: {3:s} - REPORT: {4:s}"
                .format(hit.created_time, hit.report_score, hit.hostname,
                        hit.md5, hit.watchlist_name))
        else:
            print(
                "{0} - SCORE: \033[32m{1:d}\033[m - HOST: \033[32m{2:s}\033[m - \033[31mPROCESS\033[m: {3:s} - REPORT: {4:s}"
                .format(hit.created_time, hit.report_score, hit.hostname,
                        hit.process_name, hit.watchlist_name))
            # process alerts also link straight to the process in the web UI
            print("\033[1;30;40m{0:s}\033[m".format(hit.process.webui_link))
Exemple #11
0
def search():
    """Check each vulnerable hostname for each vulnerable filename over a
    Cb Response live-response session.

    Reads vulnerable-names.csv / vulnerable-files.csv from save_path and
    writes hits (or an OFFLINE marker per unreachable sensor) to
    Mal_Files_Found_<watchlist>.csv, named from the module-global watchlist.
    """

    global watchlist
    
    # output CSV: one row per (hostname, file-found) or (hostname, OFFLINE)
    save_path = 'C:/Users/SMaiorino/Documents/My_Scripts/Master'
    f_name = os.path.join(save_path, 'Mal_Files_Found_{0}.csv'.format(watchlist))
    my_file = open(f_name,'w',newline='')
    writer = csv.writer(my_file)
    
    # hostnames and candidate file paths produced by an earlier collection step
    with open(os.path.join(save_path, 'vulnerable-names.csv'),'r') as n:
        names = n.readlines()

    with open(os.path.join(save_path, 'vulnerable-files.csv'), 'r') as f:
        files = f.readlines()
    
    print('\n---------------RESULTS---------------\n')
            
    for name in names:
        name = name.replace('\n', '')
        api = CbResponseAPI()
        try:
            sensor = api.select(Sensor).where('hostname:{0}'.format(name)).first()
            # an offline sensor cannot host a live-response session: record and skip
            if sensor.status == 'Offline':
                writer.writerow([name, 'OFFLINE'])
                continue
            with sensor.lr_session() as session:
                for file in files:
                    file = file.replace('\n', '')
                    try:
                        # get_file() returns content when the file exists on the endpoint
                        test_file = session.get_file(r'{0}'.format(file))
                        if test_file is not None:
                            writer.writerow([name,file])
                            print('File: {0} \nComputer: {1} \n'.format(file,name))
                            continue               
                    # best-effort: any per-file failure just moves to the next file
                    except (TimeoutError, ObjectNotFoundError, LiveResponseError, 
                            ApiError, ServerError, AttributeError, TypeError):
                        pass
        except (TimeoutError):
            # session setup timed out for this host; try the next one
            continue
        except (AttributeError):
            # sensor is None when .first() found no match — skip this host;
            # any other AttributeError aborts the whole scan
            if sensor is None:
                continue
            break

        
    my_file.close() 
Exemple #12
0
    def __init__(self, config_file_path: str, debug_mode: bool = False, import_dir: str = '',
                 export_dir: Optional[str] = None, strict_mode: bool = False):
        """
        Read the connector configuration and establish the Cb Response session.

        NOTE: At present, import path is unused.

        :param config_file_path: configuration file location
        :param debug_mode: If True, operate in debug mode
        :param import_dir: feed import directory
        :param export_dir: export directory (optional)
        :param strict_mode: If True, be harsher with config
        """
        # configuration problems are fatal — report and bail out
        try:
            cfg = parse_config(config_file_path, strict_mode=strict_mode)
        except TaxiiConfigurationException as err:
            _logger.error(f"{err}", exc_info=False)
            sys.exit(-1)

        if debug_mode:
            _logger.debug(f"Config: {cfg}")

        self.server_url = cfg.get('server_url', 'https://127.0.0.1')
        self.api_token = cfg.get('api_token', '')
        self.sites = cfg.get('sites', [])
        self.debug = cfg.get('debug', False)
        self.export_dir = export_dir
        self.import_dir = import_dir
        self.integration_name = 'Cb Taxii Connector 1.6.5'
        self.http_proxy_url = cfg.get('http_proxy_url')
        self.https_proxy_url = cfg.get('https_proxy_url')

        # create the export directory on demand
        if self.export_dir and not os.path.exists(self.export_dir):
            os.mkdir(self.export_dir)

        # verify we can actually reach the Cb Response server before continuing
        try:
            self.cb = CbResponseAPI(url=self.server_url, token=self.api_token,
                                    ssl_verify=False, integration_name=self.integration_name)
            self.cb.info()
        except Exception as err:
            _logger.error(f"Failed to make connection: {err}", exc_info=True)
            sys.exit(-1)
Exemple #13
0
def tempAll(q, instance, hits_bool):
    """Search one Cb Response instance for processes matching *q* on the
    global args.hostname within [starttime, endtime], print the hit count,
    then either stop (hits_bool) or print/collect per-process detail.

    :param q: extra query clause ANDed into the process search
    :param instance: cbapi profile name (whitespace is stripped)
    :param hits_bool: when True, report only the total hit count and exit
    """
    instance = instance.strip()
    tempCSV = []
    cb = CbResponseAPI(profile=instance)
    query = cb.select(Process).where('hostname:' + args.hostname + ' AND (' +
                                     q + ') AND start:[' + starttime + ' TO ' +
                                     endtime +
                                     ']').sort("start asc").max_children(
                                         args.c)
    try:
        print(colorize(instance + " - Total hits: " + str(len(query)),
                       'green'))
    except OSError as e:
        print(e)
    finally:
        # idiom fix: test truthiness rather than "== True"
        if hits_bool:
            sys.exit()
        else:
            sleep(3)
            for proc in query:
                print("{0} {1} {2} {3} {4} \n\033[1;30;40m{5}\033[m".format(
                    proc.start, instance, proc.hostname, proc.username,
                    proc.cmdline, proc.webui_link))
                # Show netconns switch
                if args.n is True:
                    # Iterate the CB netconns object
                    for conns in proc.netconns:
                        print("\033[32m{0}\033[m".format(conns))
                # Show child processes switch
                elif int(args.c) > 0:
                    # Iterate the child processes
                    proc.walk_children(visitor)
                elif args.csv is True:
                    tempCSV.append({
                        'proc.start': proc.start,
                        'proc.hostname': proc.hostname,
                        'proc.username': proc.username,
                        'proc.cmdline': proc.cmdline,
                        'proc.webui_link': proc.webui_link
                    })
            # idiom fix: an empty list is falsy — no need to compare to []
            if tempCSV:
                outputCSV(instance, tempCSV)
Exemple #14
0
def cbResponse():
    """Write an alphabetically sorted CSV of Windows (non-server) workstation
    hostnames whose Cb Response sensor is installed and active.

    Sensors that are uninstalled/uninstalling, servers, and group 12 are
    excluded. Output: List_Comps_Response.csv under save_path.
    """
    # Set attributes for csv file
    save_path = 'C:/Users/SMaiorino/Documents/My_Scripts/Master/Computer Lists'
    f_name = os.path.join(save_path, 'List_Comps_Response.csv')

    # Initialize API var and query parameters
    api = CbResponseAPI()
    query = "ip:172"
    sensor = api.select(Sensor).where(query)

    # FIX: use a context manager so the output handle is closed even if an
    # API error interrupts the loop (original relied on a manual close()).
    with open(f_name, 'w', newline='') as out:
        f_write = csv.writer(out)

        # Record the hostname of every installed Windows workstation sensor.
        for obj in sensor:
            names = obj.hostname
            os_name = obj.os_environment_display_string
            status = obj.status
            uninstall = obj.uninstall
            uninstalled = obj.uninstalled
            group = obj.group_id
            # (unused local lastComm removed)
            # idiom fix: "'X' not in y" instead of "not 'X' in y"; the
            # == False / != True comparisons are kept byte-for-byte since the
            # attribute types aren't visible here — TODO confirm they're bool
            if 'Server' not in os_name and 'Windows' in os_name \
            and uninstall == False and uninstalled != True \
            and 'Uninstall' not in status and group != 12:
                f_write.writerow([names])

    # Re-read and sort the names in alphabetically ascending order.
    # BUG FIX: csv.reader(open(...)) leaked the read handle in the original.
    with open(f_name) as f_read:
        sorted_file = sorted(csv.reader(f_read))

    # Re-write the sorted names into the file
    with open(f_name, 'w', newline='') as f:
        f_write = csv.writer(f)
        for row in sorted_file:
            f_write.writerow(row)
Exemple #15
0
def main():
    """Collect, for every binary hit on the chosen Response watchlist, the
    endpoint names and observed filenames (duplicates included), de-dupe
    both lists, then hand off to search() to verify each file on each host."""
    global watchlist, cb

    cb = CbResponseAPI()

    names_fp = open("vulnerable-names-dupes.csv", 'w', newline='')
    names_writer = csv.writer(names_fp)

    files_fp = open('vulnerable-files-dupes.csv', 'w', newline='')
    files_writer = csv.writer(files_fp)

    watchlist = input('Watchlist to Search Through: ')

    # NOTE: the watchlist ordinal comes from the Response console URL, e.g.
    # https://172.16.95.214:8443/#watchlist/190/?filterBy=all&sortBy=name —
    # open the watchlist of interest and read the number from the URL.
    binary_query = cb.select(Binary).where("watchlist_{0}:*".format(watchlist))

    # record every (endpoint, filename) observation, historical ones included
    for binary in binary_query:
        for filename in binary.observed_filename:
            for endpoint in binary.endpoint:
                names_writer.writerow([endpoint.split("|")[0]])
                files_writer.writerow([filename])

    names_fp.close()
    files_fp.close()

    remove_dupes()

    # the *-dupes.csv intermediates are no longer needed once de-duped
    os.remove('vulnerable-names-dupes.csv')
    os.remove('vulnerable-files-dupes.csv')

    # verify each file's presence or absence on the endpoints
    search()
Exemple #16
0
 def __init__(self, args):
     """Keep the parsed CLI arguments and open a Cb Response API session
     using the default credential profile."""
     # parsed argparse namespace driving the rest of the tool
     self.args = args
     # session used for all subsequent Cb Response calls
     self.cb_response_session = CbResponseAPI()
Exemple #17
0
def main():
    """Deploy a LERC client to a host through an existing Carbon Black
    environment (Response or PSC/CBC).

    Flow: configure logging, enumerate the configured carbonblack
    environments, parse CLI arguments, resolve the target device/sensor by
    hostname, then call deploy_lerc() with the configured install command.
    """

    # configure logging #
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s - %(name)s - [%(levelname)s] %(message)s')
    coloredlogs.install(level='INFO', logger=logger)

    # load carbonblack environment profiles #
    configured_environments = load_configured_environments()
    environments = []
    # create human friendly options for the CLI
    for product, profiles in configured_environments.items():
        for profile in profiles:
            environments.append(f"{product}:{profile}")

    # chose the default environment
    default_product_name = get_default_cbapi_product()
    default_profile_name = get_default_cbapi_profile()
    default_environments = [
        env for env in environments if env.startswith(default_product_name)
    ]
    default_environment = f"{default_product_name}:{default_profile_name}"
    # fall back to the first configured environment when the configured
    # default isn't actually present
    default_environment = (default_environment if default_environments
                           and default_environment in default_environments else
                           environments[0])

    # get the config items we need
    required_keys = ['client_installer', 'lerc_install_cmd']
    config = lerc_api.load_config(required_keys=required_keys)
    default_lerc_path = config['default'][
        'client_installer']  #'/opt/lerc/lercSetup.msi'
    lerc_environments = config['default']['environments'].split(
        ',') if 'environments' in config['default'] else ['default']

    parser = argparse.ArgumentParser(
        description="Use existing tools to install LERC")
    parser.add_argument("-d",
                        "--debug",
                        action="store_true",
                        help="Turn on debug logging.")
    parser.add_argument(
        '--lerc-env',
        choices=lerc_environments,
        default='default',
        help='Specify the LERC environment the client should belong in.')
    parser.add_argument(
        '--cbc-env',
        choices=environments,
        default=default_environment,
        help=
        f'Specify the Carbon Black environment to work with. default={default_environment}'
    )
    parser.add_argument('hostname',
                        help="the name of the host to deploy the client to")
    parser.add_argument('-p',
                        '--package',
                        default=default_lerc_path,
                        help="the msi lerc package to install")
    args = parser.parse_args()

    print(time.ctime() + "... starting")

    if args.debug:
        logging.getLogger("urllib3.connectionpool").setLevel(logging.INFO)
        coloredlogs.install(level="DEBUG", logger=logger)

    # the chosen environment is "product:profile"
    product, profile = args.cbc_env.split(":", 1)

    install_command = config[args.lerc_env]['lerc_install_cmd']

    device_or_sensor = None

    logger.debug(
        f"using '{profile}' profile via the configured '{product}' product.")
    # resolve the device/sensor for the requested hostname, per product line;
    # any CB connection failure leaves device_or_sensor as None
    cb = None
    try:
        if product == "response":
            cb = CbResponseAPI(profile=profile)
            device_or_sensor = CbSensor_search(profile, args.hostname)
        elif product == "psc" or product == "cbc":
            cb = CbThreatHunterAPI(profile=profile)
            logger.info(f"searching for device...")
            device_or_sensor = find_device_by_hostname(cb, args.hostname)
    except ConnectionError as e:
        logger.critical(f"Couldn't connect to {product} {profile}: {e}")
    except UnauthorizedError as e:
        logger.critical(f"{e}")
    except ServerError as e:
        logger.critical(f"CB ServerError 😒 (try again) : {e}")
    except TimeoutError as e:
        logger.critical(
            f"TimeoutError waiting for CB server 🙄 (try again) : {e}")

    if not device_or_sensor:
        logger.error("could not get device or sensor by hostname.")
        return

    # hand off to the actual deployment step
    result = deploy_lerc(device_or_sensor,
                         install_command,
                         default_lerc_path,
                         interactive=True)
    if result:
        print(result)
Exemple #18
0
def main():
    """Search Carbon Black Response for SIP indicators and raise ACE alerts.

    Loads the indicator specifications (YAML) and field mappings from the
    config, pulls matching indicators from SIP, queries CbR for each one,
    and submits a correlation-mode ACE alert — tagged with the indicator's
    SIP tags — for every indicator that produced process hits.
    """

    parser = argparse.ArgumentParser(
        description="SIP Indicator CbR Search and ACE Alert.")
    parser.add_argument('-d',
                        '--debug',
                        action="store_true",
                        help="set logging to DEBUG",
                        default=False)
    args = parser.parse_args()

    # load config
    config = ConfigParser()
    config.read('etc/config.ini')

    # load SIP indicator specs so we know how to get the indicators we want
    indicator_specs = {}
    with open(config['SIP']['indicator_specifications'], 'r') as stream:
        try:
            indicator_specs = yaml.safe_load(stream)
            logging.info(
                "Successfully loaded indicator specifications: {}".format(
                    indicator_specs))
        except yaml.YAMLError as e:
            logging.error("Couldn't load indicator specs : {}".format(e))
            return

    # Load ACE API
    ace_api.set_default_remote_host(config['ACE']['ace_address'])
    ace_api.set_default_ssl_ca_path(config['ACE']['ca_chain_path'])

    # Create SIP Client and load indicators
    sip_ssl = config['SIP'].getboolean('ssl_verify')
    sc = pysip.Client(config['SIP']['sip_address'],
                      config['SIP']['sip_api_key'],
                      verify=sip_ssl)
    status = indicator_specs[
        'status'] if 'status' in indicator_specs else 'Analyzed'
    indicators = {}
    # fetch the indicators per type; handle_proxy() presumably switches the
    # proxy settings for the service being called next — TODO confirm
    for i_type in indicator_specs['type']:
        handle_proxy(config['SIP'])
        indicators[i_type] = sc.get('/indicators?type={}&status={}'.format(
            i_type, status))

    # load field mappings
    field_map = ConfigParser()
    field_map.read(config['GLOBAL']['field_mappings'])
    sip_cbr_map = field_map['SIP-TO-CBR']
    # NOTE(review): sip_ace_map and cbr_ace_map are loaded but not used below
    sip_ace_map = field_map['SIP-TO-ACE']
    cbr_ace_map = field_map['CBR-TO-ACE']

    submitted_alerts = []

    # Query Carbon Black Response for our indicators
    #cbq = CBquery(profile=config['CbR']['profile'])
    handle_proxy(config['CbR'])
    cb = CbResponseAPI(profile=config['CbR']['profile'])
    for i_type in indicator_specs['type']:
        for i in indicators[i_type]:
            query = '{}:"{}"'.format(sip_cbr_map[i_type], i['value'])
            logging.debug('Querying CbR for indicator:{} query:{}'.format(
                i['id'], query))
            procs = cb.select(Process).where(query).group_by('id')
            if procs:
                # alert ACE
                Alert = ace_api.Analysis(description='CbR - SIP:{}'.format(
                    i['value']),
                                         analysis_mode='correlation',
                                         tool='SipCbrAce')
                print(Alert.description)
                Alert.add_indicator(i['id'])
                # get sip tags and tag Alert
                handle_proxy(config['SIP'])
                i_details = sc.get('/indicators/{}'.format(i['id']))
                handle_proxy(config['CbR'])
                for tag in i_details['tags']:
                    Alert.add_tag(tag)
                # collect observables from up to alert_max_results processes
                alert_details = {}
                alert_details['total_results'] = len(procs)
                max_results = config['GLOBAL'].getint('alert_max_results')
                alert_details['included_results'] = 0
                alert_details['process_details'] = []
                for proc in procs:
                    if alert_details['included_results'] > max_results:
                        break
                    alert_details['process_details'].append(str(proc))
                    alert_details['included_results'] += 1
                    Alert.add_hostname(proc.hostname)
                    Alert.add_md5(proc.process_md5)
                    Alert.add_ipv4(proc.comms_ip)
                    Alert.add_ipv4(proc.interface_ip)
                    Alert.add_process_guid(proc.id)
                    Alert.add_user(proc.username)
                    Alert.add_file_name(proc.process_name)
                    Alert.add_file_path(proc.path)
                    #Alert.add_file_location('{}@{}'.format(proc.hostname, proc.path))
                #Alert.submit_kwargs['details'] = alert_details
                handle_proxy(config['ACE'])
                print(Alert.description)
                submitted_alerts.append(Alert.submit())
                logger.info(
                    "Submitted alert to ACE: {UUID} - URL=https://{HOST}/ace/analysis?direct={UUID}"
                    .format(UUID=Alert.uuid, HOST=Alert.remote_host))

    print(submitted_alerts)
    def __init__(self,
                 access_id="",
                 secret_key="",
                 default_org="",
                 base_url="",
                 out_file="tc.json",
                 sources="",
                 ioc_types="",
                 custom_ioc_key="",
                 feed_url="",
                 cb_server_token="",
                 cb_server_url="https://127.0.0.1",
                 cb_server_ssl_verify=False,
                 ioc_min=None,
                 niceness=None,
                 debug=False,
                 log_file=None,
                 max_iocs=5000):
        """
        Connect to ThreatConnect and Cb Response and store feed settings.

        :param access_id: ThreatConnect API access id
        :param secret_key: ThreatConnect API secret key
        :param default_org: ThreatConnect organisation
        :param base_url: ThreatConnect API base URL
        :param out_file: output feed file name
        :param sources: iterable of source/owner names; "*" as the first
                        entry expands to all owners visible via the API
        :param ioc_types: IOC types to include
        :param custom_ioc_key: key used for custom IOCs
        :param feed_url: URL the generated feed will be served from
        :param cb_server_token: Cb Response API token
        :param cb_server_url: Cb Response server URL
        :param cb_server_ssl_verify: verify the Cb server TLS certificate
        :param ioc_min: minimum IOC rating/threshold — semantics enforced elsewhere
        :param niceness: optional os.nice() value for this process
        :param debug: enable DEBUG-level logging
        :param log_file: log file path (stored only)
        :param max_iocs: cap on the number of IOCs to emit
        """
        logger.info("ThreatConnect Base URL: {0}".format(base_url))

        self.tcapi = ThreatConnect(api_aid=access_id,
                                   api_sec=secret_key,
                                   api_url=base_url,
                                   api_org=default_org)

        self.sources = sources
        self.ioc_min = ioc_min
        self.ioc_types = ioc_types

        logger.info("Configured IOC Types are : {0}".format(self.ioc_types))
        logger.info("Configured IOC Min is  : {0}".format(self.ioc_min))

        self.custom_ioc_key = custom_ioc_key
        self.max_iocs = max_iocs

        # BUG FIX: guard against an empty sources value before indexing —
        # the original raised IndexError for the default sources="".
        if self.sources and self.sources[0] == "*":
            owners = self.tcapi.owners()
            try:
                # retrieve the Owners
                owners.retrieve()
            except RuntimeError:
                logger.error(traceback.format_exc())
                sys.exit(1)
            # iterate through the Owners
            self.sources = [owner.name for owner in owners]

        logger.info("Sources = {0}".format(self.sources))

        self.niceness = niceness
        if self.niceness is not None:
            os.nice(self.niceness)

        self.debug = debug
        if self.debug:
            logger.setLevel(logging.DEBUG)

        self.log_file = log_file
        self.out_file = out_file
        self.feed = None

        self.cb = CbResponseAPI(url=cb_server_url,
                                token=cb_server_token,
                                ssl_verify=cb_server_ssl_verify)

        self.feed_url = feed_url
Exemple #20
0
def search():
    """Sweep each vulnerable workstation for each vulnerable file.

    Reads hostnames and file paths from the two input CSVs, goes live on
    every online sensor and, for each file found: quarantines an encrypted
    copy (via copy_encrypt_binary), deletes the original on the endpoint,
    and records the hit in 'Mal_Files_Found_<watchlist>.csv'.  Hosts whose
    sensor is offline are recorded as OFFLINE and skipped.
    """
    global watchlist, bin_file, api

    save_path = 'C:/Users/SMaiorino/Documents/My_Scripts/Master'
    f_name = os.path.join(save_path,
                          'Mal_Files_Found_{0}.csv'.format(watchlist))

    # Retrieve the necessary filenames and workstations by reading each csv file
    with open(os.path.join(save_path, 'vulnerable-names.csv'), 'r') as n:
        names = n.readlines()

    with open(os.path.join(save_path, 'vulnerable-files.csv'), 'r') as f:
        files = f.readlines()

    print('\n---------------RESULTS---------------\n')

    # Build the API client once: it is identical for every host, so there
    # is no need to reconstruct it on each loop iteration as before.
    api = CbResponseAPI()

    # 'with' guarantees the results file is closed even if a lookup raises.
    with open(f_name, 'w', newline='') as my_file:
        writer = csv.writer(my_file)

        for name in names:
            name = name.replace('\n', '')
            try:
                sensor = api.select(Sensor).where(
                    'hostname:{0}'.format(name)).first()

                # Record Workstations with an Offline Sensor
                if sensor.status == 'Offline':
                    writer.writerow([name, 'OFFLINE'])
                    continue

                # For each endpoint we copy the file contents to a quarantine
                # folder (copy_encrypt_binary uses the escaped 'bin_file'
                # path), delete the file from the endpoint, and log a row:
                #
                #     HOSTNAME    FILE FOUND ON COMPUTER (or OFFLINE)

                # Start the live response session
                with sensor.lr_session() as session:
                    for file in files:
                        # BUGFIX: strip the newline BEFORE deriving bin_file;
                        # the original escaped the path first, so the global
                        # handed to copy_encrypt_binary() kept a trailing '\n'.
                        file = file.replace('\n', '')
                        bin_file = file.replace('\\', '\\\\')
                        try:
                            # Obtain the contents of each file
                            test_file = session.get_file(r'{0}'.format(file))
                            if test_file is not None:
                                copy_encrypt_binary()
                                session.delete_file(r'{0}'.format(file))
                                writer.writerow([name, file])
                                print('File: {0} \nComputer: {1} \n'.format(
                                    file, name))
                                continue
                        except (TimeoutError, ObjectNotFoundError,
                                LiveResponseError, ApiError, ServerError,
                                AttributeError, TypeError):
                            # Best-effort: a failure on one file must not
                            # abort the sweep of the remaining files.
                            pass
            except TimeoutError:
                continue
            except AttributeError:
                # sensor is None when the hostname query returned nothing.
                if sensor is None:
                    continue
                break
    def validate_config(self):
        """Validate the [bridge] and [threatconnect] configuration sections.

        Idempotent: a second call returns True immediately once
        ``self.validated_config`` is set.  On success the numeric bridge
        options are normalized to ``int`` in place, ``self.tc_config`` and
        ``self.cb`` are constructed, and True is returned.  Every failure is
        both logged and echoed to stderr before returning False.
        """
        config_valid = True

        # Only validate once per process lifetime.
        if self.validated_config:
            return True

        self.validated_config = True
        logger.debug("Validating configuration file ...")

        if 'bridge' in self.options:
            self.bridge_options = self.options['bridge']
        else:
            sys.stderr.write(
                "Configuration does not contain a [bridge] section\n")
            logger.error("Configuration does not contain a [bridge] section")
            return False

        # 'debug' accepts several truthy spellings; anything else is False.
        self.debug = self.bridge_options.get(
            'debug', 'F') in ['1', 't', 'T', 'True', 'true']
        log_level = self.bridge_options.get('log_level', 'INFO').upper()
        # Unknown levels silently fall back to INFO; debug wins over log_level.
        log_level = log_level if log_level in [
            "INFO", "WARNING", "DEBUG", "ERROR"
        ] else "INFO"
        self.logger.setLevel(
            logging.DEBUG if self.debug else logging.getLevelName(log_level))

        tc_options = self.options.get('threatconnect', {})
        if not tc_options:
            sys.stderr.write(
                "Configuration does not contain a [threatconnect] section or section is empty.\n"
            )
            logger.error(
                "configuration does not contain a [threatconnect] section or section is empty."
            )
            return False
        try:
            # ThreatConnectConfig performs its own per-field validation and
            # raises on any bad/missing value.
            self.tc_config = ThreatConnectConfig(**tc_options)
        except Exception as e:
            sys.stderr.write("Error: {0}\n".format(e))
            logger.error(e)
            return False

        self.pretty_print_json = self.bridge_options.get(
            'pretty_print_json', 'F') in ['1', 't', 'T', 'True', 'true']

        # Informational only: report whether requests will use a custom CA bundle.
        ca_file = os.environ.get("REQUESTS_CA_BUNDLE", None)
        if ca_file:
            logger.info("Using CA Cert file: {0}".format(ca_file))
        else:
            logger.info("No CA Cert file found.")

        opts = self.bridge_options
        msgs = []

        # Each numeric option is checked as a string first (configparser
        # values are str) and converted to int in place when valid.
        item = 'listener_port'
        if not (item in opts and opts[item].isdigit()
                and 0 < int(opts[item]) <= 65535):
            msgs.append(
                'the config option listener_port is required and must be a valid port number'
            )
            config_valid = False
        else:
            opts[item] = int(opts[item])

        item = 'listener_address'
        if not (item in opts and opts[item]):
            msgs.append(
                'the config option listener_address is required and cannot be empty'
            )
            config_valid = False

        item = 'feed_retrieval_minutes'
        if not (item in opts and opts[item].isdigit() and 0 < int(opts[item])):
            msgs.append(
                'the config option feed_retrieval_minutes is required and must be greater than 1'
            )
            config_valid = False
        else:
            opts[item] = int(opts[item])

        # Create a cbapi instance
        server_url = self.get_config_string("carbonblack_server_url",
                                            "https://127.0.0.1")
        server_token = self.get_config_string("carbonblack_server_token", "")
        try:
            self.cb = CbResponseAPI(url=server_url,
                                    token=server_token,
                                    ssl_verify=False,
                                    integration_name=self.integration_name)
            # info() forces a round-trip so connectivity problems surface here.
            self.cb.info()
        except Exception:
            logger.error(traceback.format_exc())
            return False

        # Emit every accumulated validation message at once.
        if not config_valid:
            for msg in msgs:
                sys.stderr.write("%s\n" % msg)
                logger.error(msg)
            return False
        else:
            return True
def main():
    """CLI entry point: ban or export MD5 hashes on a Cb Response server.

    Validates the server URL (http/https scheme) and API token (40 hex
    chars), then runs in one of three modes: export the current ban list
    (-x), ban a single hash (-m), or ban every hash from a text/csv list
    file (-l).  Exits 100 if any MD5 error was recorded during list mode.
    """
    init()
    parser = argparse.ArgumentParser(description=banner)
    parser.add_argument(
        "-u",
        "--url",
        dest="server_url",
        help="Cb Response Server URL [e.g. \"http://cb.example.com\"]")
    parser.add_argument("-t",
                        "--token",
                        dest="api_token",
                        help="Cb Response API Token")
    parser.add_argument("-m",
                        "--md5hash",
                        dest="md5hash",
                        help="MD5 hash of the binary to be banned")
    parser.add_argument("-n",
                        "--notes",
                        dest="text",
                        help="Notes for banning the binary")
    parser.add_argument(
        "-l",
        "--list",
        dest="list_file",
        help="List file of binaries to be banned. Also accepts csv files.")
    parser.add_argument("-x",
                        "--export",
                        help="Export ban list to csv",
                        action='store_true')

    opts = parser.parse_args()
    note = "Banned from API"
    global cb

    try:
        # Basic sanity checks before touching the network: the URL must carry
        # an http(s) scheme and the token must be a 40-char hex string.
        if not opts.server_url or (not opts.server_url.startswith("http://")
                                   and
                                   not opts.server_url.startswith("https://")):
            raise InvalidApiTokenError(
                sys.exc_info(),
                "Invalid server URL {}".format(opts.server_url))
        elif not opts.api_token or (re.match(r"([a-fA-F\d]{40})",
                                             opts.api_token) is None):
            raise InvalidApiTokenError(
                sys.exc_info(), "Invalid API Token {}".format(opts.api_token))
        else:
            cb = CbResponseAPI(opts.server_url,
                               token=opts.api_token,
                               ssl_verify=False)
    except InvalidApiTokenError as iate:
        root.exception(iate)
        sys.exit(iate.exit_code)

    if opts.export:
        export_mode_msg = "Export mode. Fetching banned list from {}".format(
            opts.server_url)
        print(export_mode_msg)
        root.info(export_mode_msg)
        export_to_csv()
        sys.exit()

    if opts.text:
        note = opts.text

    if opts.md5hash:
        single_ban_mode_msg = "Single hash ban mode."
        print(single_ban_mode_msg)
        root.info(single_ban_mode_msg)
        ban_hash(opts.md5hash, note)
    elif opts.list_file:
        list_ban_mode = "Multiple hash ban mode. Reading list file"
        print(list_ban_mode)
        root.info(list_ban_mode)
        # BUGFIX: open in text mode, not 'rb' -- csv.DictReader and
        # str.rstrip('\n') both require str lines under Python 3.  'with'
        # also guarantees the file is closed on every path.
        with open(opts.list_file, 'r') as hash_list:
            if os.path.splitext(hash_list.name)[1] == '.csv':
                # Materialize the reader: csv.DictReader has no len().
                rows = list(csv.DictReader(hash_list))
                found_msg = "Found {0} hashes in {1}".format(
                    len(rows), hash_list.name)
                print(found_msg)
                root.info(found_msg)
                for h in rows:
                    ban_hash(h['md5'], h['Note'])
            else:
                lines = [line.rstrip('\n') for line in hash_list]
                # BUGFIX: the original announced this count via 'lines'
                # before the list was built (guaranteed NameError); announce
                # it here once the list actually exists.
                ban_text = "Banning {} hashes. Reading from list file.".format(
                    len(lines))
                print(ban_text)
                root.info(ban_text)
                found_msg = "Found {0} hashes in {1}".format(
                    len(lines), hash_list.name)
                print(found_msg)
                root.info(found_msg)
                for h in lines:
                    ban_hash(h, note)
        if md5_error_found:
            sys.exit(100)
    else:
        # No actionable arguments: show help and exit.
        parser.parse_args(['-h'])
Example #23
0
    def validate_config(self):
        """Validate the [bridge], [auth] and [sources] config sections.

        Normalizes numeric options to ``int`` in place, converts the
        disable_ioc_* flags to booleans, builds the cbapi client, and
        returns True only when every check passes.  All accumulated
        failures are logged (and echoed to stderr) before returning False.
        """
        self.validated_config = True
        logger.info("Validating configuration file ...")

        if 'bridge' in self.options:
            self.bridge_options = self.options['bridge']
        else:
            logger.error("Configuration does not contain a [bridge] section")
            return False

        if 'auth' in self.options:
            self.bridge_auth = self.options['auth']
        else:
            logger.error("configuration does not contain a [auth] section")
            return False

        if 'sources' in self.options:
            self.api_urns = self.options["sources"]
        else:
            logger.error("configuration does not contain a [sources] section")
            return False

        opts = self.bridge_options
        auth = self.bridge_auth
        config_valid = True
        msgs = []

        if len(self.api_urns) <= 0:
            msgs.append('No data sources are configured under [sources]')
            config_valid = False

        item = 'listener_port'
        if not (item in opts and opts[item].isdigit()
                and 0 < int(opts[item]) <= 65535):
            msgs.append(
                'the config option listener_port is required and must be a valid port number'
            )
            config_valid = False
        else:
            opts[item] = int(opts[item])

        item = 'listener_address'
        # BUGFIX: use '!=' rather than 'is not' -- identity comparison with a
        # string literal relies on interning and raises a SyntaxWarning on
        # modern Pythons.
        if not (item in opts and opts[item] != ""):
            msgs.append(
                'the config option listener_address is required and cannot be empty'
            )
            config_valid = False

        item = 'feed_retrieval_minutes'
        if not (item in opts and opts[item].isdigit() and 0 < int(opts[item])):
            msgs.append(
                'the config option feed_retrieval_minutes is required and must be greater than 1'
            )
            config_valid = False
        else:
            opts[item] = int(opts[item])

        item = 'ioc_min_score'
        if item in opts:
            if not (opts[item].isdigit() and 0 <= int(opts[item]) <= 100):
                msgs.append(
                    'The config option ioc_min_score must be a number in the range 0 - 100'
                )
                config_valid = False
            else:
                opts[item] = int(opts[item])
        else:
            logger.warning("No value provided for ioc_min_score. Using 1")
            opts[item] = 1

        item = 'api_key'
        if not (item in auth and auth[item].isdigit()):
            msgs.append(
                'The config option api_key is required under section [auth] and must be a numeric value'
            )
            config_valid = False

        item = 'url'
        if not (item in auth and auth[item] != ""):
            msgs.append(
                'The config option url is required under section [auth] and cannot be blank'
            )
            config_valid = False

        if 'secret_key_encrypted' in auth and 'secret_key' not in auth:
            msgs.append(
                "Encrypted API secret key no longer supported. Use unencrypted 'secret_key' form."
            )
            config_valid = False
        elif 'secret_key' in auth and auth['secret_key'] != "":
            auth['api_secret_key'] = self.bridge_auth.get("secret_key")
        else:
            msgs.append(
                'The config option secret_key under section [auth] must be provided'
            )
            config_valid = False

        # Convert all 1 or 0 values to true/false
        opts["ignore_ioc_md5"] = opts.get("disable_ioc_md5", "0") == "1"
        opts["ignore_ioc_ip"] = opts.get("disable_ioc_ip", "0") == "1"
        opts["ignore_ioc_host"] = opts.get("disable_ioc_host", "0") == "1"

        # create a cbapi instance
        ssl_verify = self.get_config_boolean("carbonblack_server_sslverify",
                                             False)
        server_url = self.get_config_string("carbonblack_server_url",
                                            "https://127.0.0.1")
        server_token = self.get_config_string("carbonblack_server_token", "")
        try:
            # BUGFIX: honor the configured sslverify setting -- it was
            # computed above but then ignored (hard-coded False).
            self.cb = CbResponseAPI(url=server_url,
                                    token=server_token,
                                    ssl_verify=ssl_verify,
                                    integration_name=self.integration_name)
            # Force a round-trip so connectivity problems surface here.
            self.cb.info()
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # still propagate.
            logger.error(traceback.format_exc())
            return False

        if not config_valid:
            for msg in msgs:
                sys.stderr.write("%s\n" % msg)
                logger.error(msg)
            return False
        else:
            return True
Example #24
0
def main():
    """Main entry point for cbinterface.

    Builds the full CLI (query/investigate/live-response/session/enumerate
    subcommands), resolves which configured Cb environment profile to use,
    handles the configuration-only flags (default environment, timezones,
    playbook/remediation templates), then connects to the selected product
    and dispatches the parsed arguments to the matching executor.
    """

    # configure logging #
    logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - [%(levelname)s] %(message)s")
    coloredlogs.install(level="INFO", logger=logging.getLogger())

    # set clean exit signal
    signal.signal(signal.SIGINT, clean_exit)

    # load carbonblack environment profiles #
    configured_environments = load_configured_environments()
    environments = []
    # create human friendly options for the CLI
    for product, profiles in configured_environments.items():
        for profile in profiles:
            environments.append(f"{product}:{profile}")

    # chose the default environment
    default_product_name = get_default_cbapi_product()
    default_profile_name = get_default_cbapi_profile()
    default_environments = [env for env in environments if env.startswith(default_product_name)]
    default_environment = f"{default_product_name}:{default_profile_name}"
    # Fall back to the first configured environment when the saved default
    # is not among the configured profiles.
    default_environment = (
        default_environment if default_environments and default_environment in default_environments else environments[0]
    )

    parser = argparse.ArgumentParser(description="Interface to Carbon Black for IDR teams.")
    parser.add_argument("-d", "--debug", action="store_true", help="Turn on debug logging.")
    parser.add_argument(
        "-e",
        "--environment",
        action="store",
        choices=environments,
        default=default_environment,
        help=f"specify an environment to work with. Default={default_environment}",
    )
    parser.add_argument(
        "-sde",
        "--set-default-environment",
        action="store",
        choices=environments,
        help="configure your default Cb environment",
    )
    parser.add_argument(
        "-tz",
        "--time-zone",
        action="store",
        help='specify the timezone to override defaults. ex. "US/Eastern" or "Europe/Rome"',
    )
    parser.add_argument(
        "--set-default-timezone",
        action="store",
        help='configure your default timezone. ex. "US/Eastern" or "Europe/Rome"',
    )

    subparsers = parser.add_subparsers(dest="command")

    # query parser
    parser_query = subparsers.add_parser(
        "query", aliases=["pq", "q"], help="Execute a process search query. 'query -h' for more"
    )
    parser_query.add_argument("query", help="the process search query you'd like to execute")
    parser_query.add_argument(
        "-s",
        "--start-time",
        action="store",
        help="Start time of the process.  Format:'Y-m-d H:M:S' UTC",
    )
    parser_query.add_argument(
        "-e",
        "--last-time",
        action="store",
        help="Narrow to processes with start times BEFORE this end/last time. Format:'Y-m-d H:M:S' UTC",
    )
    parser_query.add_argument(
        "-nw",
        "--no-warnings",
        action="store_true",
        default=False,
        help="Don't warn before printing large query results",
    )
    parser_query.add_argument(
        "-ad",
        "--all-details",
        action="store_true",
        default=False,
        help="Print all available process info (all fields).",
    )
    parser_query.add_argument(
        "--facets", action="store_true", default=None, help="Retrieve statistical facets for this query."
    )

    # process inspection/investigation parser
    parser_inspect = subparsers.add_parser(
        "investigate", aliases=["proc", "i"], help="Investigate process events and metadata."
    )
    parser_inspect.add_argument(
        "process_guid_options", help="the process GUID/segment to inspect. Segment is optional."
    )
    parser_inspect.add_argument(
        "-i", "--proc-info", dest="inspect_proc_info", action="store_true", help="show binary and process information"
    )
    parser_inspect.add_argument(
        "-w",
        "--walk-tree",
        dest="walk_and_inspect_tree",
        action="store_true",
        help="Recursively walk, print, and inspect the process tree. Specified arguments (ex. filemods) applied at every process in tree. WARNING: can pull large datasets.",
    )
    parser_inspect.add_argument(
        "-t",
        "--process-tree",
        dest="inspect_process_tree",
        action="store_true",
        help="print the process tree with this process as the root.",
    )
    parser_inspect.add_argument(
        "-a",
        "--process-ancestry",
        dest="inspect_process_ancestry",
        action="store_true",
        help="print the the process ancestry",
    )
    parser_inspect.add_argument(
        "-c",
        "--show-children",
        dest="inspect_children",
        action="store_true",
        help="only print process children event details",
    )
    parser_inspect.add_argument(
        "-nc", "--netconns", dest="inspect_netconns", action="store_true", help="print network connections"
    )
    parser_inspect.add_argument(
        "-fm", "--filemods", dest="inspect_filemods", action="store_true", help="print file modifications"
    )
    parser_inspect.add_argument(
        "-rm", "--regmods", dest="inspect_regmods", action="store_true", help="print registry modifications"
    )
    parser_inspect.add_argument(
        "-ml", "--modloads", dest="inspect_modloads", action="store_true", help="print modloads"
    )
    parser_inspect.add_argument(
        "-sl", "--scriptloads", dest="inspect_scriptloads", action="store_true", help="print scriptloads (PSC)"
    )
    parser_inspect.add_argument(
        "-cp", "--crossprocs", dest="inspect_crossprocs", action="store_true", help="print crossprocs"
    )
    parser_inspect.add_argument(
        "-rpe",
        "--raw-print-events",
        action="store_true",
        default=False,
        help="do not format Cb events onto a single line. Print them the way Cb does by default.",
    )
    # parser_inspect.add_argument('-warn', '--give-warnings', action='store_true', default=False,
    #                         help="Warn before printing large datasets/results")
    parser_inspect.add_argument(
        "--json", action="store_true", help="Combine all results into json document and print the result."
    )
    parser_inspect.add_argument(
        "--segment-limit",
        action="store",
        type=int,
        default=None,
        help="stop processing events into json after this many process segments",
    )
    parser_inspect.add_argument("-es", "--event-search", action="store", help="Search process events.")
    parser_inspect.add_argument(
        "-st",
        "--start-time",
        action="store",
        help="Return events that occurred AFTER this start time.  Format:'Y-m-d H:M:S' UTC",
    )
    parser_inspect.add_argument(
        "-et",
        "--end-time",
        action="store",
        help="Return events that occurred BEFORE this end time. Format:'Y-m-d H:M:S' UTC",
    )

    # live response parser
    parser_lr = subparsers.add_parser(
        "live-response", aliases=["lr"], help="Perform live response actions on a device/sensor."
    )
    parser_lr.add_argument("name_or_id", help="the hostname or sensor/device id to go live with.")
    parser_lr.add_argument(
        "-e", "--execute-command", action="store", help="Execute this command on the sensor. NOTE: waits for output."
    )
    parser_lr.add_argument("-cr", "--create-regkey", action="store", help="Create this regkey.")
    parser_lr.add_argument("-sr", "--set-regkey-value", action="append", help="Set this regkey value.")
    # Product-specific live-response flags are only registered when that
    # product has at least one configured profile.
    if configured_environments["response"]:
        parser_lr.add_argument(
            "-i",
            "--sensor-isolation-toggle",
            action="store_true",
            help="Sensor hostname/ID to isolation/unisolate (on/off). (CB Response)",
        )
    if configured_environments["psc"]:
        parser_lr.add_argument(
            "-q",
            "--quarantine",
            action="store_true",
            default=False,
            help="Quarantine the devices returned by the query. (PSC)",
        )
        parser_lr.add_argument(
            "-uq",
            "--un_quarantine",
            action="store_true",
            default=False,
            help="UN-Quarantine the devices returned by the query. (PSC)",
        )

    # live response subparser
    lr_subparsers = parser_lr.add_subparsers(dest="live_response_command")

    # live response put file parser
    parser_put_file = lr_subparsers.add_parser("put", help="Put a file on the device/sensor.")
    parser_put_file.add_argument("local_filepath", action="store", help="Path to the file.")
    parser_put_file.add_argument("sensor_write_filepath", action="store", help="Path to write the file on the sensor.")

    # live response playbook parser
    parser_playbook = lr_subparsers.add_parser(
        "playbook", aliases=["pb", "play"], help="Execute a live response playbook script."
    )
    parser_playbook.add_argument(
        "-f", "--playbook-configpath", action="store", help="Path to a playbook config file to execute."
    )
    playbook_map = get_playbook_map()
    playbook_names = [p["name"] for _, p in playbook_map.items()]
    parser_playbook.add_argument(
        "-p",
        "--playbook-name",
        action="store",
        choices=playbook_names,
        help="The name of a configured playbook to execute.",
    )
    parser_playbook.add_argument("-l", "--list-playbooks", action="store_true", help="List configured playbooks.")
    parser_playbook.add_argument(
        "--write-template", action="store_true", help="write a playbook template file to use as example."
    )

    # live response collect parser
    parser_collect = lr_subparsers.add_parser("collect", help="Collect artifacts from hosts.")
    parser_collect.add_argument(
        "-i", "--sensor-info", dest="sensor_info", action="store_true", help="print default sensor information"
    )
    parser_collect.add_argument("-p", "--process-list", action="store_true", help="show processes running on sensor")
    parser_collect.add_argument("-f", "--file", action="store", help="collect file at this path on sensor")
    parser_collect.add_argument(
        "-lr", "--regkeypath", action="store", help="List all registry values from the specified registry key."
    )
    parser_collect.add_argument(
        "-r", "--regkeyvalue", action="store", help="Returns the associated value of the specified registry key."
    )
    parser_collect.add_argument(
        "-ld", "--list-directory", action="store", help="List the contents of a directory on the sensor."
    )
    parser_collect.add_argument(
        "-wd", "--walk-directory", action="store", help="List the contents of a directory on the sensor."
    )
    parser_collect.add_argument("--drives", action="store_true", help="Get logical drives on this sensor.")
    parser_collect.add_argument(
        "--memdump", action="store_true", help="Use Cb to dump sensor memory and collect the memdump."
    )

    # live response remediation parser
    parser_remediate = lr_subparsers.add_parser(
        "remediate", help="Perform remdiation (delete/kill) actions on device/sensor."
    )
    parser_remediate.add_argument(
        "-f", "--delete-file-path", action="store", help="delete the file at this path on the sensor"
    )
    parser_remediate.add_argument(
        "-kpname", "--kill-process-name", action="store", help="kill all processes with this name"
    )
    parser_remediate.add_argument("-kpid", "--kill-process-id", action="store", help="kill the process with this ID")
    parser_remediate.add_argument("-drv", "--delete-regkeyvalue", action="store", help="Delete the regkey value.")
    parser_remediate.add_argument(
        "--delete-entire-regkey", action="store", help="Delete the registry key and all values. BE CAREFUL."
    )
    parser_remediate.add_argument("-rs", "--remediation-script", action="store", help="Path to a remediaiton script.")
    parser_remediate.add_argument("--write-template", action="store_true", help="write a remediation template.")

    # session parser - NOTE: functionality is limited on the PSC side, and it's specifically annoying that
    # we can not get a list of active psc lr sessions... or at least I haven't figure out how to do that.
    parser_session = subparsers.add_parser("session", help="Interact with Cb live response server sessions.")
    if configured_environments["response"]:
        parser_session.add_argument(
            "-lss",
            "--list-sensor-sessions",
            action="store",
            help="list all CbLR sessions associated to this sensor ID (Response only).",
        )
    parser_session.add_argument(
        "-gsc", "--get-session-command-list", action="store", help="list commands associated to this session"
    )
    if configured_environments["response"]:
        parser_session.add_argument(
            "-a", "--list-all-sessions", action="store_true", help="list all CbLR sessions (Response only)."
        )
    parser_session.add_argument("-g", "--get-session", action="store", help="get live response session by id.")
    parser_session.add_argument("-c", "--close-session", action="store", help="close live response session by id.")
    parser_session.add_argument(
        "-gcr", "--get-command-result", action="store", help="get any results for this command."
    )
    parser_session.add_argument(
        "-f", "--get-file-content", action="store", help="byte stream any file content to stdout. (use a pipe)"
    )

    # enumeration parser
    parser_enumeration = subparsers.add_parser(
        "enumerate", aliases=["e"], help="Data enumerations for answering common questions."
    )
    parser_enumeration.add_argument(
        "-lh",
        "--logon-history",
        action="store",
        help="Given process username or device name, roughly enumerate logon history (Windows OS).",
    )

    # only add independent product args if product is a configured option
    if configured_environments["response"]:
        add_response_arguments_to_parser(subparsers)
    if configured_environments["psc"]:
        add_psc_arguments_to_parser(subparsers)

    # Enable shell tab-completion before parsing.
    argcomplete.autocomplete(parser)
    args = parser.parse_args()

    if args.debug:
        # Keep urllib3's connection-pool chatter at INFO even in debug mode.
        logging.getLogger("urllib3.connectionpool").setLevel(logging.INFO)
        coloredlogs.install(level="DEBUG", logger=logging.getLogger())

    if args.time_zone:
        set_timezone(args.time_zone)

    if args.set_default_timezone:
        set_timezone(args.set_default_timezone)
        save_configuration()

    if args.set_default_environment:
        product, profile = args.set_default_environment.split(":", 1)
        set_default_cbapi_product(product)
        set_default_cbapi_profile(profile)
        save_configuration()

    # Functionality that doesn't require a Cb connection.
    if args.command and (args.command.lower() == "lr" or args.command.lower().startswith("live")):
        if args.live_response_command and (
            args.live_response_command.startswith("play") or args.live_response_command == "pb"
        ):
            if args.list_playbooks:
                print(f"\nConfigured Playbooks:")
                for pb_key, pb_metadata in playbook_map.items():
                    print(f"\t{pb_metadata['name']} : {pb_metadata['description']}")
                print()
                return True
            if args.write_template:
                template_path = write_playbook_template()
                if os.path.exists(template_path):
                    LOGGER.info(f" + wrote {template_path}")
                return True
        if args.live_response_command and args.live_response_command.startswith("r"):
            if args.write_template:
                template_path = write_remediation_template()
                if os.path.exists(template_path):
                    LOGGER.info(f" + wrote {template_path}")
                return True

    # Connect and execute
    product, profile = args.environment.split(":", 1)
    LOGGER.debug(f"using '{profile}' profile via the configured '{product}' product.")
    try:
        if product == "response":
            cb = CbResponseAPI(profile=profile)
            execute_response_arguments(cb, args)

        elif product == "psc":
            cb = CbThreatHunterAPI(profile=profile)
            execute_threathunter_arguments(cb, args)
    except ConnectionError as e:
        LOGGER.critical(f"Couldn't connect to {product} {profile}: {e}")
    except UnauthorizedError as e:
        LOGGER.critical(f"{e}")
    except ServerError as e:
        LOGGER.critical(f"CB ServerError 😒 (try again) : {e}")
    except TimeoutError as e:
        LOGGER.critical(f"TimeoutError waiting for CB server 🙄 (try again) : {e}")
    def run(self):
        """Run the Cb Infoblox bridge until interrupted.

        Validates configuration, connects to the Cb Response server, wires up
        the event source (local syslog server or cloud REST poller), the
        built-in feed, and the configured response actions (sensor flush,
        kill-process, isolate), then blocks in a sleep loop until Control-C.
        Always terminates the process via ``sys.exit(1)`` so a supervising
        service manager restarts the connector.

        Reads: self.bridge_options, self.integration_name, self.worker_queue.
        Sets:  self.cb, self.streaming_*, self.use_cloud_api, and (cloud mode)
               self.api_token, self.poll_interval, self.api_route.
        """
        self.validate_config()

        # Pre-bind both possible event sources so the start logic below can
        # never reference an unbound local, whichever mode is configured.
        syslog_server = None
        rest_poller = None

        try:
            logger.warning("CB Infoblox Bridge Starting")

            # Any value other than the literal string "0" enables SSL
            # verification of the Cb Response server certificate.
            sslverify = self.bridge_options.get(
                'carbonblack_server_sslverify', "0") != "0"

            self.cb = CbResponseAPI(
                url=self.bridge_options['carbonblack_server_url'],
                token=self.bridge_options['carbonblack_server_token'],
                ssl_verify=sslverify,
                integration_name=self.integration_name)
            # Fail fast (into the except below) if the server is unreachable.
            self.cb.info()

            self.streaming_host = self.bridge_options.get(
                'carbonblack_streaming_host')
            self.streaming_username = self.bridge_options.get(
                'carbonblack_streaming_username')
            self.streaming_password = self.bridge_options.get(
                'carbonblack_streaming_password')

            # Config value is a string holding an integer; non-zero selects
            # the cloud REST poller instead of the local syslog server.
            self.use_cloud_api = bool(int(
                self.bridge_options.get('use_cloud_api', '0')))

            # Start the syslog server normally, otherwise start the rest poller.
            if not self.use_cloud_api:
                syslog_server = SyslogServer(10240, self.worker_queue)
            else:
                self.api_token = self.bridge_options.get(
                    'api_token', "PASSWORD")
                self.poll_interval = self.bridge_options.get(
                    'rest_poll_interval', "5M")
                self.api_route = self.bridge_options.get('api_route', "")
                logger.info("starting rest poller")
                rest_poller = RestPoller(self.api_route,
                                         self.api_token,
                                         worker_queue=self.worker_queue,
                                         time_increment=self.poll_interval)

            message_broker = FanOutMessage(self.cb, self.worker_queue)

            # Set up the built-in feed.
            feed_thread = FeedAction(self.cb, self.bridge_options)
            feed_thread.start()

            # Prime the Flask app once so the feed endpoints are ready
            # before any events arrive.
            ctx = feed_thread.flask_feed.app.test_request_context()
            ctx.push()
            feed_thread.flask_feed.app.preprocess_request()
            ctx.pop()

            logger.info("flask ready")

            feed_id = feed_thread.get_or_create_feed()

            #TODO revisit
            #if self.bridge_options.get('do_alert', False):
            #    self._set_alert_action(feed_id)

            # Note: it is important to keep the relative order stable here.
            # we want to make sure that the Cb sensor flush occurs first, before the feed entry is created
            # and before any other actions are taken (isolation or process termination)

            # We will always flush the sensor that triggered the action, so that we get the most up-to-date
            # information into the Cb console.
            flusher = FlushAction(self.cb)

            message_broker.add_response_action(flusher)
            message_broker.add_response_action(feed_thread)

            # Conditionally create a kill-process action based on the configuration file.
            kill_option = self.bridge_options.get('do_kill', None)
            if kill_option == 'api':
                kill_process_thread = ApiKillProcessAction(self.cb)
                kill_process_thread.start()
                message_broker.add_response_action(kill_process_thread)
            elif kill_option == 'streaming':
                #
                # For some reason this must be imported here otherwise the event registry thread does not start
                #
                from streaming_kill_process import StreamingKillProcessAction
                kill_streaming_action = StreamingKillProcessAction(
                    self.cb, self.streaming_host, self.streaming_username,
                    self.streaming_password)
                message_broker.add_response_action(kill_streaming_action)

            if self.bridge_options.get('do_isolate', False):
                isolator = IsolateAction(self.cb)
                message_broker.add_response_action(isolator)

            # once everything is up & running, start the message broker then the event source
            message_broker.start()
            if self.use_cloud_api:
                rest_poller.start()
            else:
                syslog_server.start()

            logger.info("Starting event loop")

            try:
                while True:
                    time.sleep(5)
            except KeyboardInterrupt:
                logger.warning("Stopping Cb Infoblox Connector due to Control-C")

            logger.warning("Cb Infoblox Connector Stopping")
        except Exception:
            # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
            # are not swallowed here; real failures still log a full traceback.
            logger.error(traceback.format_exc())

        # Exit non-zero unconditionally so a supervising service manager
        # restarts the bridge.
        sys.exit(1)