def _get_or_create_session(self, sensor_id):
    """Return an (id, session data) pair for a Live Response session on *sensor_id*.

    Reuses an existing pending/active session when one is present; otherwise
    creates a new session and waits up to six minutes for it to go active.
    Closes the session and raises TimeoutError if it never activates.
    """
    all_sessions = self._cb.get_object(
        "{cblr_base}/session?active_only=true".format(cblr_base=self.cblr_base))
    existing = next(
        (sess for sess in all_sessions
         if sess["sensor_id"] == sensor_id and sess["status"] in ("pending", "active")),
        None)
    if existing is not None:
        session_id = existing["id"]
    else:
        session_id = self._create_session(sensor_id)

    session_url = "{cblr_base}/session/{0}".format(session_id, cblr_base=self.cblr_base)
    try:
        session_data = poll_status(self._cb, session_url,
                                   desired_status="active", delay=1, timeout=360)
    except (ObjectNotFoundError, TimeoutError):
        # "close" the session, otherwise it will stay in a pending state
        self._close_session(session_id)
        # the Cb server will return a 404 if we don't establish a session in time,
        # so convert this to a "timeout"
        raise TimeoutError(
            uri=session_url,
            message="Could not establish session with sensor {0}".format(sensor_id),
            error_code=404)
    return session_id, session_data
def _get_or_create_session(self, sensor_id):
    """Reuse a pending/active Live Response session for *sensor_id*, or create one.

    Waits for the session to become active; converts the server's 404
    (session never established) into a TimeoutError.
    """
    matching = [sess for sess in self._cb.get_object("/api/v1/cblr/session")
                if sess["sensor_id"] == sensor_id
                and sess["status"] in ("pending", "active")]
    session_id = matching[0]["id"] if matching else self._create_session(sensor_id)

    try:
        session_data = poll_status(self._cb,
                                   "/api/v1/cblr/session/{0}".format(session_id),
                                   desired_status="active")
    except ObjectNotFoundError:
        # the Cb server will return a 404 if we don't establish a session in time,
        # so convert this to a "timeout"
        raise TimeoutError(
            uri="/api/v1/cblr/session/{0}".format(session_id),
            message="Could not establish session with sensor {0}".format(sensor_id),
            error_code=404)
    return session_id, session_data
def _lr_post_command(self, data):
    """POST a Live Response command, re-establishing the session on 404s.

    Args:
        data (dict): Command request body. If it contains "name", that
            command must be in the sensor's supported_commands list.

    Returns:
        object: The server's response to the command.

    Raises:
        ApiError: If the command is unsupported, or the server returned a
            404 that does not indicate a stale sensor/session.
        TimeoutError: If the command still fails after MAX_RETRY_COUNT
            session re-establishment attempts.
    """
    retries = self.MAX_RETRY_COUNT

    if "name" in data and data["name"] not in self.session_data["supported_commands"]:
        raise ApiError("Command {0} not supported by this sensor".format(data["name"]))

    while retries:
        try:
            data["session_id"] = self.session_id
            resp = self._cb.post_object(
                "{cblr_base}/session/{0}/command".format(self.session_id,
                                                         cblr_base=self.cblr_base),
                data)
        except ObjectNotFoundError as e:
            # A 404 whose message names the Sensor/Session means the session
            # went away underneath us: rebuild it and retry the command.
            if e.message.startswith("Sensor") or e.message.startswith("Session"):
                self.session_id, self.session_data = \
                    self._cblr_manager._get_or_create_session(self.sensor_id)
                retries -= 1
                continue
            else:
                try:
                    error_message = json.loads(e.message)
                    if error_message["status"] == "NOT_FOUND":
                        self.session_id, self.session_data = \
                            self._cblr_manager._get_or_create_session(self.sensor_id)
                        retries -= 1
                        continue
                except Exception:
                    # Message wasn't JSON (or lacked "status"): fall through to
                    # the generic 404 error. Narrowed from a bare `except:` so
                    # KeyboardInterrupt/SystemExit are not swallowed here.
                    pass
                raise ApiError("Received 404 error from server: {0}".format(e.message))
        else:
            return resp

    # Use .get() so a missing "name" can't raise KeyError and mask the timeout.
    raise TimeoutError(message="Command {0} failed after {1} retries".format(
        data.get("name"), self.MAX_RETRY_COUNT))
def _search(self, start=0, rows=0):
    """Generator yielding process search results for this query.

    Args:
        start (int): Zero-based index of the first result to fetch.
        rows (int): Maximum number of results to yield (0 = no limit).

    Raises:
        TimeoutError: If the user-specified timeout fires while waiting
            for the server-side search job to finish.
    """
    if not self._query_token:
        self._submit()

    # Block until the server-side search job completes (or our timeout fires).
    while self._still_querying():
        time.sleep(.5)
    if self._timed_out:
        raise TimeoutError(
            message="user-specified timeout exceeded while waiting for results")

    log.debug("Pulling results, timed_out={}".format(self._timed_out))

    base_url = "/threathunter/search/v1/orgs/{}/processes/search_jobs/{}/results".format(
        self._cb.credentials.org_key, self._query_token)
    cursor = start
    yielded = 0

    while True:
        # Batch gets to reduce API calls
        page_url = '{}?start={}&rows={}'.format(base_url, cursor, 10)
        page = self._cb.get_object(page_url, query_parameters={})
        self._total_results = page.get('response_header', {}).get('num_available', 0)
        self._count_valid = True

        hit_row_limit = False
        for item in page.get('data', []):
            yield item
            cursor += 1
            yielded += 1
            if rows and yielded >= rows:
                hit_row_limit = True
                break

        finished = hit_row_limit or cursor >= self._total_results
        log.debug("current: {}, total_results: {}".format(cursor, self._total_results))
        if finished:
            break
def poll_status(cb, url, desired_status="complete", timeout=120, delay=0.5):
    """Poll *url* until its "status" field reaches *desired_status*.

    Args:
        cb: API object used to issue GET requests (must expose get_object()).
        url (str): The URL to poll.
        desired_status (str): Status value that ends polling successfully.
        timeout (float): Maximum number of seconds to keep polling.
        delay (float): Seconds to sleep between polls.

    Returns:
        dict: The response whose "status" matched *desired_status*.

    Raises:
        LiveResponseError: If the server reports an "error" status.
        TimeoutError: If *desired_status* is not seen within *timeout* seconds.
    """
    start_time = time.time()
    # (Removed a dead, never-assigned `status` local that the old loop
    # condition tested; the time bound alone controls the loop.)
    while time.time() - start_time < timeout:
        res = cb.get_object(url)
        if res["status"] == desired_status:
            return res
        elif res["status"] == "error":
            raise LiveResponseError(res)
        else:
            time.sleep(delay)
    # Pass uri as a keyword for consistency with the other poll_status variants.
    raise TimeoutError(uri=url, message="timeout polling for Live Response")
def _get_or_create_session(self, sensor_id):
    """Create a Live Response session for *sensor_id* and wait for it to go ACTIVE.

    On any failure the session is closed (so it doesn't linger pending) and a
    TimeoutError with a 404 error code is raised instead.
    """
    session_id = self._create_session(sensor_id)
    session_url = "{cblr_base}/session/{0}".format(session_id, cblr_base=self.cblr_base)
    try:
        session_data = poll_status(self._cb, session_url,
                                   desired_status="ACTIVE", delay=1, timeout=360)
    except Exception:
        # "close" the session, otherwise it will stay in a pending state
        self._close_session(session_id)
        # the Cb server will return a 404 if we don't establish a session in time,
        # so convert this to a "timeout"
        raise TimeoutError(
            uri=session_url,
            message="Could not establish session with sensor {0}".format(sensor_id),
            error_code=404)
    return session_id, session_data
def _count(self):
    """Return the total number of query results, caching the value once known."""
    if self._count_valid:
        return self._total_results

    # Wait for the server-side query to finish before asking for the count.
    while self._still_querying():
        time.sleep(.5)
    if self._timed_out:
        raise TimeoutError(
            message="user-specified timeout exceeded while waiting for results")

    # row_count 0: no result rows requested — only the header's num_found is used.
    response = self._cb.get_object(
        "/pscr/query/v1/results",
        query_parameters={"query_id": self._query_token, "row_count": 0})
    header = response.get('response_header', {})
    self._total_results = header.get('num_found', 0)
    self._count_valid = True
    return self._total_results
def _search(self, start=0, rows=0):
    """Generator yielding query results, paging in ``_batch_size`` chunks.

    Args:
        start (int): Row offset at which to begin fetching.
        rows (int): Maximum number of rows to yield (0 = unlimited).

    Raises:
        TimeoutError: If the user-specified timeout fires while waiting
            for the server-side query to finish.
    """
    if not self._query_token:
        self._submit()

    # Block until the server-side query completes (or our timeout fires).
    while self._still_querying():
        time.sleep(.5)
    if self._timed_out:
        raise TimeoutError(
            message="user-specified timeout exceeded while waiting for results")

    log.debug("Pulling results, timed_out={}".format(self._timed_out))

    params = {"query_id": self._query_token,
              "start_row": start,
              "row_count": self._batch_size}
    cursor = start
    yielded = 0

    while True:
        page = self._cb.get_object("/pscr/query/v1/results", query_parameters=params)
        self._total_results = page.get('response_header', {}).get('num_found', 0)
        self._count_valid = True

        hit_row_limit = False
        for item in page.get('data', []):
            yield item
            cursor += 1
            yielded += 1
            if rows and yielded >= rows:
                hit_row_limit = True
                break

        finished = hit_row_limit or cursor >= self._total_results
        log.debug("current: {}, total_results: {}".format(cursor, self._total_results))
        # Advance the page window for the next request.
        params["start_row"] = cursor
        if finished:
            break
def poll_status(cb, url, desired_status="complete", timeout=None, delay=None):
    """Poll *url* until its "status" becomes *desired_status* or *timeout* elapses.

    Falsy *timeout*/*delay* fall back to 120 seconds and 0.5 seconds.
    Returns the matching response dict; raises LiveResponseError on an
    "error" status, TimeoutError when the deadline passes.
    """
    effective_timeout = timeout or 120
    effective_delay = delay or 0.5
    deadline = time.time() + effective_timeout

    while time.time() < deadline:
        res = cb.get_object(url)
        if res["status"] == desired_status:
            log.debug(json.dumps(res))
            return res
        if res["status"] == "error":
            raise LiveResponseError(res)
        time.sleep(effective_delay)

    raise TimeoutError(uri=url, message="timeout polling for Live Response")
def _count(self):
    """Return the total number of available results, caching once computed."""
    if self._count_valid:
        return self._total_results

    # Let the server-side search job finish first.
    while self._still_querying():
        time.sleep(.5)
    if self._timed_out:
        raise TimeoutError(
            message="user-specified timeout exceeded while waiting for results")

    result_url = ("/threathunter/search/v1/orgs/{}/processes/search_jobs/{}/results"
                  .format(self._cb.credentials.org_key, self._query_token))
    response = self._cb.get_object(result_url)
    header = response.get('response_header', {})
    self._total_results = header.get('num_available', 0)
    self._count_valid = True
    return self._total_results
def poll_status(cb, url, desired_status="complete", timeout=None, delay=None):
    """
    Poll the status of a Live Response query.

    Args:
        cb (BaseAPI): The CBAPI object reference.
        url (str): The URL to poll.
        desired_status (str): The status we're looking for.
        timeout (int): The timeout value in seconds (falsy => 120).
        delay (float): The delay between attempts in seconds (falsy => 0.5).

    Returns:
        object: The Live Response query result carrying the desired status.

    Raises:
        LiveResponseError: If an error response was encountered.
        TimeoutError: If the desired status was not seen before the timeout.
    """
    effective_timeout = timeout or 120
    effective_delay = delay or 0.5
    started = time.time()

    while time.time() - started < effective_timeout:
        res = cb.get_object(url)
        current = res["status"]
        if current == desired_status:
            log.debug(json.dumps(res))
            return res
        if current == "error":
            raise LiveResponseError(res)
        time.sleep(effective_delay)

    raise TimeoutError(uri=url, message="timeout polling for Live Response")
def _cb_delete_file_kill_if_necessary_function(self, event, *args, **kwargs):
    """Delete a file or directory tree on a CB sensor host via Live Response.

    Walks ``path_or_file`` on the endpoint, kills any running ``.exe`` found
    under it, deletes every file and finally the directories, yielding
    StatusMessage progress updates throughout. Retries on timeouts and CB
    connectivity loss up to MAX_TIMEOUTS attempts, waiting up to
    DAYS_UNTIL_TIMEOUT days for offline or lock-held hosts. A host lock file
    serializes concurrent actions against the same host. Always ends by
    yielding a FunctionResult (or FunctionError on an unexpected exception).
    """
    results = {}
    results["was_successful"] = False
    results["hostname"] = None
    results["deleted"] = []
    lock_acquired = False  # whether this run created the host lock file
    try:
        # Get the function parameters:
        incident_id = kwargs.get("incident_id")  # number
        hostname = kwargs.get("hostname")  # text
        path_or_file = kwargs.get("path_or_file")  # text

        log = logging.getLogger(__name__)  # Establish logging

        days_later_timeout_length = datetime.datetime.now() + datetime.timedelta(days=DAYS_UNTIL_TIMEOUT)  # Max duration length before aborting
        hostname = hostname.upper()[:15]  # CB limits hostname to 15 characters
        sensor = cb.select(Sensor).where('hostname:' + hostname)  # Query CB for the hostname's sensor
        timeouts = 0  # Number of timeouts that have occurred

        if len(sensor) <= 0:  # Host does not have CB agent, abort
            yield StatusMessage("[FATAL ERROR] CB could not find hostname: " + str(hostname))
            yield FunctionResult(results)
            return

        sensor = sensor[0]  # Get the sensor object from the query
        results["hostname"] = str(hostname).upper()
        deleted = []

        while timeouts <= MAX_TIMEOUTS:  # Max timeouts before aborting
            try:
                now = datetime.datetime.now()

                # Check if the sensor is queued to restart, wait up to 90 seconds before checking online status
                three_minutes_passed = datetime.datetime.now() + datetime.timedelta(minutes=3)
                while (sensor.restart_queued is True) and (three_minutes_passed >= now):
                    time.sleep(3)  # Give the CPU a break, it works hard!
                    now = datetime.datetime.now()
                    sensor = (cb.select(Sensor).where('hostname:' + hostname))[0]  # Retrieve the latest sensor vitals

                # Check online status
                if sensor.status != "Online":
                    yield StatusMessage('[WARNING] Hostname: ' + str(hostname) + ' is offline. Will attempt for ' + str(DAYS_UNTIL_TIMEOUT) + ' days...')

                # Check lock status
                if os.path.exists('/home/integrations/.resilient/cb_host_locks/{}.lock'.format(hostname)):
                    yield StatusMessage('[WARNING] A running action has a lock on ' + str(hostname) + '. Will attempt for ' + str(DAYS_UNTIL_TIMEOUT) + ' days...')

                # Wait for offline and locked hosts for days_later_timeout_length
                while (sensor.status != "Online" or os.path.exists('/home/integrations/.resilient/cb_host_locks/{}.lock'.format(hostname))) and (days_later_timeout_length >= now):
                    time.sleep(3)  # Give the CPU a break, it works hard!
                    now = datetime.datetime.now()
                    sensor = (cb.select(Sensor).where('hostname:' + hostname))[0]  # Retrieve the latest sensor vitals

                # Abort after DAYS_UNTIL_TIMEOUT
                if sensor.status != "Online" or os.path.exists('/home/integrations/.resilient/cb_host_locks/{}.lock'.format(hostname)):
                    yield StatusMessage('[FATAL ERROR] Hostname: ' + str(hostname) + ' is still offline!')
                    yield FunctionResult(results)
                    return

                # Check if the sensor is queued to restart, wait up to 90 seconds before continuing
                three_minutes_passed = datetime.datetime.now() + datetime.timedelta(minutes=3)
                while (sensor.restart_queued is True) and (three_minutes_passed >= now):  # If the sensor is queued to restart, wait up to 90 seconds
                    time.sleep(3)  # Give the CPU a break, it works hard!
                    now = datetime.datetime.now()
                    sensor = (cb.select(Sensor).where('hostname:' + hostname))[0]  # Retrieve the latest sensor vitals

                # Verify the incident still exists and is reachable, if not abort
                try:
                    incident = self.rest_client().get('/incidents/{0}?text_content_output_format=always_text&handle_format=names'.format(str(incident_id)))
                except Exception as err:
                    if err.message and "not found" in err.message.lower():
                        log.info('[FATAL ERROR] Incident ID ' + str(incident_id) + ' no longer exists.')
                        log.info('[FAILURE] Fatal error caused exit!')
                    else:
                        log.info('[FATAL ERROR] Incident ID ' + str(incident_id) + ' could not be reached, Resilient instance may be down.')
                        log.info('[FAILURE] Fatal error caused exit!')
                    return

                # Acquire host lock (O_EXCL makes creation fail if another action holds it)
                try:
                    f = os.fdopen(os.open('/home/integrations/.resilient/cb_host_locks/{}.lock'.format(hostname), os.O_CREAT | os.O_WRONLY | os.O_EXCL), 'w')
                    f.close()
                    lock_acquired = True
                except OSError:
                    continue

                # Establish a session to the host sensor
                yield StatusMessage('[INFO] Establishing session to CB Sensor #' + str(sensor.id) + ' (' + sensor.hostname + ')')
                session = cb.live_response.request_session(sensor.id)
                yield StatusMessage('[SUCCESS] Connected on Session #' + str(session.session_id) + ' to CB Sensor #' + str(sensor.id) + ' (' + sensor.hostname + ')')

                path = session.walk(path_or_file, False)  # Walk everything. False = performs a bottom->up walk, not top->down
                exe_files = []  # List of executable files, used for killing if necessary prior to deletion
                other_files = []  # List of all other files
                count = 0  # Will remain at 0 if path_or_file is a file and not a path
                for item in path:  # For each subdirectory in the path
                    count = count + 1
                    directory = os.path.normpath((str(item[0]))).replace(r'//', '\\')
                    file_list = item[2]  # List of files in the subdirectory
                    if str(file_list) != '[]':  # If the subdirectory is not empty
                        for f in file_list:  # For each file in the subdirectory
                            file_path = os.path.normpath(directory + '\\' + f).replace(r'//', '\\')
                            if f.endswith('.exe'):
                                exe_files.append(file_path)
                            else:
                                other_files.append(file_path)
                    # Directory goes after its own files so it is deleted last
                    other_files.append(directory)

                for e in exe_files:  # For each executable file
                    process_list = session.list_processes()
                    for pr in process_list:
                        if (e.lower()) in str((pr['path']).lower()):  # If the executable is running as a process
                            yield StatusMessage('[SUCCESS] Found running process: ' + e + ' (killing it now...)')
                            try:
                                session.kill_process((pr['pid']))  # Kill the process
                            except TimeoutError:
                                raise
                            except Exception as err:
                                yield StatusMessage('[ERROR] Failed to kill process! Encountered: ' + str(err))
                    try:
                        session.delete_file(e)  # Delete the executable file
                        deleted.append(e)
                        yield StatusMessage('[INFO] Deleted: ' + e)
                    except TimeoutError:
                        # NOTE(review): 'err' may be unbound here (Python 3 clears
                        # the except-as name after its block) — confirm intent.
                        raise TimeoutError(message=err)
                    except:
                        yield StatusMessage('[ERROR] Deletion failed for: ' + e)

                for o in other_files:  # For each non-executable file
                    try:
                        session.delete_file(o)  # Delete the file
                        deleted.append(o)
                        yield StatusMessage('[INFO] Deleted: ' + o)
                    except TimeoutError:
                        # NOTE(review): 'err' may be unbound here — confirm intent.
                        raise TimeoutError(message=err)
                    except:
                        yield StatusMessage('[ERROR] Deletion failed for: ' + o)

                if count == 0:  # path_or_file was a file
                    try:
                        session.delete_file(path_or_file)  # Delete the file
                        deleted.append(path_or_file)
                        yield StatusMessage('[INFO] Deleted: ' + path_or_file)
                    except TimeoutError:
                        # NOTE(review): 'err' may be unbound here — confirm intent.
                        raise TimeoutError(message=err)
                    except:
                        yield StatusMessage('[ERROR] Deletion failed for: ' + path_or_file)

            except TimeoutError:  # Catch TimeoutError and handle
                timeouts = timeouts + 1
                if timeouts <= MAX_TIMEOUTS:
                    yield StatusMessage('[ERROR] TimeoutError was encountered. Reattempting... (' + str(timeouts) + '/' + str(MAX_TIMEOUTS) + ')')
                    try:
                        session.close()
                    except:
                        pass
                    sensor = (cb.select(Sensor).where('hostname:' + hostname))[0]  # Retrieve the latest sensor vitals
                    sensor.restart_sensor()  # Restarting the sensor may avoid a timeout from occurring again
                    time.sleep(30)  # Sleep to apply sensor restart
                    sensor = (cb.select(Sensor).where('hostname:' + hostname))[0]  # Retrieve the latest sensor vitals
                else:
                    yield StatusMessage('[FATAL ERROR] TimeoutError was encountered. The maximum number of retries was reached. Aborting!')
                    yield StatusMessage('[FAILURE] Fatal error caused exit!')
                continue

            except (ApiError, ProtocolError, NewConnectionError, ConnectTimeoutError, MaxRetryError) as err:  # Catch urllib3 connection exceptions and handle
                if 'ApiError' in str(type(err).__name__) and 'network connection error' not in str(err):
                    raise  # Only handle ApiError involving network connection error
                timeouts = timeouts + 1
                if timeouts <= MAX_TIMEOUTS:
                    yield StatusMessage('[ERROR] Carbon Black was unreachable. Reattempting in 30 minutes... (' + str(timeouts) + '/' + str(MAX_TIMEOUTS) + ')')
                    time.sleep(1800)  # Sleep for 30 minutes, backup service may have been running.
                else:
                    yield StatusMessage('[FATAL ERROR] ' + str(type(err).__name__) + ' was encountered. The maximum number of retries was reached. Aborting!')
                    yield StatusMessage('[FAILURE] Fatal error caused exit!')
                continue

            except Exception as err:  # Catch all other exceptions and abort
                yield StatusMessage('[FATAL ERROR] Encountered: ' + str(err))
                yield StatusMessage('[FAILURE] Fatal error caused exit!')
                results["deleted"] = deleted

            else:
                results["was_successful"] = True
                results["deleted"] = deleted

            # Close the session whether the attempt succeeded or hit a fatal error above
            try:
                session.close()
            except:
                pass
            yield StatusMessage('[INFO] Session has been closed to CB Sensor #' + str(sensor.id) + '(' + sensor.hostname + ')')
            break

        # Release the host lock if acquired
        if lock_acquired is True:
            os.remove('/home/integrations/.resilient/cb_host_locks/{}.lock'.format(hostname))

        # Produce a FunctionResult with the results
        yield FunctionResult(results)
    except Exception:
        yield FunctionError()
def _cb_retrieve_user_accounts_data_function(self, event, *args, **kwargs):
    """Collect local user account data from a CB sensor host via Live Response.

    Uploads the UPV.exe utility to the endpoint, runs it to generate an HTML
    report of user accounts, downloads the report, and posts it to the
    Resilient incident as an attachment. Yields StatusMessage progress
    updates; retries on timeouts and CB connectivity loss up to MAX_TIMEOUTS,
    waiting up to DAYS_UNTIL_TIMEOUT days for offline or lock-held hosts.
    A host lock file serializes concurrent actions against the same host.
    Always ends by yielding a FunctionResult (or FunctionError on an
    unexpected exception).
    """
    results = {}
    results["was_successful"] = False
    results["hostname"] = None
    lock_acquired = False  # whether this run created the host lock file
    try:
        # Get the function parameters:
        incident_id = kwargs.get("incident_id")  # number
        hostname = kwargs.get("hostname")  # text

        log = logging.getLogger(__name__)  # Establish logging

        days_later_timeout_length = datetime.datetime.now() + datetime.timedelta(days=DAYS_UNTIL_TIMEOUT)  # Max duration length before aborting
        hostname = hostname.upper()[:15]  # CB limits hostname to 15 characters
        sensor = cb.select(Sensor).where('hostname:' + hostname)  # Query CB for the hostname's sensor
        timeouts = 0  # Number of timeouts that have occurred

        if len(sensor) <= 0:  # Host does not have CB agent, abort
            yield StatusMessage("[FATAL ERROR] CB could not find hostname: " + str(hostname))
            yield FunctionResult(results)
            return

        sensor = sensor[0]  # Get the sensor object from the query
        results["hostname"] = str(hostname).upper()

        while timeouts <= MAX_TIMEOUTS:  # Max timeouts before aborting
            try:
                now = datetime.datetime.now()

                # Check if the sensor is queued to restart, wait up to 90 seconds before checking online status
                three_minutes_passed = datetime.datetime.now() + datetime.timedelta(minutes=3)
                while (sensor.restart_queued is True) and (three_minutes_passed >= now):
                    time.sleep(3)  # Give the CPU a break, it works hard!
                    now = datetime.datetime.now()
                    sensor = (cb.select(Sensor).where('hostname:' + hostname))[0]  # Retrieve the latest sensor vitals

                # Check online status
                if sensor.status != "Online":
                    yield StatusMessage('[WARNING] Hostname: ' + str(hostname) + ' is offline. Will attempt for ' + str(DAYS_UNTIL_TIMEOUT) + ' days...')

                # Check lock status
                if os.path.exists('/home/integrations/.resilient/cb_host_locks/{}.lock'.format(hostname)):
                    yield StatusMessage('[WARNING] A running action has a lock on ' + str(hostname) + '. Will attempt for ' + str(DAYS_UNTIL_TIMEOUT) + ' days...')

                # Wait for offline and locked hosts for days_later_timeout_length
                while (sensor.status != "Online" or os.path.exists('/home/integrations/.resilient/cb_host_locks/{}.lock'.format(hostname))) and (days_later_timeout_length >= now):
                    time.sleep(3)  # Give the CPU a break, it works hard!
                    now = datetime.datetime.now()
                    sensor = (cb.select(Sensor).where('hostname:' + hostname))[0]  # Retrieve the latest sensor vitals

                # Abort after DAYS_UNTIL_TIMEOUT
                if sensor.status != "Online" or os.path.exists('/home/integrations/.resilient/cb_host_locks/{}.lock'.format(hostname)):
                    yield StatusMessage('[FATAL ERROR] Hostname: ' + str(hostname) + ' is still offline!')
                    yield FunctionResult(results)
                    return

                # Check if the sensor is queued to restart, wait up to 90 seconds before continuing
                three_minutes_passed = datetime.datetime.now() + datetime.timedelta(minutes=3)
                while (sensor.restart_queued is True) and (three_minutes_passed >= now):  # If the sensor is queued to restart, wait up to 90 seconds
                    time.sleep(3)  # Give the CPU a break, it works hard!
                    now = datetime.datetime.now()
                    sensor = (cb.select(Sensor).where('hostname:' + hostname))[0]  # Retrieve the latest sensor vitals

                # Verify the incident still exists and is reachable, if not abort
                try:
                    incident = self.rest_client().get('/incidents/{0}?text_content_output_format=always_text&handle_format=names'.format(str(incident_id)))
                except Exception as err:
                    if err.message and "not found" in err.message.lower():
                        log.info('[FATAL ERROR] Incident ID ' + str(incident_id) + ' no longer exists.')
                        log.info('[FAILURE] Fatal error caused exit!')
                    else:
                        log.info('[FATAL ERROR] Incident ID ' + str(incident_id) + ' could not be reached, Resilient instance may be down.')
                        log.info('[FAILURE] Fatal error caused exit!')
                    return

                # Acquire host lock (O_EXCL makes creation fail if another action holds it)
                try:
                    f = os.fdopen(os.open('/home/integrations/.resilient/cb_host_locks/{}.lock'.format(hostname), os.O_CREAT | os.O_WRONLY | os.O_EXCL), 'w')
                    f.close()
                    lock_acquired = True
                except OSError:
                    continue

                # Establish a session to the host sensor
                yield StatusMessage('[INFO] Establishing session to CB Sensor #' + str(sensor.id) + ' (' + sensor.hostname + ')')
                session = cb.live_response.request_session(sensor.id)
                yield StatusMessage('[SUCCESS] Connected on Session #' + str(session.session_id) + ' to CB Sensor #' + str(sensor.id) + ' (' + sensor.hostname + ')')

                # Ensure the working directories exist on the endpoint
                try:
                    session.create_directory('C:\Windows\CarbonBlack\Reports')
                except TimeoutError:
                    # NOTE(review): 'err' may be unbound here (Python 3 clears
                    # the except-as name after its block) — confirm intent.
                    raise TimeoutError(message=err)
                except Exception:
                    pass  # Existed already
                try:
                    session.create_directory(r'C:\Windows\CarbonBlack\Tools')
                except TimeoutError:
                    raise TimeoutError(message=err)
                except Exception:
                    pass  # Existed already

                # Remove any stale copy of the utility before uploading a fresh one
                try:
                    session.delete_file(r'C:\Windows\CarbonBlack\Tools\UPV.exe')
                except TimeoutError:
                    raise TimeoutError(message=err)
                except Exception:
                    pass  # Didn't exist already

                session.put_file(open(PATH_TO_UTILITY, 'rb'), r'C:\Windows\CarbonBlack\Tools\UPV.exe')  # Place the utility on the endpoint
                session.create_process(r'C:\Windows\CarbonBlack\Tools\UPV.exe /shtml "C:\Windows\CarbonBlack\Reports\ua-dump.html" /sort "User Name"', True)  # Execute the utility
                yield StatusMessage('[SUCCESS] Executed UPV.exe on Sensor!')

                with tempfile.NamedTemporaryFile(delete=False) as temp_file:  # Create temporary temp_file for HTML file
                    try:
                        temp_file.write(session.get_file(r'C:\Windows\CarbonBlack\Reports\ua-dump.html'))  # Write the HTML file from the endpoint to temp_file
                        temp_file.close()
                        yield StatusMessage('[SUCCESS] Retrieved HTML data file from Sensor!')
                        self.rest_client().post_attachment('/incidents/{0}/attachments'.format(incident_id), temp_file.name, '{0}-user_accounts.html'.format(sensor.hostname))  # Post temp_file to incident
                        yield StatusMessage('[SUCCESS] Posted HTML data file to the incident as an attachment!')
                    finally:
                        os.unlink(temp_file.name)  # Delete temporary temp_file

                # Clean up the utility and the generated report on the endpoint
                session.delete_file(r'C:\Windows\CarbonBlack\Tools\UPV.exe')
                session.delete_file(r'C:\Windows\CarbonBlack\Reports\ua-dump.html')

            except TimeoutError:  # Catch TimeoutError and handle
                timeouts = timeouts + 1
                if timeouts <= MAX_TIMEOUTS:
                    yield StatusMessage('[ERROR] TimeoutError was encountered. Reattempting... (' + str(timeouts) + '/' + str(MAX_TIMEOUTS) + ')')
                    try:
                        session.close()
                    except:
                        pass
                    sensor = (cb.select(Sensor).where('hostname:' + hostname))[0]  # Retrieve the latest sensor vitals
                    sensor.restart_sensor()  # Restarting the sensor may avoid a timeout from occurring again
                    time.sleep(30)  # Sleep to apply sensor restart
                    sensor = (cb.select(Sensor).where('hostname:' + hostname))[0]  # Retrieve the latest sensor vitals
                else:
                    yield StatusMessage('[FATAL ERROR] TimeoutError was encountered. The maximum number of retries was reached. Aborting!')
                    yield StatusMessage('[FAILURE] Fatal error caused exit!')
                continue

            except (ApiError, ProtocolError, NewConnectionError, ConnectTimeoutError, MaxRetryError) as err:  # Catch urllib3 connection exceptions and handle
                if 'ApiError' in str(type(err).__name__) and 'network connection error' not in str(err):
                    raise  # Only handle ApiError involving network connection error
                timeouts = timeouts + 1
                if timeouts <= MAX_TIMEOUTS:
                    yield StatusMessage('[ERROR] Carbon Black was unreachable. Reattempting in 30 minutes... (' + str(timeouts) + '/' + str(MAX_TIMEOUTS) + ')')
                    time.sleep(1800)  # Sleep for 30 minutes, backup service may have been running.
                else:
                    yield StatusMessage('[FATAL ERROR] ' + str(type(err).__name__) + ' was encountered. The maximum number of retries was reached. Aborting!')
                    yield StatusMessage('[FAILURE] Fatal error caused exit!')
                continue

            except Exception as err:  # Catch all other exceptions and abort
                yield StatusMessage('[FATAL ERROR] Encountered: ' + str(err))
                yield StatusMessage('[FAILURE] Fatal error caused exit!')

            else:
                results["was_successful"] = True

            # Close the session whether the attempt succeeded or hit a fatal error above
            try:
                session.close()
            except:
                pass
            yield StatusMessage('[INFO] Session has been closed to CB Sensor #' + str(sensor.id) + '(' + sensor.hostname + ')')
            break

        # Release the host lock if acquired
        if lock_acquired is True:
            os.remove('/home/integrations/.resilient/cb_host_locks/{}.lock'.format(hostname))

        # Produce a FunctionResult with the results
        yield FunctionResult(results)
    except Exception:
        yield FunctionError()