def __init__(self, buf_before, buf_after, num_channels=1, sample_rate=16000,
             bytes_per_sample=2, dir=TOP_DIR, file_prefix="recording-",
             delete_active_recording=False):
    """Prepare a timestamped WAV file fed from a pre/post-hotword buffer pair.

    buf_before / buf_after: buffer objects exposing length()/max_length()
    in bytes (pre-hotword ring buffer and post-hotword capture buffer).
    dir: output directory for the recording (name kept for API compat,
    though it shadows the builtin).
    delete_active_recording: when True, an interrupted recording's file
    is removed instead of kept.
    NOTE(review): uses self._tag, which must be assigned elsewhere in the
    class before this constructor runs — confirm.
    """
    self.buf_before = buf_before
    self.buf_after = buf_after
    # Bytes of audio per second of wall time; used to convert buffer
    # sizes into durations.
    self._bytes_per_second = num_channels * sample_rate * bytes_per_sample
    self._file_prefix = file_prefix
    self._delete_active_recording = delete_active_recording
    self.clean_up = False
    self._will_stop_capture = False
    self._actual_before_length = buf_before.length() / self._bytes_per_second
    self._desired_after_length = buf_after.max_length() / self._bytes_per_second
    self._desired_length = self._actual_before_length + self._desired_after_length
    Log.debug(self._tag,
              "Will record for %d (%d before, %f after)" %
              (self._desired_length, self._actual_before_length,
               self._desired_after_length))
    # File setup
    self.filename = "%s%d.wav" % (self._file_prefix, int(time.time()))
    self.filepath = os.path.join(dir, self.filename)
    self._file = wave.open(self.filepath, "w")
    self._file.setnchannels(num_channels)
    self._file.setframerate(sample_rate)
    self._file.setsampwidth(bytes_per_sample)
class SytemInputFileReader(threading.Thread):
    """Thread that feeds filenames read from stdin into a decode work queue.

    A trailing ``None`` sentinel is enqueued on EOF so consumers know no
    more work will arrive. (Class name keeps the original, misspelled,
    public spelling for compatibility.)
    """

    def __init__(self, decode_work_queue):
        threading.Thread.__init__(self)
        # Renamed from ``_stop``: threading.Thread defines internal ``_stop``
        # machinery in some Python versions, and shadowing it with an Event
        # breaks Thread.join(). Public stop()/stopped() API is unchanged.
        self._stop_event = threading.Event()
        self.decode_work_queue = decode_work_queue
        self.logger = Log().getLogger()
        self.logger.debug("SytemInputFileReader(decode_work_queue:'%s')" % (decode_work_queue))

    def run(self):
        self.logger.debug("Thread started.")
        for line in sys.stdin:
            line = line.strip()
            if (line):
                self.logger.debug("Got new file to decode from system.in. Add '%s' to the decode work queue." % (line))
                self.decode_work_queue.put(line)
        # Sentinel tells queue consumers to shut down.
        self.logger.debug("Put the last None element to the queue.")
        self.decode_work_queue.put(None)
        self._stop_event.set()
        self.logger.debug("Thread stopped.")

    def stop(self):
        """Request the thread to stop and unblock the stdin iteration."""
        self._stop_event.set()
        sys.stdin.flush()

    def stopped(self):
        """Return True once the thread has been asked to stop or has finished."""
        return self._stop_event.isSet()
def terminate(self, terminate=False):
    """Shut down the audio system for good; safe to call repeatedly.

    :return: None
    """
    if self.is_running:
        self.stop()
    Log.debug(self._tag, "Will terminate audio stream")
    # Idempotence guard: only the first call tears anything down.
    if self.is_terminated:
        return
    self.is_terminated = True
    # The input stream / PyAudio handle may never have been created;
    # in that case the attribute access raises AttributeError, which we
    # deliberately ignore.
    try:
        self.stream_in.stop_stream()
        self.stream_in.close()
    except AttributeError:
        pass
    try:
        self.audio.terminate()
    except AttributeError:
        pass
def select_doctor(self): """选择合适的大夫""" hospital_id = self.config.hospital_id department_id = self.config.department_id duty_code = self.config.duty_code duty_date = self.config.date # log current date Log.debug("当前挂号日期: " + self.config.date) preload = { 'hospitalId':hospital_id , 'departmentId': department_id, 'dutyCode': duty_code, 'dutyDate': duty_date, 'isAjax': True } response = self.browser.post(self.get_doctor_url , data=preload) Log.debug("response data:" + response.text) try: data = json.loads(response.text) if data["msg"] == "OK" and data["hasError"] == False and data["code"] == 200: self.dutys = data["data"] except Exception, e: Log.exit(repr(e))
class SytemInputFileReader(ExceptionLoggingThread):
    """Exception-logging thread variant that feeds filenames from stdin into
    a decode work queue, ending with a ``None`` sentinel on EOF.

    (Class name keeps the original, misspelled, public spelling.)
    """

    def __init__(self, decode_work_queue):
        ExceptionLoggingThread.__init__(self)
        # Renamed from ``_stop``: threading.Thread defines internal ``_stop``
        # machinery in some Python versions, and shadowing it with an Event
        # breaks Thread.join(). Public stop()/stopped() API is unchanged.
        self._stop_event = threading.Event()
        self.decode_work_queue = decode_work_queue
        self.logger = Log().getLogger()
        self.logger.debug("SytemInputFileReader(decode_work_queue:'%s')" % (decode_work_queue))

    def run_logic(self):
        self.logger.debug("Thread started.")
        for line in sys.stdin:
            line = line.strip()
            if (line):
                self.logger.debug("Got new file to decode from system.in. Add '%s' to the decode work queue." % (line))
                self.decode_work_queue.put(line)
        # Sentinel tells queue consumers to shut down.
        self.logger.debug("Put the last None element to the queue.")
        self.decode_work_queue.put(None)
        self._stop_event.set()
        self.logger.debug("Thread stopped.")

    def stop(self):
        """Request the thread to stop and unblock the stdin iteration."""
        self._stop_event.set()
        sys.stdin.flush()

    def stopped(self):
        """Return True once the thread has been asked to stop or has finished."""
        return self._stop_event.isSet()
def __init__(self, name, workerTimeout=0, numWorkerThreads=16, jobQueueMaxSize=100):
    """Service constructor.

    [in] name - Service name.
    [in] workerTimeout - Service worker timeout.
    [in] numWorkerThreads - Count of service worker threads.
    [in] jobQueueMaxSize - Service job queue max size.
    """
    Log.debug(name)
    self.name = name
    self.__run = False
    self.__workerThreads = []
    self.__workerTimeout = workerTimeout
    self.__jobs = Queue(maxsize=jobQueueMaxSize)
    # Pre-create (but do not start) the worker pool; a failure mid-way
    # leaves the threads created so far in place.
    try:
        for _ in range(numWorkerThreads):
            self.__workerThreads.append(Thread(target=self.__worker))
    except Exception as e:
        Log.error(str(e))
def read_stdout(self):
    """Consume stack-ide stdout line by line, decoding each JSON response and
    dispatching it to the response handler, until the process exits."""
    while self._process.poll() is None:
        try:
            raw = self._process.stdout.readline().decode('UTF-8')
            if not raw:
                return
            # Non-JSON lines are logged and skipped rather than fatal.
            try:
                data = json.loads(raw)
            except:
                Log.debug("Got a non-JSON response: ", raw)
                continue
            #todo: try catch ?
            self._response_handler(data)
        except:
            # Any other failure tears down the subprocess and ends the reader.
            Log.warning("Stack-IDE stdout process ending due to exception: ", sys.exc_info())
            self._process.terminate()
            self._process = None
            return
    Log.info("Stack-IDE stdout process ended.")
def __request(self, package):
    """Execute the request carried by *package* and send back a response.

    [in] package - Carries requestMethod (name of a method on self) and
    requestArguments (argument list or None); responseArguments is filled
    with the call result (wrapped in a list unless it is iterable or None).
    """
    method = getattr(self, package.requestMethod, None)
    # Idiom fix: identity comparison with None instead of ``== None``.
    if method is None:
        Log.warning("Method " + package.requestMethod + " is not found in " + self.name)
        return
    Log.debug(self.name + " call " + package.requestMethod)
    if package.requestArguments is None:
        requestResult = method()
    else:
        requestResult = method(*package.requestArguments)
    if requestResult is None:
        package.responseArguments = None
    elif isinstance(requestResult, Iterable):
        # Iterables pass through unchanged; scalars get wrapped below.
        package.responseArguments = requestResult
    else:
        package.responseArguments = [requestResult]
    self.sendResponse(package)
def cluster_data(data, n_clusters_interval: Tuple) -> Tuple:
    """KMeans-cluster *data*, choosing k within n_clusters_interval by
    maximising the silhouette score (k is fixed to 1 when the interval
    starts at 1, since silhouette needs at least 2 clusters).

    Returns (clusterer, labels, centers, optimal_score).
    """
    logger = Log("cluster_data")
    optimal_n_clusters, optimal_score = 1, -1
    if n_clusters_interval[0] != 1:
        optimal_n_clusters, optimal_score = -1, -1
        for k in np.arange(n_clusters_interval[0], n_clusters_interval[1]):
            candidate_labels = KMeans(n_clusters=k).fit_predict(data)
            # silhouette_score raises ValueError when the labelling is
            # degenerate; treat that as the end of the usable range.
            try:
                score = silhouette_score(data, candidate_labels)
            except ValueError:
                break
            logger.debug("For n_clusters = {}, the average silhouette score is: {}".format(k, score))
            if score > optimal_score:
                optimal_score, optimal_n_clusters = score, k
        assert optimal_n_clusters != -1, "Error in silhouette analysis"
        logger.debug("Best score is {} for n_cluster = {}".format(optimal_score, optimal_n_clusters))
    clusterer = KMeans(n_clusters=optimal_n_clusters).fit(data)
    return clusterer, clusterer.labels_, clusterer.cluster_centers_, optimal_score
def auth_login(self): """ 登陆 """ Log.info("开始登陆") password = self.config.password mobile_no = self.config.mobile_no preload = { 'mobileNo': mobile_no, 'password': password, 'yzm':'', 'isAjax': True, } response = self.browser.post(self.login_url, data=preload) Log.debug("response data:" + response.text) try: data = json.loads(response.text) if data["msg"] == "OK" and data["hasError"] == False and data["code"] == 200: cookies_file = os.path.join(self.browser.root_path, self.config.mobile_no + ".cookies") self.browser.save_cookies(cookies_file) Log.info("登陆成功") return True else: Log.error(data["msg"]) raise Exception() except Exception, e: Log.error("登陆失败") Log.exit(repr(e))
def read_stdout(self):
    """Read stack-ide's stdout until it exits, parsing each line as JSON and
    handing the result to the registered response handler."""
    while self._process.poll() is None:
        try:
            line = self._process.stdout.readline().decode('UTF-8')
            if not line:
                return
            data = None
            try:
                data = json.loads(line)
            except:
                # Skip anything that is not valid JSON.
                Log.debug("Got a non-JSON response: ", line)
                continue
            #todo: try catch ?
            self._response_handler(data)
        except:
            Log.warning("Stack-IDE stdout process ending due to exception: ", sys.exc_info())
            self._process.terminate()
            self._process = None
            return
    Log.info("Stack-IDE stdout process ended.")
def printSummary():
    """Print (and send via notify) a summary of the queue length and the
    per-level processed-event counts from the summary table."""
    log = Log()
    log.debug("Displaying summary for queue called '" + config.AZURE_STORAGE_QUEUE_NAME +
              "' and table called '" + config.AZURE_STORAGE_SUMMARY_TABLE_NAME +
              "' in storage account called '" + config.AZURE_STORAGE_ACCOUNT_NAME + "'")
    table_service = getTableService()
    queue_service = getQueueService()
    # BUG FIX: every count below is wrapped in str(); getLength() was
    # concatenated raw, which raises TypeError when it returns a number.
    summary = "Queue Length is approximately: " + str(queue_service.getLength()) + "\n\n"
    summary = summary + "Processed events:\n"
    # One line per event level, in the original display order.
    for label, level in (("Errors", "ERROR"), ("Warnings", "WARNING"),
                         ("Infos", "INFO"), ("Debugs", "DEBUG"),
                         ("Correct", "CORRECT"), ("Incorrect", "INCORRECT"),
                         ("Others", "OTHER")):
        summary = summary + label + ": " + str(table_service.getCount(level)) + "\n"
    print(summary)
    notify.info(summary)
def check_process_running(self, process):
    """Poll once per second, keeping self.is_process_running up to date,
    until self.check_process_enabled is cleared by another thread."""
    self.check_process_enabled = True
    if process:
        while True:
            if not self.check_process_enabled:
                return
            sleep(1)
            exists = False
            if not settings.is_background:
                # Foreground mode: the tracked PID can be checked directly.
                if self.pid_exists(settings.current_pid):
                    exists = True
            else:
                # Background mode: search the process table for our key.
                if not self.key:
                    Log.debug('waiting for process to run')
                    continue
                out, _ = subprocess.Popen(
                    f'ps a | grep {self.key}',
                    shell=True,
                    stdout=subprocess.PIPE).communicate()
                if out:
                    # Only count lines that mention the actual client binary
                    # (grep's own entry and other noise are filtered out).
                    key = 'openconnect'
                    items = out.decode().split('\n')
                    for item in items:
                        if not item:
                            continue
                        if key in item:
                            exists = True
                            break
            self.is_process_running = exists
def processAll(self):
    """Apply updateOne to every billing-type change that is due (startDate
    on or before today) and not yet marked as updated."""
    Log.debug("processAll()")
    pending = UserChangeBillingType.objects(startDate__lte=date.today(), isUpdated__ne=True)
    Log.info("{0} record(s) found".format(len(pending)))
    for record in pending:
        self.updateOne(record)
def post(self, url, data):
    """HTTP POST through the shared session.

    On a 200 response the final URL is remembered as the Referer header for
    subsequent requests and the response is returned.
    NOTE(review): assumes the return sits inside the 200 branch (the original
    formatting is ambiguous) — non-200 responses yield None; confirm.
    """
    Log.debug("post data :" + str(data))
    resp = self.session.post(url, data=data)
    if resp.status_code == 200:
        self.session.headers['Referer'] = resp.url
        return resp
def __del__(self):
    """Service destructor: stop the worker threads if still running."""
    Log.debug(self.name)
    # Only a running service needs a shutdown; stop() joins the workers.
    if self.__run == True:
        self.stop()
def __init__(self, name):
    """Store *name* and emit a debug line through a fresh Log instance."""
    # @todo: constructor still needs real documentation of its contract.
    self.name = name
    logger = Log()
    logger.debug(self, 'f**k')
class PendulumEvalCallback(EnvEvalCallback):
    """Evaluation callback for the Pendulum environment.

    Performance is considered inadequate when the mean reward degrades more
    than unacceptable_pct_degradation percent below reward_threshold.
    """

    def __init__(self, reward_threshold=-145.0, unacceptable_pct_degradation=30.0):
        self.reward_threshold = reward_threshold
        self.unacceptable_pct_degradation = unacceptable_pct_degradation
        self.logger = Log("PendulumEvalCallback")

    def evaluate_env(self, model, env, n_eval_episodes, sb_version="sb2") -> Tuple[bool, Dict]:
        """Evaluate *model* on *env* and decide whether performance is adequate.

        Returns (adequate_performance, info) where info carries mean/std
        reward and the percentage drop below the reward threshold.
        Raises NotImplementedError for unknown sb_version values.
        """
        if sb_version == "sb2":
            mean_reward, std_reward = evaluate_policy(
                model, env, n_eval_episodes=n_eval_episodes, render=False, deterministic=True)
        elif sb_version == "sb3":
            mean_reward, std_reward = evaluate_policy_sb3(
                model, env, n_eval_episodes=n_eval_episodes, render=False, deterministic=True)
        else:
            # BUG FIX: the original raised NotImplemented (a constant, not an
            # exception class), which itself raises a confusing TypeError.
            raise NotImplementedError(
                "sb_version can be either sb2 or sb3. Found: {}".format(
                    sb_version))
        # Percentage shortfall relative to the threshold; 0 when at/above it.
        percentage_drop = (abs(100.0 - (100.0 * mean_reward) / self.reward_threshold)
                           if mean_reward < self.reward_threshold else 0.0)
        self.logger.debug(
            "Mean reward: {}, Std reward: {}, Percentage drop: {}".format(
                mean_reward, std_reward, percentage_drop))
        adequate_performance = mean_reward > (self.reward_threshold - abs(
            (self.reward_threshold * self.unacceptable_pct_degradation / 100.0)))
        info = dict()
        info["mean_reward"] = mean_reward
        info["std_reward"] = std_reward
        info["percentage_drop"] = percentage_drop
        # release resources
        env.close()
        return adequate_performance, info

    def get_reward_threshold(self) -> float:
        """Return the configured reward threshold."""
        return self.reward_threshold
def scrub_anonymous(gr):
    """Remove every unique in *gr* that nothing points at (i.e. whose inverse
    adjacency set is empty)."""
    inverse = gr._inverse_adjacency()
    # Collect first, remove second, so we never mutate while iterating.
    orphans = [unq for unq in gr.uniques if len(inverse[unq]) == 0]
    for orphan in orphans:
        Log.debug("Removing:", orphan)
    for orphan in orphans:
        gr.remove(orphan)
def freeTrial(self, startDate, endDate, activeOnly=True):
    """Create a zero-amount invoice for every free-trial user in the period
    (inactive users are skipped unless activeOnly is False)."""
    Log.debug("freeTrial() : {0} - {1}".format(startDate, endDate))
    userList = self.getUserList("Free trial")
    Log.info("{0} user(s) found".format(len(userList)))
    invoice_month = int(startDate.strftime("%Y%m"))
    for user in userList:
        if activeOnly and user.isActive != True:
            continue
        self.createUserInvoice(user.id, invoice_month, 0.0)
def run(self):
    """ Start writing the file from the buffers in this thread. """
    self._is_writing_interrupted = False
    # copy back buffer
    buf_before = self.buf_before.get_copy()
    self._buf_before_length = len(buf_before) / self._bytes_per_second
    self._file.writeframes("".join(buf_before))
    # Drop our reference to the copied pre-hotword audio early.
    buf_before = None
    self._time_written = self._buf_before_length
    Log.debug(self._tag, "Writen %.2f seconds from before the hotword" % self._buf_before_length)
    # copy forward buffer
    while True:
        if self._is_writing_interrupted:
            Log.debug(self._tag, "Interrupt detected")
            break
        bytes = self.buf_after.get()
        self._file.writeframes("".join(bytes))
        additional_time_written = (len(bytes) / self._bytes_per_second)
        Log.debug(self._tag, "Written %.2f seconds" % additional_time_written)
        self._time_written += additional_time_written
        # Stop once capture has ended and the forward buffer is drained.
        if self.buf_after.capture_stopped() and self.buf_after.length() == 0:
            break
        time.sleep(3)
    self._file.close()
    if self._is_writing_interrupted and self._delete_active_recording:
        # Interrupted and configured to discard: try to remove the partial file.
        try:
            os.remove(self.filepath)
            Log.debug(self._tag, "Writing of %s interrupted after %.2f seconds of audio so file was deleted" % (self.filename, self._time_written))
        except OSError:
            Log.error(self._tag, "Writing of %s interrupted after %.2f seconds of audio, but COULDNT DELETE" % (self.filename, self._time_written))
    else:
        Log.debug(self._tag, "Written %.2f seconds of audio in %s" % (self._time_written, self.filename))
    self.clean_up = True
def createLastMonth(self):
    """Run every invoice generator (free trial, monthly, metered) over the
    previous calendar month."""
    Log.debug("createLastMonth()")
    # Last day of the previous month = day before the 1st of this month.
    last_of_prev = date.today().replace(day=1) - timedelta(days=1)
    first_of_prev = last_of_prev.replace(day=1)
    self.freeTrial(first_of_prev, last_of_prev)
    self.monthly(first_of_prev, last_of_prev)
    self.metered(first_of_prev, last_of_prev)
def stack_ide_loadtargets(project_path, package):
    """Run ``stack ide load-targets <package>`` in *project_path* and return
    its stdout as a list of lines."""
    Log.debug("Requesting load targets for ", package)
    loader = subprocess.Popen(
        ["stack", "ide", "load-targets", package],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=project_path,
        env=env,
        universal_newlines=True,
        creationflags=CREATE_NO_WINDOW)
    outs, errs = loader.communicate()
    # TODO: check response!
    return outs.splitlines()
class BufferExecutionsSkipped:
    """Buffer of executions skipped because an env with equal variables was
    already evaluated; can persist a human-readable report to save_dir."""

    def __init__(self, save_dir: str = None):
        assert save_dir, "save_dir should have a value: {}".format(save_dir)
        self.save_dir = save_dir
        self.executions_skipped: List[ExecutionSkipped] = []
        self.logger = Log("BufferEnvPredicatePairs")

    def append(self, execution_skipped: ExecutionSkipped):
        # Deduplicate on the skipped pair's env variables.
        if not self.is_already_evaluated(other_execution_skipped=execution_skipped):
            self.executions_skipped.append(execution_skipped)

    def is_already_evaluated(self, other_execution_skipped: ExecutionSkipped) -> bool:
        """True when an execution with equal env variables is already buffered."""
        for execution_skipped in self.executions_skipped:
            evaluated_env_variables = execution_skipped.env_predicate_pair_skipped.get_env_variables()
            if evaluated_env_variables.is_equal(other_execution_skipped.env_predicate_pair_skipped.get_env_variables()):
                self.logger.debug(
                    "Env {} was already evaluated".format(
                        other_execution_skipped.env_predicate_pair_skipped.get_env_variables().get_params_string()
                    )
                )
                return True
        return False

    def save(self, current_iteration: int) -> None:
        """Write a report of all skipped executions (one line each, plus a
        per-search-component tally) to executions_skipped_<iter>.txt."""
        if len(self.executions_skipped) > 0:
            self.logger.debug("Saving buffer of env executions skipped at iteration {}".format(current_iteration))
            filename = self.save_dir + "/executions_skipped_" + str(current_iteration) + ".txt"
            with open(filename, "w+", encoding="utf-8") as f:
                executions_skipped_by_search_component = dict()
                for execution_skipped in self.executions_skipped:
                    # Count skips per search component.
                    if execution_skipped.search_component not in executions_skipped_by_search_component:
                        executions_skipped_by_search_component[execution_skipped.search_component] = 0
                    executions_skipped_by_search_component[execution_skipped.search_component] += 1
                    if execution_skipped.env_predicate_pair_executed.is_predicate():
                        # Skipped env was dominated by an env that evaluated True.
                        f.write(
                            "env {} dominated by True env {} in {} \n".format(
                                execution_skipped.env_predicate_pair_skipped.get_env_variables().get_params_string(),
                                execution_skipped.env_predicate_pair_executed.get_env_variables().get_params_string(),
                                execution_skipped.search_component,
                            )
                        )
                    elif not execution_skipped.env_predicate_pair_executed.is_predicate():
                        # Skipped env dominates an env that evaluated False.
                        f.write(
                            "env {} dominates False env {} in {} \n".format(
                                execution_skipped.env_predicate_pair_skipped.get_env_variables().get_params_string(),
                                execution_skipped.env_predicate_pair_executed.get_env_variables().get_params_string(),
                                execution_skipped.search_component,
                            )
                        )
                f.write("\n")
                for key in executions_skipped_by_search_component.keys():
                    f.write("executions skipped by {}: {} \n".format(key, executions_skipped_by_search_component[key]))
def monthly(self, startDate, endDate, activeOnly=True):
    """Invoice every monthly-plan user for the period at the plan price
    effective at startDate (inactive users skipped unless activeOnly=False)."""
    Log.debug("monthly() : {0} - {1}".format(startDate, endDate))
    price = self.findPrice("Monthly", startDate)
    Log.info("price:" + str(price))
    userList = self.getUserList("Monthly")
    Log.info("{0} user(s) found".format(len(userList)))
    invoice_month = int(startDate.strftime("%Y%m"))
    for user in userList:
        if activeOnly and user.isActive != True:
            continue
        self.createUserInvoice(user.id, invoice_month, price)
def get_sms_verify_code(self):
    """Request an SMS verification code and prompt the user to enter it.

    Returns the user-entered code, or None when the server reports an
    unexpected error; exits when sending is rate-limited (code 812)."""
    response = self.browser.post(self.send_code_url, "")
    data = json.loads(response.text)
    Log.debug(response.text)
    if data["msg"] == "OK." and data["code"] == 200:
        Log.info("获取验证码成功")
        return raw_input("输入短信验证码: ")
    if data["msg"] == "短信发送太频繁" and data["code"] == 812:
        Log.exit(data["msg"])
    else:
        Log.error(data["msg"])
        return None
def updateOne(self, doc):
    """Apply one pending billing-type change: update the user's billingType
    and mark the change document as processed."""
    Log.debug("updateOne()")
    Log.debug("user id:{0}, billing type id:{1}".format(
        doc.userId, doc.billingType))
    try:
        User.objects(id=ObjectId(doc.userId)).update(
            billingType=doc.billingType)
        doc.isUpdated = True
        # BUG FIX: the original assigned the function object datetime.now
        # instead of calling it, storing a callable rather than a timestamp.
        doc.updatedAt = datetime.now()
        doc.save()
    except:
        Log.error(traceback.format_exc())
def _handle_welcome(self, welcome):
    """ Identifies if we support the current version of the stack ide api """
    expected_version = (0, 1, 1)
    # Normalise a list payload to a tuple so comparisons are well-defined.
    version_got = tuple(welcome) if type(welcome) is list else welcome
    if expected_version > version_got:
        # Backend is older than we require.
        Log.error("Old stack-ide protocol:", version_got, '\n', 'Want version:', expected_version)
        complain("wrong-stack-ide-version", "Please upgrade stack-ide to a newer version.")
    elif expected_version < version_got:
        # Backend is newer; the protocol may have drifted.
        Log.warning("stack-ide protocol may have changed:", version_got)
    else:
        Log.debug("stack-ide protocol version:", version_got)
class Controller:
    """Wires the database to the GTK view and forwards view signals."""

    def __init__(self):
        self.view_student = None
        self.finded_students = None
        self.logger = Log().get_logger()
        self.logger.debug("Controller is starting")
        self.logger.debug("Controller start database")
        self._database = DataBase(self.logger)
        self.logger.debug("Controller start view")
        # NOTE(review): the *_data_store attributes passed below are not
        # assigned anywhere in this class as shown — presumably provided by
        # a base class or elsewhere; verify.
        self._view = View(self.logger, self.student_data_store, self.majeur_data_store, self.nearest_data_store, self.notebook_data_store)
        self._view.connect('button-show-add-student', self.show_add_student_win)
        #'button-clear'
        self._view.connect('destroy', Gtk.main_quit)
        self._view.show_all()

    def __del__(self):
        # Drop references on teardown (attributes set elsewhere, if at all).
        self.logger.debug("Controlleris deleted")
        self.newClass = None
        self.database = None
        self.menu = None
class Server:
    """Serial-port server: parses '[DATA] <addr> - <value>' readings, tracks
    per-address Sensor state and writes the address back when its valve
    must be opened."""

    def __init__(self, serialNumber):
        # Maps sensor address (string) -> Sensor instance.
        self.listSensor = dict()
        self.log = Log()
        if (DEBUG):
            self.log.info("Run in debug mode")
        self.serial = serial.Serial()
        self.serial.baudrate = 115200
        self.serial.port = '/dev/pts/' + str(serialNumber)

    def start(self):
        """Open the serial port."""
        self.log.info("Start server")
        self.serial.open()

    def listen(self):
        """Read lines until an empty one arrives, feeding each valid DATA
        line into the matching Sensor."""
        line = self._readSerial()
        while (line):
            matchRegex = re.match("\[DATA\] ([0-9\.]*) - ([0-9]{1,2})", line)
            if (matchRegex and len(matchRegex.groups()) == 2):
                address, value = matchRegex.groups()
                self.log.info("Get value " + str(value) + " from " + str(address))
                if (address not in self.listSensor):
                    self.listSensor[address] = Sensor(address)
                self.listSensor[address].addValue(int(value))
                openValve = self.listSensor[address].checkIfValveMustBeOpen()
                if (openValve != None and openValve):
                    # Echo the address back over serial to open the valve.
                    self._writeSerial(address)
            line = self._readSerial()

    def stop(self):
        """Close the serial port."""
        self.log.info("Stopping server")
        self.serial.close()

    def _readSerial(self):
        # One decoded, stripped line; '' ends the listen() loop.
        line = str(self.serial.readline().decode('ascii')).strip()
        # if(DEBUG):
        #     print("[DEBUG] Read line: " + line)
        return line

    def _writeSerial(self, message):
        # Messages are newline-terminated ASCII.
        self.log.debug("Send data: " + str(message))
        self.serial.write((str(message).strip() + '\n').encode('ascii'))
def createUserInvoice(self, userId, month, amount):
    """Upsert the invoice for (userId, month) with the given amount,
    stamping it with the run's current time."""
    Log.debug("createUserInvoice()")
    Log.debug("user id:{0}, month:{1}, amount:{2}".format(userId, month, amount))
    try:
        query = UserInvoice.objects(userId=userId, month=month)
        query.update(
            set__userId=userId,
            set__month=month,
            set__invoiceAmount=amount,
            set__createdAt=self.curTime,
            upsert=True)
    except:
        Log.error(traceback.format_exc())
def stack_ide_start(project_path, package, response_handler):
    """ Start up a stack-ide subprocess for the window, and a
        thread to consume its stdout. """
    Log.debug("Calling stack ide start with PATH:", env['PATH'] if env else os.environ['PATH'])
    child = subprocess.Popen(
        ["stack", "ide", "start", package],
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        cwd=project_path,
        env=env,
        creationflags=CREATE_NO_WINDOW)
    return JsonProcessBackend(child, response_handler)
def apply_binary_simplification(gr, sym):
    """Simplify the binary op bound to *sym* using the op table's 'zero' and
    'identity' element rules, replacing sym in the graph when one applies."""
    op = gr._adj[sym]
    op_type = graph.get_opname(op)
    args = graph.get_args(op)
    types = gr._types(args)
    this_op = gr._op_table[op_type][types]
    Log.debug("simplifying :", sym)
    Log.debug(op)
    if 'zero' in this_op.keys():
        # Absorbing-element rule: when argument n equals the op's zero,
        # the whole expression collapses to that same argument.
        identities = this_op['zero']
        Log.warn(identities, args)
        for n, (ident, arg) in enumerate(zip(identities, args)):
            if ident is None:
                continue
            if arg == ident(*args):
                Log.debug("Shortcut: {} <- {}".format(sym, args[n]))
                gr.replace(sym, args[n])
    if 'identity' in this_op.keys():
        # Neutral-element rule: when argument n equals the op's identity,
        # the result is the OTHER argument (index flipped via int(not n)).
        identities = this_op['identity']
        for n, (ident, arg) in enumerate(zip(identities, args)):
            if ident is None:
                continue
            if arg == ident(*args):
                Log.debug("Shortcut: {} <- {}".format(sym, args[int(not n)]))
                gr.replace(sym, args[int(not n)])
def execute_train(
    agent: AbstractAgent,
    current_iteration: int,
    search_suffix: str,
    current_env_variables: EnvVariables,
    _start_time: float,
    random_search: bool = False,
) -> Tuple[EnvPredicatePair, float, float]:
    """Run agent.train on a worker thread, consuming ExecutionResult messages
    from a queue until the task reports completion.

    Returns (last EnvPredicatePair built, total training time, total
    regression time).
    """
    env_predicate_pairs = []
    communication_queue = Queue()
    logger = Log("execute_train")
    # agent.train sets seed globally (for tf, np and random)
    seed = np.random.randint(2 ** 32 - 1)
    # order of argument matters in the args param; must match the order of
    # args in the train method of agent
    thread = threading.Thread(
        target=agent.train,
        args=(seed, communication_queue, current_iteration, search_suffix, current_env_variables, random_search,),
    )
    thread.start()
    sum_training_time = 0.0
    sum_regression_time = 0.0
    while True:
        # blocking code — waits for the trainer to post its next result.
        data: ExecutionResult = communication_queue.get()
        logger.debug(
            "Env: {}, evaluates to {}".format(current_env_variables.get_params_string(), data.is_adequate_performance(),)
        )
        logger.debug("Info: {}".format(data.get_info()))
        env_predicate_pairs.append(
            EnvPredicatePair(
                env_variables=current_env_variables,
                predicate=data.is_adequate_performance(),
                regression=data.is_regression(),
                execution_info=data.get_info(),
                model_dirs=[search_suffix],
            )
        )
        sum_regression_time += data.get_regression_time()
        sum_training_time += data.get_training_time()
        if data.is_task_completed():
            break
    # Let the trainer thread wind down completely before returning.
    while thread.is_alive():
        time.sleep(1.0)
    logger.info("TIME ELAPSED: {}".format(str(datetime.timedelta(seconds=(time.time() - _start_time)))))
    return env_predicate_pairs[-1], sum_training_time, sum_regression_time
def read_stderr(self):
    """Drain the stack-ide stderr stream until the process exits, logging
    every non-empty line as a warning."""
    while self._process.poll() is None:
        try:
            error = self._process.stderr.readline().decode('UTF-8')
            if len(error) > 0:
                Log.warning("Stack-IDE error: ", error)
        except:
            # Reading failed; give up on this stream.
            Log.error("Stack-IDE stderr process ending due to exception: ", sys.exc_info())
            return
    Log.debug("Stack-IDE stderr process ended.")
def send_request(self, request):
    """Serialise *request* as JSON and write it (newline-terminated, UTF-8)
    to the stack-ide subprocess's stdin."""
    try:
        Log.debug("Sending request: ", request)
        payload = json.JSONEncoder().encode(request) + "\n"
        self._process.stdin.write(bytes(payload, 'UTF-8'))
        self._process.stdin.flush()
    except BrokenPipeError as e:
        Log.error("stack-ide unexpectedly died:", e)
        # self.die()
        # Ideally we would like to die(), so that, if the error is transient,
        # we attempt to reconnect on the next check_windows() call. The problem
        # is that the stack-ide (ide-backend, actually) is not cleaning up those
        # session.* directories and they would keep accumulating, one per second!
        # So instead we do:
        self.is_active = False
def render_GET(self, request):
    """Decode the 'msg' query argument, check its credentials, then dispatch
    to the handler registered for msg['method']; errors come back as JSON."""
    try:
        if 'msg' not in request.args:
            return bjson.dumps({'error': 'No msg'})
        msg = bjson.loads(request.args['msg'][0])
    except ValueError as e:
        # Malformed JSON in the msg argument (Python 2 print statement).
        print e.message
    else:
        Log.debug("handling " + str(msg))
        if not self._check_credentials(msg):
            return bjson.dumps({'error': "Invalid credentials"})
        if 'method' in msg:
            if msg['method'] not in self.handlers:
                return bjson.dumps({'error': 'No handler for method: ' + msg['method']})
            else:
                return self.handlers[msg['method']](self, msg)
        else:
            return bjson.dumps({'error': "No method"})
class Process(object):
    """Wraps a process looked up by name and exposes its open file handles."""

    def __init__(self, name='explorer.exe'):
        """Defaults to explorer.exe; *name* may be any process name."""
        self.log = Log()
        self.name = name
        self.pid = self.get_process_id()

    def get_process_id(self):
        """Return the pid of the first process whose name matches, else None."""
        for proc in psutil.process_iter():
            if proc.name == self.name:
                return proc.pid

    def log_process_handles(self):
        """Collect, log and return the unique open-file paths of the process,
        skipping paths under the escape directories."""
        paths = []
        owner = psutil.Process(self.pid)  # or PID of process
        for fh in owner.get_open_files():
            if self.check_escape_dir(fh.path):
                continue
            if fh.path not in paths:
                paths.append(fh.path)
        if paths:
            self.log.debug("File handle path open by process:")
            for path in paths:
                self.log.debug(str(path))
        return paths

    def check_escape_dir(self, path):
        """Return True when *path* falls under any of the ESCAPE_DIRS."""
        for edir in ESCAPE_DIRS:
            if edir in path:
                return True
        return False
class CapfileWorker(threading.Thread):
    """Worker thread that pulls capture filenames off a queue and decodes
    them with G711Decoder/PcapParser.

    A ``None`` queue item is the shutdown sentinel; it is re-queued so the
    remaining workers also see it.
    """

    def __init__(self, worker_id, decode_work_queue):
        threading.Thread.__init__(self)
        self.decode_work_queue = decode_work_queue
        self.worker_id = worker_id
        # Renamed from ``_stop``: threading.Thread has internal ``_stop``
        # machinery in some Python versions; shadowing it with an Event
        # breaks Thread.join(). Public stop()/stopped() API is unchanged.
        self._stop_event = threading.Event()
        self.logger = Log().getLogger()
        self.logger.debug("CapfileWorker(worker_id:%s, decode_work_queue:%s)." % (worker_id, decode_work_queue))

    def run(self):
        self.logger.debug("Thread started.")
        while not self._stop_event.isSet():
            try:
                filename = self.decode_work_queue.get()
                if (filename is None):
                    self.logger.debug("Became None element, put a None element to the queue for the others workers and break the work.")
                    self.decode_work_queue.put(None)
                    break
                self.process(filename)
            finally:
                self.decode_work_queue.task_done()
        self._stop_event.set()
        self.logger.debug("Thread stopped.")

    def process(self, filename):
        """Decode one pcap file to linear audio."""
        self.logger.info("Decode process started (worker_id:%s, file:'%s')" % (self.worker_id, filename))
        g711 = G711Decoder(filename, mix=1, linearize=1)
        PcapParser(filename, g711.decode).parse()
        g711.finalize()
        self.logger.info("Decode process finished (worker_id:%s, file:'%s')" % (self.worker_id, filename))

    def stop(self):
        """Ask the worker to stop; the None unblocks a pending queue.get()."""
        self.logger.debug("Received signal to stop the thread.")
        self._stop_event.set()
        self.decode_work_queue.put(None)

    def stopped(self):
        """Return True once the worker has been asked to stop or finished."""
        return self._stop_event.isSet()
def handle_response(self, data):
    """ Handles JSON responses from the backend """
    Log.debug("Got response: ", data)
    tag = data.get("tag")
    contents = data.get("contents")
    seq_id = data.get("seq")
    if seq_id is not None:
        # Sequenced responses go straight back to the waiting request handler.
        self._send_to_handler(contents, seq_id)
        return
    if tag == "ResponseWelcome":
        self._handle_welcome(contents)
    elif tag == "ResponseUpdateSession":
        self._handle_update_session(contents)
    elif tag == "ResponseShutdownSession":
        Log.debug("Stack-ide process has shut down")
    elif tag == "ResponseLog":
        Log.debug(contents.rstrip())
    else:
        Log.normal("Unhandled response: ", data)
class Tracer(threading.Thread):
    """Thread that downloads *url* to *filename* via urllib.urlretrieve,
    raising its own scheduling priority first on Windows."""

    def __init__(self, url, filename):
        self.url = url
        # Spinner state for the (currently disabled) progress display.
        self.i = 0
        self.filename = filename
        threading.Thread.__init__(self)
        self.logger = Log().getLogger()

    def monitor(self, n1, n2, n3):
        # n1: running number, n2: chunk size, n3: file size or -1 if unknown
        #print ["|", "/", "-", "\\"][self.i], "\r",
        self.i += 1
        self.i %= 4

    def run(self):
        if platform.system() == "Windows":
            w32 = ctypes.windll.kernel32
            tid = w32.GetCurrentThreadId()
            # Raise thread priority
            handle = w32.OpenThread(THREAD_SET_INFORMATION, False, tid)
            result = w32.SetThreadPriority(handle, THREAD_PRIORITY_ABOVE_NORMAL)
            w32.CloseHandle(handle)
        try:
            self.logger.debug("Trace started (url:'%s', filename:'%s')" % (self.url, self.filename))
            urllib.urlretrieve(self.url, self.filename, self.monitor)
            self.logger.debug("Trace finished (url:'%s', filename:'%s')" % (self.url, self.filename))
        except:
            # NOTE(review): bare except hides the failure cause; consider
            # logging sys.exc_info() here.
            self.logger.debug("Could not open Trace (url:'%s', filename:'%s')" % (self.url, self.filename))
def main():
    """Griffonbot entry point: wire the enabled input sources (twitter
    stream, mail, stdin) to the IRC bot's message queue, start everything
    and block until wait() returns."""
    # If all else fails,
    socket.setdefaulttimeout(60)
    sys.stderr.write("Griffonbot starting up...\n")
    log = Log(config.log)
    log.debug("Main: Setting up...")
    bot = IRCBot(config.irc, log)
    # Each source is constructed only when enabled in config; the matching
    # start() calls below are guarded by the same flags.
    if config.twitter.enable:
        stream = Stream(config.twitter, bot.queue_message, log)
    if config.mail.enable:
        mail = Mail(config.mail, bot.queue_message, log)
    if config.stdin.enable:
        stdin = Stdin(config.stdin, bot.queue_message, log)
    log.info("Main: Starting...")
    bot.start()
    if config.twitter.enable:
        stream.start()
    if config.mail.enable:
        mail.start()
    if config.stdin.enable:
        stdin.start()
    log.debug("Main: Now waiting...")
    wait()
    log.notice("Dead: Exiting...")
    sys.exit(1)
class Analyzer:
    """Dequeues simulated log events from the Azure queue and tallies them
    by severity into the summary table."""

    def __init__(self):
        self.log = Log()
        self.log.debug("Storage account for analyzer: {0}".format(config.AZURE_STORAGE_ACCOUNT_NAME))
        self.msgQueue = Queue(account_name=config.AZURE_STORAGE_ACCOUNT_NAME, account_key=config.AZURE_STORAGE_ACCOUNT_KEY, queue_name=config.AZURE_STORAGE_QUEUE_NAME)
        self.summary = SummaryTable(config.AZURE_STORAGE_ACCOUNT_NAME, config.AZURE_STORAGE_ACCOUNT_KEY, config.AZURE_STORAGE_SUMMARY_TABLE_NAME)
        self.keep_running = bool(config.ANALYZER_KEEP_RUNNING)
        self.log.debug("Analyzer keep running: {0}".format(self.keep_running))
        if self.keep_running:
            self.sleep_time = float(config.ANALYZER_SLEEP_TIME)
            self.log.debug("Sleep time between analyses: {0}".format(self.sleep_time))

    def incrementCount(self, event_type):
        """Read-modify-write the per-type counter (not atomic)."""
        count = self.summary.getCount(event_type)
        count = count + 1
        self.summary.updateCount(event_type, count)
        self.log.info(event_type + " count is now " + str(count))

    def processEvent(self, message):
        """Classify one message by the text before ' - ' and count it."""
        msg = message.message_text
        split = msg.find(" - ")
        # BUG FIX: str.find returns -1 (not falsy 0) when the separator is
        # absent; the original ``if (not split)`` misclassified such
        # messages as type msg[:-1] instead of OTHER.
        if split == -1:
            event_type = "OTHER"
        else:
            event_type = msg[:split]
        # Sleep to simulated a longer running process
        time.sleep(self.sleep_time)
        self.incrementCount(event_type)

    def fullAnalysis(self):
        """Drain the queue, counting and deleting each event; stops once a
        dequeue returns no events."""
        hostname = socket.gethostname()
        msg = hostname + ': Analyzing log event queue'
        notify.info(msg)
        while True:
            events = self.msgQueue.dequeue()
            if len(events) > 0:
                for event in events:
                    self.log.info("Dequeued: " + event.message_text)
                    try:
                        self.processEvent(event)
                        self.msgQueue.delete(event)
                        self.log.info("Counted and deleted: " + event.message_text)
                    except:
                        e = sys.exc_info()[0]
                        self.log.error("Could not process: " + event.message_text + " because %s" % e)
                        traceback.print_exc(file=sys.stdout)
            else:
                break
            time.sleep(self.sleep_time)
def simulate():
    """Generate simulated temperature readings as a random walk, pushing
    INFO/WARNING/ERROR messages onto the Azure queue until the configured
    number of actions (0 = run until stopped)."""
    log = Log()
    hostname = socket.gethostname()
    msgQueue = Queue(account_name=config.AZURE_STORAGE_ACCOUNT_NAME, account_key=config.AZURE_STORAGE_ACCOUNT_KEY, queue_name=config.AZURE_STORAGE_QUEUE_NAME)
    if int(config.SIMULATION_ACTIONS) > 0:
        msg = hostname + ': Attempting to simulate ' + str(config.SIMULATION_ACTIONS) + ' actions'
        log.debug(msg)
        notify.info(msg)
    else:
        msg = hostname + ': Simulating until stopped'
        log.debug(msg)
        notify.info(msg)
    temp = 70
    _actions = 0
    while int(config.SIMULATION_ACTIONS) == 0 or int(config.SIMULATION_ACTIONS) - _actions > 0:
        # Random walk, forced back toward range at the extremes.
        change = random.randint(-1, 1)
        if temp <= _too_cold:
            change = 1
        elif temp >= _too_hot:
            change = -1
        msgQueue.enqueue("Change since last reading: " + str(change), level="INFO")
        temp = temp + change
        msgQueue.enqueue("Current temperature: " + str(temp), level="INFO")
        if temp == _just_right:
            msgQueue.enqueue("That's perfect", level="INFO")
        elif temp < _just_right and temp > _too_cold:
            msgQueue.enqueue('Getting a little chilly', level="WARNING")
        elif temp > _just_right and temp < _too_hot:
            msgQueue.enqueue('Getting a touch warm', level="WARNING")
        elif temp <= _too_cold:
            msgQueue.enqueue('Too cold, how did this happen?', level="ERROR")
        elif temp >= _too_hot:
            msgQueue.enqueue('Too hot, how did this happen?', level="ERROR")
        else:
            # BUG FIX: the original used SQL-style '' escapes; Python treats
            # adjacent literals as implicit concatenation, so the message
            # silently lost its apostrophes ("Cant tell if its ...").
            msgQueue.enqueue("Can't tell if it's hot or cold", level="ERROR")
        # NOTE(review): closing the queue inside the loop looks suspicious
        # if enqueue needs an open connection next iteration — confirm.
        msgQueue.close()
        _actions = _actions + 1
        time.sleep(int(config.SIMULATION_DELAY))
    msg = hostname + ": Simulated " + str(config.SIMULATION_ACTIONS) + " actions and added them to the queue"
    log.debug(msg)
    notify.info(msg)
class G711Decoder:
    # Extracts (and optionally mixes) G.711 audio streams from a packet
    # capture, writing one WAV file per RTP stream. Python 2 code
    # (dict.has_key, str-indexed packet bytes).

    # Packet lengths we recognize.
    # PacketLength, AudioChunkLength, Offset of audio data, descriptive text
    lenmap = [
        {'len': 306, 'chunk': 240, 'offs': 66, 'encap' : 'VDSL' },      # VDSL
        {'len': 226, 'chunk': 160, 'offs': 66, 'encap' : 'VDSL' },      # VDSL
        # VDSL ComfortNoise ?? not seen yet
        {'len': 232, 'chunk': 160, 'offs': 72, 'encap' : 'DSLPPPoE' },  # DSL (PPPoE)
        {'len': 312, 'chunk': 240, 'offs': 72, 'encap' : 'DSLPPPoE' },  # DSL (PPPoE)
        {'len': 73,  'chunk': 0,   'offs': 72, 'encap' : 'DSLPPPoE' },  # DSL (PPPoE) ComfortNoise
        {'len': 214, 'chunk': 160, 'offs': 54, 'encap' : 'DSLETH' },    # DSL (ETH)
        {'len': 294, 'chunk': 240, 'offs': 54, 'encap' : 'DSLETH' },    # DSL (ETH)
        {'len': 55,  'chunk': 0,   'offs': 54, 'encap' : 'DSLETH' },    # DSL (ETH) ComfortNoise
    ]

    # Table lookup G.711 A-law -> PCM 16
    alaw2linear = array.array('h', [
        -5504, -5248, -6016, -5760, -4480, -4224, -4992, -4736,
        -7552, -7296, -8064, -7808, -6528, -6272, -7040, -6784,
        -2752, -2624, -3008, -2880, -2240, -2112, -2496, -2368,
        -3776, -3648, -4032, -3904, -3264, -3136, -3520, -3392,
        -22016,-20992,-24064,-23040,-17920,-16896,-19968,-18944,
        -30208,-29184,-32256,-31232,-26112,-25088,-28160,-27136,
        -11008,-10496,-12032,-11520, -8960, -8448, -9984, -9472,
        -15104,-14592,-16128,-15616,-13056,-12544,-14080,-13568,
        -344,  -328,  -376,  -360,  -280,  -264,  -312,  -296,
        -472,  -456,  -504,  -488,  -408,  -392,  -440,  -424,
        -88,   -72,   -120,  -104,  -24,   -8,    -56,   -40,
        -216,  -200,  -248,  -232,  -152,  -136,  -184,  -168,
        -1376, -1312, -1504, -1440, -1120, -1056, -1248, -1184,
        -1888, -1824, -2016, -1952, -1632, -1568, -1760, -1696,
        -688,  -656,  -752,  -720,  -560,  -528,  -624,  -592,
        -944,  -912,  -1008, -976,  -816,  -784,  -880,  -848,
        5504, 5248, 6016, 5760, 4480, 4224, 4992, 4736,
        7552, 7296, 8064, 7808, 6528, 6272, 7040, 6784,
        2752, 2624, 3008, 2880, 2240, 2112, 2496, 2368,
        3776, 3648, 4032, 3904, 3264, 3136, 3520, 3392,
        22016, 20992, 24064, 23040, 17920, 16896, 19968, 18944,
        30208, 29184, 32256, 31232, 26112, 25088, 28160, 27136,
        11008, 10496, 12032, 11520, 8960, 8448, 9984, 9472,
        15104, 14592, 16128, 15616, 13056, 12544, 14080, 13568,
        344, 328, 376, 360, 280, 264, 312, 296,
        472, 456, 504, 488, 408, 392, 440, 424,
        88, 72, 120, 104, 24, 8, 56, 40,
        216, 200, 248, 232, 152, 136, 184, 168,
        1376, 1312, 1504, 1440, 1120, 1056, 1248, 1184,
        1888, 1824, 2016, 1952, 1632, 1568, 1760, 1696,
        688, 656, 752, 720, 560, 528, 624, 592,
        944, 912, 1008, 976, 816, 784, 880, 848 ])

    # Table lookup G.711 u-law -> PCM 16
    ulaw2linear = array.array('h', [
        -32124,-31100,-30076,-29052,-28028,-27004,-25980,-24956,
        -23932,-22908,-21884,-20860,-19836,-18812,-17788,-16764,
        -15996,-15484,-14972,-14460,-13948,-13436,-12924,-12412,
        -11900,-11388,-10876,-10364, -9852, -9340, -8828, -8316,
        -7932, -7676, -7420, -7164, -6908, -6652, -6396, -6140,
        -5884, -5628, -5372, -5116, -4860, -4604, -4348, -4092,
        -3900, -3772, -3644, -3516, -3388, -3260, -3132, -3004,
        -2876, -2748, -2620, -2492, -2364, -2236, -2108, -1980,
        -1884, -1820, -1756, -1692, -1628, -1564, -1500, -1436,
        -1372, -1308, -1244, -1180, -1116, -1052, -988,  -924,
        -876,  -844,  -812,  -780,  -748,  -716,  -684,  -652,
        -620,  -588,  -556,  -524,  -492,  -460,  -428,  -396,
        -372,  -356,  -340,  -324,  -308,  -292,  -276,  -260,
        -244,  -228,  -212,  -196,  -180,  -164,  -148,  -132,
        -120,  -112,  -104,  -96,   -88,   -80,   -72,   -64,
        -56,   -48,   -40,   -32,   -24,   -16,   -8,    0,
        32124, 31100, 30076, 29052, 28028, 27004, 25980, 24956,
        23932, 22908, 21884, 20860, 19836, 18812, 17788, 16764,
        15996, 15484, 14972, 14460, 13948, 13436, 12924, 12412,
        11900, 11388, 10876, 10364, 9852, 9340, 8828, 8316,
        7932, 7676, 7420, 7164, 6908, 6652, 6396, 6140,
        5884, 5628, 5372, 5116, 4860, 4604, 4348, 4092,
        3900, 3772, 3644, 3516, 3388, 3260, 3132, 3004,
        2876, 2748, 2620, 2492, 2364, 2236, 2108, 1980,
        1884, 1820, 1756, 1692, 1628, 1564, 1500, 1436,
        1372, 1308, 1244, 1180, 1116, 1052, 988, 924,
        876, 844, 812, 780, 748, 716, 684, 652,
        620, 588, 556, 524, 492, 460, 428, 396,
        372, 356, 340, 324, 308, 292, 276, 260,
        244, 228, 212, 196, 180, 164, 148, 132,
        120, 112, 104, 96, 88, 80, 72, 64,
        56, 48, 40, 32, 24, 16, 8, 0 ])

    ''' Constructor '''
    def __init__(self, capfile, mix=1, linearize=1):
        self.mix = mix
        # Output base name: capture filename without extension
        self.file = capfile.split('.')[0]
        self.linearize = linearize
        # Stream descriptor array: one dict per RTP stream (SSI)
        self.sda = []
        self.logger = Log().getLogger()

    ''' Write RIFF header '''
    def write_RIFF_header(self, sd):
        # Rewind and (re)write the full 58-byte WAV header for descriptor sd.
        # Called once when the stream file is created and again from
        # finalize() to patch in the final sample counts.
        sd['fo'].seek(0,0)
        # Write RIFF header
        sd['fo'].write(struct.pack("<BBBBLBBBBBBBBBBBBHHLLHHBBBBBBBBBBLBBBBL",
            0x52, 0x49, 0x46, 0x46,                          # 'RIFF'
            (sd['blockalign'] * sd['nr_samples']) + 50,      # total file size (data + sizeof(waveheader) - 8)
            0x57, 0x41, 0x56, 0x45, 0x66, 0x6d, 0x74, 0x20,  # 'WAVEfmt '
            0x12, 0x00, 0x00, 0x00,                          # chunk data size
            sd['format'],                                    # format tag
            sd['channels'],                                  # number of channels
            sd['samplerate'],                                # sample rate in Hz = 8000
            sd['samplerate'] * sd['blockalign'],             # avg bytes per second: sample rate for 8 bit, twice that for 16 bit
            sd['blockalign'],                                # blockalign
            sd['bitspersample'],                             # significant bits per sample
            0x00, 0x00,                                      # extra format bytes
            0x66, 0x61, 0x63, 0x74,                          # 'fact'
            0x04, 0x00, 0x00, 0x00,                          # chunk data size
            sd['nr_samples'],                                # fact chunk, unsigned long #samples, offset 46
            0x64, 0x61, 0x74, 0x61,                          # 'data'
            sd['blockalign'] * sd['nr_samples'],             # data chunk, unsigned long #samples in byte, offset 54
            )
        )

    ''' Find the SSI in our stream descriptor array. If not found, allocate a new entry '''
    def find_sd_slot(self, ssi):
        # Search SSI. Slot already allocated?
        for i in range(len(self.sda)):
            if self.sda[i]['ssi'] == ssi:
                return self.sda[i]
        # Not found, allocate a new slot
        i = len(self.sda)
        self.sda.append({
            'ssi' : ssi,
            'index' : i,
            'errors' : 0,
            'nr_samples': 0,
        })
        return self.sda[i]

    ''' Decoder, capable of extracting and mixing G.711 audio streams from a packet capture file '''
    def decode(self, time_sec, time_usec, packet, len):
        # NOTE(review): parameter `len` shadows the builtin; kept as-is.
        # This is a simple parser. The payload is not fully decoded. Instead
        # some assumptions based upon the length of the captured package are
        # checked.
        for candidate in self.lenmap:
            if len == candidate['len']:
                # Read 12 byte RTP header data in front of the audio payload
                # and analyze it. Important: SSI (Synchronization Source
                # Identifier), sequence number, payload type and timestamp.
                (streamsetup, payloadtype, seqnr, timestamp, ssi) = struct.unpack('>BBHLL', packet[candidate['offs']-12 : candidate['offs']])
                # Probably we need some more assertions here, but up to now it worked well
                if payloadtype not in [8, 0, 13]:
                    self.logger.debug('Unsupported payload type %s' % payloadtype)
                    return
                # Find the SSI of current stream in stream descriptor array.
                # If not contained, allocate a slot and add it.
                sd = self.find_sd_slot(ssi)
                # Set time of first appearance of this particular stream.
                # NOTE(review): the one-time stream setup below (addresses,
                # format fields, output file) is placed inside this branch —
                # reconstructed from a collapsed source; confirm against the
                # original layout.
                if not sd.has_key('first_seen_at'):
                    sd['first_seen_at'] = datetime.datetime.fromtimestamp(time_sec) + datetime.timedelta(microseconds = time_usec)
                    # Take over ip source/destination information
                    (src_ip, dst_ip) = struct.unpack('>LL', packet[candidate['offs']-28 : candidate['offs']-20])
                    (src_port, dst_port) = struct.unpack('>HH', packet[candidate['offs']-20 : candidate['offs']-16])
                    sd['source'] = str(src_ip) + str(src_port)
                    sd['destination'] = str(dst_ip) + str(dst_port)
                    sd['payloadtype'] = payloadtype
                    sd['chunksize'] = candidate['chunk']
                    sd['channels'] = 1                   # Mono in any case
                    sd['samplerate'] = 8000              # 8kHz
                    if self.linearize:
                        sd['format'] = 1                 # PCM
                        sd['bitspersample'] = 16         # 16 bit
                    else:
                        sd['format'] = 6                 # CCITT G.711 A default
                        if sd['payloadtype'] == 0:
                            sd['format'] = 7             # CCITT G.711 u
                        sd['bitspersample'] = 8          # 8 bit
                    sd['blockalign'] = sd['bitspersample'] / 8 * sd['channels']
                    # Open target file for stream
                    sd['fo'] = open("%s_%d_.wav" % (self.file, sd['index']), 'wb+')
                    # Reserve place for RIFF header
                    self.write_RIFF_header(sd)
                # Do we already have received a sequence number? If yes,
                # check whether we are in sequence.
                if sd.has_key('expected') and seqnr != sd['expected']:
                    sd['errors'] += 1
                    lost = abs(sd['expected']-seqnr)
                else:
                    lost = 0
                # Advance sequence number
                sd['expected'] = (seqnr + 1) & 0xFFFF
                # Comfort Noise?
                if payloadtype == 13:
                    if not sd.has_key('ts_cn'):
                        sd['ts_cn'] = timestamp          # Indicates the number of samples
                    return                               # Just react on the first seen CN packet
                else:
                    # Was there a CN sequence before?
                    if sd.has_key('ts_cn'):
                        pad = array.array('B', (0 for i in range(sd['blockalign']*(timestamp - sd['ts_cn']))))  # Silence
                        sd['fo'].write(pad)
                        sd['nr_samples'] += timestamp - sd['ts_cn']
                        del sd['ts_cn']
                # Something lost? Pad with silence
                while lost > 0:
                    pad = array.array('B', (0 for i in range(sd['blockalign']*candidate['chunk'])))
                    sd['fo'].write(pad)
                    sd['nr_samples'] += candidate['chunk']
                    lost -= 1
                # Write the audio out off the packet, this is LAME!!!
                start = candidate['offs']
                end = candidate['offs']+candidate['chunk']
                # This takes a lot of time...
                if payloadtype == 8:
                    if self.linearize:
                        audio = array.array('h', (self.alaw2linear[ord(packet[i])] for i in range(start,end)))
                    else:
                        audio = array.array('B', (ord(packet[i]) for i in range(start,end)))
                else:
                    if self.linearize:
                        audio = array.array('h', (self.ulaw2linear[ord(packet[i])] for i in range(start,end)))
                    else:
                        audio = array.array('B', (ord(packet[i]) for i in range(start,end)))
                sd['fo'].write(audio)
                sd['nr_samples'] += candidate['chunk']
                return

    ''' Finalize the extracted audio wav files '''
    def finalize(self):
        sdfound = []
        # Finalize wave file, patch header
        for sd in self.sda:
            # Finalize RIFF header
            self.write_RIFF_header(sd)
            if self.mix:
                # Find associated streams to mix (opposite direction of the
                # same conversation: src/dst swapped).
                for sdother in self.sda:
                    if sd['source'] == sdother['destination'] and sdother['source'] == sd['destination']:
                        if sd['index'] not in sdfound and sdother['index'] not in sdfound:
                            # Prevent second find
                            sdfound.append(sd['index'])
                            sdfound.append(sdother['index'])
                            # Check: Input linearized already?
                            if not self.linearize:
                                self.logger.error("Can't mix streams that are not linearized")
                                break
                            self.logger.debug("Streams %d (ssi=%d) and %d (ssi=%d) are belonging together" % (sd["index"], sd['ssi'], sdother['index'], sdother['ssi']))
                            # Calculate deltaT between both streams
                            timediff = abs(sd['first_seen_at']-sdother['first_seen_at'])
                            if sd['first_seen_at'] <= sdother['first_seen_at']:
                                leader = sd
                                follower = sdother
                            else:
                                leader = sdother
                                follower = sd
                            # Calculate missing samples for follower and resulting total
                            # sample count for the mix (125 usec per sample at 8000 Hz)
                            follower_samples_behind = int((timediff.seconds*1000000.0 + timediff.microseconds) / 125)
                            total_samples = follower_samples_behind + max(leader['nr_samples'] - follower_samples_behind, follower['nr_samples'])
                            self.logger.debug("Stream %d is %s h behind stream %d (delta=%d samples). Total samples: %d" % (follower['index'], timediff, leader['index'], follower_samples_behind, total_samples))
                            # Open mix file
                            sdmix = {'channels':1, 'samplerate':8000, 'format':1, 'bitspersample':16, 'blockalign':2, 'nr_samples':total_samples}
                            sdmix['fo'] = open("%s_mix_%d_%d.wav" % (self.file, leader['index'], follower['index']), 'wb+')
                            self.write_RIFF_header(sdmix)
                            # Seek both files to start of audio data (skip RIFF header)
                            leader['fo'].seek(58, 0)
                            follower['fo'].seek(58, 0)
                            self.logger.debug("leader: %d, follower: %d, total: %d, follower_behind: %d" % (leader['nr_samples'], follower['nr_samples'], total_samples, follower_samples_behind))
                            # Write trailing overhead from leader first
                            sdmix['fo'].write(leader['fo'].read(leader['blockalign']*follower_samples_behind))
                            total_samples_written = follower_samples_behind
                            # Calculate the count of samples which have to be mixed from both sources
                            mixed_cnt = min(leader['nr_samples'] - follower_samples_behind, follower['nr_samples'])
                            self.logger.debug("Mixed samples count: %d" % mixed_cnt)
                            # Mix it, save it
                            for sample in range(0, mixed_cnt):
                                # Read sample, -3 db
                                leader_sample = float(struct.unpack("<h", leader['fo'].read(leader['blockalign']))[0]) * 0.707
                                follower_sample = float(struct.unpack("<h", follower['fo'].read(follower['blockalign']))[0]) * 0.707
                                # Add both samples
                                sum = leader_sample + follower_sample
                                # Clamp to prevent integer overflow
                                if sum > 32767:
                                    sum = 32767
                                elif sum < - 32768:
                                    sum = -32768
                                # Write the sum. Would be faster to write it into an array here
                                # and flush that to disk afterwards. But the array could blow the
                                # memory, hence we write it as we have it.
                                sdmix['fo'].write(struct.pack("<h", sum))
                            total_samples_written += mixed_cnt
                            # Handle the remaining rest, wherever a rest is
                            leader_rest = leader['nr_samples'] - follower_samples_behind - mixed_cnt
                            follower_rest = follower['nr_samples'] - mixed_cnt
                            if leader_rest:
                                tmp = leader['fo'].read(leader['blockalign']*leader_rest)
                                sdmix['fo'].write(tmp)
                                total_samples_written += leader_rest
                                self.logger.debug("Rest taken from leader: %d" % leader_rest)
                            else:
                                if follower_rest:
                                    tmp = follower['fo'].read(follower['blockalign']*follower_rest)
                                    sdmix['fo'].write(tmp)
                                    total_samples_written += follower_rest
                                    self.logger.debug("Rest taken from follower: %d" % follower_rest)
                            sdmix['fo'].close()
                            duration_in_seconds = int((total_samples_written * 125) / 1000000.0)
                            self.logger.debug("Total mixed samples written: %d (duration %s h)" % (total_samples_written, str(datetime.time(duration_in_seconds // 3600, (duration_in_seconds % 3600) // 60, duration_in_seconds % 60))))
            # Close
            sd['fo'].close()
class Queue:
    def __init__(self, account_name, account_key, queue_name="logqueue"):
        """Initialize a queue. The type is set by the 'ACS_LOGGING_QUEUE_TYPE'
        environment variable. If it is set to 'AzureStorageQueue' then values
        must be provided for 'account_name' and 'account_key' which are values
        associated with the Azure Storage account. 'queue_name' is optional
        and defaults to 'logqueue'.
        """
        self.log = Log()
        self.queue_type = config.ACS_LOGGING_QUEUE_TYPE
        self.queue_name = queue_name
        # self.log.debug("Queue type: " + self.queue_type + " / " + self.queue_name)
        if self.queue_type == "AzureStorageQueue":
            self.createAzureQueues(account_name, account_key)
        elif self.queue_type == "LocalFile":
            self.file_queue = open(config.UNPROCESSED_LOG_FILE, "w+")
        else:
            # was `queue_type` (undefined name -> NameError)
            self.log.error("Unknown queue type: " + self.queue_type)

    def createAzureQueues(self, account_name, account_key):
        """Create a queue for unprocessed log messages. Entries in the queue
        will be dequeued, processed and deleted upon success.
        """
        self.queue_service = QueueService(account_name, account_key)
        self.queue_service.create_queue(self.queue_name)

    def close(self):
        """Perform any necessary cleanup on the queue at the end of a run."""
        if self.queue_type == "AzureStorageQueue":
            pass
        elif self.queue_type == "LocalFile":
            self.file_queue.close()
        else:
            # was `queue_type` (undefined name -> NameError)
            self.log.error("Unknown queue type: " + self.queue_type)

    def enqueue(self, msg, level="INFO"):
        """Prefix msg with its level and append it to the backing queue."""
        msg = level + " - " + msg
        if self.queue_type == "LocalFile":
            # was bare `file_queue` (undefined name -> NameError)
            self.file_queue.write(msg + "\n")
        elif self.queue_type == "AzureStorageQueue":
            self.queue_service.put_message(self.queue_name, msg)
        self.log.debug(msg)

    def dequeue(self):
        """Return a batch of pending messages (possibly empty)."""
        messages = []
        if self.queue_type == "LocalFile":
            with open(config.UNPROCESSED_LOG_FILE, "r") as f:
                # was readlines()[1]: that is a single string (the second
                # line), not a list of messages
                messages = f.readlines()
        elif self.queue_type == "AzureStorageQueue":
            messages = self.queue_service.get_messages(self.queue_name)
        return messages

    def delete(self, message):
        """Remove a processed message from the Azure queue."""
        self.queue_service.delete_message(self.queue_name, message.message_id, message.pop_receipt)
        # with open(config.PROCESSED_LOG_FILE, 'a') as processed:
        #     processed.write(log)
        # os.remove(config.UNPROCESSED_LOG_FILE)

    def delete_queue(self, queue_name):
        """Delete the named Azure queue without failing if it is missing."""
        self.queue_service.delete_queue(queue_name, False)

    def getLength(self):
        """Get the approximate length of the queue."""
        queue_metadata = self.queue_service.get_queue_metadata(self.queue_name)
        count = queue_metadata["x-ms-approximate-messages-count"]
        return count
# Compute the average stroke width of a glyph image and drop skeleton
# branches shorter than a fraction of it.
orig_img_path = './data/AaHeiTi/65E2.bmp'
skel_img_path = './data/output/AaHeiTi_65E2_skeleton.pgm'
report_path = './data/output/65E2 Branch information.csv'

info = read_branch_info(report_path)
dist_lst = [i.length for i in info]

# !=== Calculate average_stroke_width = area S / base b, i.e. NPB/NSP
im = cv2.imread(orig_img_path, cv2.IMREAD_GRAYSCALE)
NPB = np.count_nonzero(im)
NSP = sum(dist_lst)
average_stroke_width = round(float(NPB) / NSP, 3)

wrong_stroke_factor = 0.433333333333
stroke_thresh = wrong_stroke_factor * average_stroke_width
log.debug('w=S/b: %s=%s/%s' % (average_stroke_width, NPB, NSP))
# The original passed stroke_thresh as a second positional argument; the
# logger treats that as a %-format arg and drops it (no placeholder).
log.debug('stroke_thresh: %s' % stroke_thresh)

# !=== Abandon branch shorter than stroke_thresh
def drop_wrong_branch(branch):
    """Keep branches longer than stroke_thresh; log and drop the rest."""
    if branch.length > stroke_thresh:
        return True
    # The original logged branch.dist; the records expose .length
    # (as used for dist_lst above) -- TODO confirm no .dist attribute.
    log.debug('dist=%s, %s droped' % (branch.length, branch))
    return False

# list() so len() works on Python 3, where filter() returns an iterator.
info3 = list(filter(drop_wrong_branch, info))
log.debug('%s branches droped' % (len(info) - len(info3)))

skel = Image.open(skel_img_path)
log.debug('skel size: %s' % (skel.size,))
class Drives(object):
    """Track the set of logical drive letters and report changes (Windows only)."""

    def __init__(self):
        """Snapshot the currently available drives into the class-level
        Drives.drives list, which watch() uses for change detection."""
        self.log = Log()
        Drives.drives = self.get_drives()

    def get_drives(self):
        """Return the list of drive letters currently present."""
        drives = []
        bitmask = windll.kernel32.GetLogicalDrives()
        # GetLogicalDrives returns a bitmask: bit 0 = A:, bit 1 = B:, ...
        for letter in string.uppercase:
            if bitmask & 1:
                drives.append(letter)
            bitmask >>= 1
        return drives

    def get_drive_stats(self, drive):
        """Return (total, free, used) space for `drive`, in gigabytes."""
        gb = float(1024 * 1024 * 1024)
        sec_per_cl, byte_per_cl, free_cl, tot_cl = \
            win32file.GetDiskFreeSpace(drive + ":\\")
        total_space = tot_cl * sec_per_cl * byte_per_cl
        free_space = free_cl * sec_per_cl * byte_per_cl
        return total_space / gb, free_space / gb, (total_space - free_space) / gb

    def watch(self):
        """Poll once for hardware changes: log removed drives and, for each
        newly attached drive, log its space statistics. Intended to be
        called repeatedly."""
        tmpdrives = self.get_drives()
        if len(Drives.drives) != len(tmpdrives):
            self.log.debug("Detected hardware changes")
            for dri in Drives.drives:
                if dri not in tmpdrives:
                    self.log.debug("Drive removed: %s\\" % dri)
            for dri in tmpdrives:
                if dri not in Drives.drives:
                    self.log.debug("New drive attached to system: %s\\" % dri)
                    to_sp, free_sp, use_sp = self.get_drive_stats(dri)
                    self.log.debug("Disk Space Information:")
                    # get_drive_stats returns gigabytes; the original
                    # messages wrongly claimed "Bytes".
                    self.log.debug("Total Space: %s GB" % (to_sp))
                    self.log.debug("Free Space : %s GB" % (free_sp))
                    self.log.debug("Used Space : %s GB" % (use_sp))
            Drives.drives = tmpdrives
def __init__(self, io_loop=None, **kwargs):
    """Initialize the underlying TCP server and record the IO loop in use."""
    TCPServer.__init__(self, io_loop=io_loop, **kwargs)
    # Trace which loop this server instance was bound to.
    Log.debug("==========io_loop:{}.".format(io_loop))
class Memory:
    """ Memory """

    def __init__(self, gb):
        """
        :type gb: gb.GB
        """
        # Logger
        self.logger = Log()

        # Communication with other components
        self.gb = gb

        # State initialization
        self.tile_set_1_only = None   # These will be initialized
        self.tile_set_shared = None   # below, this is just to
        self.tile_set_0_only = None   # have them declared in
        self.tile_maps = None         # the __init__ method

        # Cartridge bank 0, so nothing to initialize:      0x3FFF - 0x0000
        # Cartridge bank N, so nothing to initialize:      0x7FFF - 0x4000
        self._generate_tile_set_memory()  # VRAM sets:     0x97FF - 0x8000
        self._generate_tile_map_memory()  # VRAM maps:     0x9FFF - 0x9800
        self.external_ram = self._generate_memory_map((0xBFFF - 0xA000 + 1) * 4)  # Maximum RAM size, with 4 banks
        self.internal_ram = self._generate_memory_map(0xDFFF - 0xC000 + 1)
        # Internal RAM echo, so nothing to initialize:     0xFDFF - 0xE000
        self.oam = self._generate_memory_map(0xFE9F - 0xFE00 + 1)
        # Empty, so nothing to initialize:                 0xFEFF - 0xFEA0
        self.io = self._generate_memory_map(0xFF7F - 0xFF00 + 1)
        self.hram = self._generate_memory_map(0xFFFE - 0xFF80 + 1)
        self.ie = 0x00  # Single address, no array:        0xFFFF

        self.cartridge: bytes = None
        self.boot_rom: bytes = None
        self.boot_rom_loaded = False
        self.mbc: MBC = None

    def _generate_tile_set_memory(self):
        """
        GameBoy VRAM memory stores 2 tile sets. Each tile set contains 255
        tiles (8x8 images). However, the GameBoy does not have enough memory
        for two full sets, so they share half (128) their tiles. This method
        generates the data structure to handle this.
        """
        self.tile_set_1_only = [self._generate_blank_tile() for _ in range(128)]  # 8000-87FF
        self.tile_set_shared = [self._generate_blank_tile() for _ in range(128)]  # 8800-8FFF
        self.tile_set_0_only = [self._generate_blank_tile() for _ in range(128)]  # 9000-97FF

    @staticmethod
    def _generate_blank_tile():
        """ Creates an empty matrix, representing one tile (8x8 pixels) """
        return [[0]*8 for _ in range(8)]

    def _generate_tile_map_memory(self):
        """
        GameBoy VRAM memory stores 2 tile maps. Each tile map contains 32x32
        tile IDs, and differently from the tile sets, there is enough memory
        for two separate maps. This method generates the data structure to
        handle this.
        """
        self.tile_maps = [
            [[0]*32 for _ in range(32)],  # Tile 0 comes first, uses memory 9800-9BFF
            [[0]*32 for _ in range(32)]   # Tile 1 comes later, uses memory 9C00-9FFF
        ]

    def load_cartridge(self, cartridge_data: bytes):
        """
        Stores reference to cartridge data, to be accessed later. Also
        instantiates the MBC specified in cartridge.

        :param cartridge_data: Cartridge data as bytes
        """
        self.cartridge = cartridge_data
        mbc_type = self.cartridge[0x0147]
        self.mbc = MBC(mbc_type)
        # Bug fix: the original discarded load_boot_rom()'s return value, so
        # self.boot_rom stayed None and boot_rom_loaded was always False.
        self.boot_rom = self.load_boot_rom()
        self.boot_rom_loaded = (self.boot_rom is not None)

    def _read(self, address: int):
        """
        Read a byte from a location mapped in memory, wherever it is.

        :param address: Address to read
        :return: Value at specified address
        """
        if address <= 0x3FFF:    # 0x0000 - 0x3FFF: Cartridge bank 0
            if 0x0000 <= address <= 0x00FF and self.boot_rom_loaded:
                return self.boot_rom[address]
            return self.cartridge[address]
        elif address <= 0x7FFF:  # 0x4000 - 0x7FFF: Cartridge bank N
            return self.cartridge[self.mbc.cartridge_bank_offset() + (address-0x4000)]
        elif address <= 0x9FFF:  # 0x8000 - 0x9FFF: Video RAM
            if address <= 0x97FF:  # Tile sets memory
                return self._read_tile_set(address)
            else:                  # Tile maps memory
                return self._read_tile_map(address)
        elif address <= 0xBFFF:  # 0xA000 - 0xBFFF: External RAM
            if self.mbc.external_ram_is_enabled:
                return self.external_ram[self.mbc.external_ram_bank_offset() + (address-0xA000)]
            else:
                return 0x00  # TODO: Is this the correct behavior?
        elif address <= 0xDFFF:  # 0xC000 - 0xDFFF: Internal RAM
            return self.internal_ram[address - 0xC000]
        elif address <= 0xFDFF:  # 0xE000 - 0xFDFF: Internal RAM Echo
            return self.internal_ram[address - 0xE000]
        elif address <= 0xFE9F:  # 0xFE00 - 0xFE9F: Object Attribute Memory (OAM)
            return self.oam[address - 0xFE00]
        elif address <= 0xFEFF:  # 0xFEA0 - 0xFEFF: Empty area
            return 0x00
        elif address <= 0xFF7F:  # 0xFF00 - 0xFF7F: I/O Memory
            return self.io[address - 0xFF00]
        elif address <= 0xFFFE:  # 0xFF80 - 0xFFFE: High RAM
            return self.hram[address - 0xFF80]
        elif address == 0xFFFF:  # 0xFFFF: Interrupts Enable Register (IE)
            return self.ie

    def _read_tile_set(self, address: int):
        # Rebuild one of the two raw bytes of a tile line from the stored
        # per-pixel values (0-3).
        tile_line, tile_line_byte_to_read = self._find_tile_set(address)
        byte = 0x00
        for pixel_value in tile_line:
            # pixel_value is 0-3, and we want the binary value from the desired position
            byte = (byte << 1) | ((pixel_value >> tile_line_byte_to_read) & 0b00000001)
        return byte

    def _read_tile_map(self, address: int):
        tile_map_line, tile_map_pos_number = self._find_tile_map(address)
        return tile_map_line[tile_map_pos_number]

    def _write(self, address: int, value: int):
        """
        Writes a byte to a location mapped in memory, wherever it is.

        :param address: Address where data will be written
        :param value: Data to write
        """
        # Bug fix: the MBC ranges below were independent `if` statements in
        # the original, so a write to e.g. 0x1000 fell through into ALL of
        # the MBC handlers and then the VRAM branch (with a negative tile
        # index). The ranges are disjoint; use one elif chain.
        if address <= 0x1FFF:    # 0x0000 - 0x1FFF: MBC - Enable/Disable external RAM
            self.mbc.change_external_ram_status(value)
        elif address <= 0x3FFF:  # 0x2000 - 0x3FFF: MBC - Change cartridge bank mapped to N
            self.mbc.change_cartridge_bank(value)
        elif address <= 0x5FFF:  # 0x4000 - 0x5FFF: MBC - Change external RAM bank mapped
            self.mbc.change_ram_bank(value)
        elif address <= 0x7FFF:  # 0x6000 - 0x7FFF: MBC - Change ROM/RAM Mode
            self.mbc.change_banking_mode(value)
        elif address <= 0x9FFF:  # 0x8000 - 0x9FFF: Video RAM
            if address <= 0x97FF:  # Tile sets memory
                return self._write_tile_set(address, value)
            else:                  # Tile maps memory
                return self._write_tile_map(address, value)
        elif address <= 0xBFFF:  # 0xA000 - 0xBFFF: External RAM
            if self.mbc.external_ram_is_enabled:
                self.external_ram[address - 0xA000] = value
        elif address <= 0xDFFF:  # 0xC000 - 0xDFFF: Internal RAM
            self.internal_ram[address - 0xC000] = value
        elif address <= 0xFDFF:  # 0xE000 - 0xFDFF: Internal RAM Echo
            self.internal_ram[address - 0xE000] = value
        elif address <= 0xFE9F:  # 0xFE00 - 0xFE9F: Object Attribute Memory (OAM)
            self.oam[address - 0xFE00] = value
        elif address <= 0xFEFF:  # 0xFEA0 - 0xFEFF: Empty area is empty, so nothing to do
            return
        elif address <= 0xFF7F:  # 0xFF00 - 0xFF7F: I/O Memory
            self.io[address - 0xFF00] = value
            if 0xFF40 <= address <= 0xFF47:
                self.gb.gpu.update_gpu_register(address, value)
            elif address == 0xFF50 and value == 1:
                self.boot_rom_loaded = False  # Once the boot rom is unmapped it cannot be mapped again, so no "= True"
        elif address <= 0xFFFE:  # 0xFF80 - 0xFFFE: High RAM
            self.hram[address - 0xFF80] = value
        elif address == 0xFFFF:  # 0xFFFF: Interrupts Enable Register (IE)
            self.ie = value

    def _write_tile_set(self, address: int, value: int):
        tile_line, tile_line_byte_to_change = self._find_tile_set(address)
        # For each value in the tile line, retrieve the bit that will not be
        # modified, and combine it with the corresponding bit received.
        for i in range(8):
            bit_to_keep = (tile_line[i] >> int(not tile_line_byte_to_change)) & 0b00000001
            bit_to_change = (value >> (7-i)) & 0b00000001
            new_value = (bit_to_keep << int(not tile_line_byte_to_change)) | (bit_to_change << tile_line_byte_to_change)
            tile_line[i] = new_value  # Edit the existing list, not replace it, so shared tiles keep working

    def _write_tile_map(self, address: int, value: int):
        tile_map_line, tile_map_pos_number = self._find_tile_map(address)
        tile_map_line[tile_map_pos_number] = value

    def _find_tile_set(self, address: int):
        # Map a raw VRAM address onto (tile line list, byte index 0/1).
        v_address = address - 0x8000
        tile_number = v_address // 16  # length of each tile (8 lines * 2 bytes each = 16 bytes)
        if tile_number < 128:
            tile_number_fixed = tile_number
            tile_set = self.tile_set_1_only
        elif tile_number < 256:
            tile_number_fixed = tile_number - 128
            tile_set = self.tile_set_shared
        else:
            tile_number_fixed = tile_number - 256
            tile_set = self.tile_set_0_only
        tile_byte_number = v_address - (tile_number * 16)  # Will get a value 0-15, since each GameBoy tile has 8 lines,
        tile_line_number = tile_byte_number // 2           # each made from 2 bytes. We are storing the tile line final
        tile_line_byte_to_change = tile_byte_number % 2    # value instead, so we need to convert it back to 2 bytes.
        tile_line = tile_set[tile_number_fixed][tile_line_number]
        return tile_line, tile_line_byte_to_change

    def _find_tile_map(self, address: int):
        # Map a raw VRAM address onto (tile map row, column index).
        v_address = address - 0x9800
        tile_map_number = v_address // 32
        if tile_map_number < 32:
            tile_map_number_fixed = tile_map_number
            tile_map = self.tile_maps[0]
        else:
            tile_map_number_fixed = tile_map_number - 32
            tile_map = self.tile_maps[1]
        tile_map_pos_number = v_address - (tile_map_number * 32)
        return tile_map[tile_map_number_fixed], tile_map_pos_number

    @staticmethod
    def _generate_memory_map(size: int):
        """
        Generate a memory area.
        See: https://docs.python.org/3/library/array.html
        :return:
        """
        byte_array = array.array('B')  # 'B' == "unsigned char" in C / "int" in Python, minimum size is 1 byte
        byte_array.extend((0x00,) * size)
        return byte_array

    def get_map(self, map_number: int):
        """ Helper method to retrieve a tile map """
        return self.tile_maps[map_number]

    def get_tile(self, tile_set_number: int, tile_number: int):
        """ Helper method to retrieve a tile from a set """
        if tile_number < 128:
            if tile_set_number == 0:
                return self.tile_set_0_only[tile_number]
            else:
                return self.tile_set_1_only[tile_number]
        else:
            # It's in the shared area; 'tile_number' is unsigned so we do not need to worry about that
            return self.tile_set_shared[tile_number-128]

    def load_boot_rom(self):
        """
        Adds the GameBoy boot ROM to the beginning of the memory, so it is
        executed when the emulator starts. The boot ROM prepares the device by
        clearing memory areas and setting up some initial values. We could set
        the appropriate memory values without using the boot ROM and skip
        directly to game ROM execution, but it is more fun like this :)
        See: http://gbdev.gg8.se/wiki/articles/Gameboy_Bootstrap_ROM

        :return: Boot ROM bytes, or None if no boot ROM file is available
        """
        try:
            with open("boot.rom", "rb") as f:
                return f.read()
        except FileNotFoundError:
            self.logger.info("Boot ROM not found, skipping it")
            # No boot ROM: set the post-boot register/IO values directly.
            self.write_8bit(0xFF05, 0x00)  # TIMA
            self.write_8bit(0xFF06, 0x00)  # TMA
            self.write_8bit(0xFF07, 0x00)  # TAC
            self.write_8bit(0xFF10, 0x80)  # NR10
            self.write_8bit(0xFF11, 0xBF)  # NR11
            self.write_8bit(0xFF12, 0xF3)  # NR12
            self.write_8bit(0xFF14, 0xBF)  # NR14
            self.write_8bit(0xFF16, 0x3F)  # NR21
            self.write_8bit(0xFF17, 0x00)  # NR22
            self.write_8bit(0xFF19, 0xBF)  # NR24
            self.write_8bit(0xFF1A, 0x7F)  # NR30
            self.write_8bit(0xFF1B, 0xFF)  # NR31
            self.write_8bit(0xFF1C, 0x9F)  # NR32
            self.write_8bit(0xFF1E, 0xBF)  # NR33
            self.write_8bit(0xFF20, 0xFF)  # NR41
            self.write_8bit(0xFF21, 0x00)  # NR42
            self.write_8bit(0xFF22, 0x00)  # NR43
            self.write_8bit(0xFF23, 0xBF)  # NR30
            self.write_8bit(0xFF24, 0x77)  # NR50
            self.write_8bit(0xFF25, 0xF3)  # NR51
            self.write_8bit(0xFF26, 0xF1)  # NR52
            self.write_8bit(0xFF40, 0x91)  # LCDC
            self.write_8bit(0xFF42, 0x00)  # SCY
            self.write_8bit(0xFF43, 0x00)  # SCX
            self.write_8bit(0xFF45, 0x00)  # LYC
            self.write_8bit(0xFF47, 0xFC)  # BGP
            self.write_8bit(0xFF48, 0xFF)  # 0BP0
            self.write_8bit(0xFF49, 0xFF)  # 0BP1
            self.write_8bit(0xFF50, 0x01)  # Boot ROM unmap
            self.write_8bit(0xFF4A, 0x00)  # WY
            self.write_8bit(0xFF4B, 0x00)  # WX
            self.write_8bit(0xFFFF, 0x00)  # IE
            return None

    def write_8bit(self, address: int, value: int):
        """
        Writes the given value at the given memory address.

        :param address: Address to write
        :param value: Value to write
        """
        self.logger.debug("writing 8-bit value %02X at address 0x%04X", value, address)
        self._write(address, value)

    def write_16bit(self, address: int, value: int):
        """
        Writes 16-bit big-endian value at the given memory address. Memory is
        little-endian, so least significant byte goes at address, most
        significant byte goes at address+1.

        :param address: Address to write
        :param value: Value to write
        """
        lsb = value & 0x00ff
        msb = (value >> 8) & 0x00ff
        self.write_8bit(address, lsb)
        self.write_8bit(address+1, msb)

    def read_8bit(self, address: int):
        """
        Reads 8-bit value from the given address in the memory.

        :param address: Memory address to read data from
        :return: 8-bit value at the given memory address
        """
        return self._read(address)

    def read_16bit(self, address: int):
        """
        Reads 16-bit value from the given address in the memory. Least
        significant byte in address, most significant byte in address+1.

        :param address: Memory address to read data from
        :return: 16-bit value at the given memory address
        """
        lsb = self._read(address)
        msb = self._read(address+1)
        return (msb << 8) | lsb

    @staticmethod
    def print_memory_map(memory_map: array):
        """ Prints the current memory map to console """
        for i in range(0, len(memory_map), 16):
            mem_str = "0x{:04X} | ".format(i)
            for j in range(0, 16):
                mem_str += "{:02X} ".format((memory_map[i + j]))
            mem_str += "|"
            print(mem_str)

    def debug(self):
        """
        Prints debug info to console.
        """
        # self.logger.debug("VRAM:")
        # self._debug_memory_map(self.vram)
        # self.logger.debug("External RAM:")
        # self._debug_memory_map(self.external_ram)
        # self.logger.debug("Internal RAM:")
        # self._debug_memory_map(self.internal_ram)
        # self.logger.debug("OAM:")
        # self._debug_memory_map(self.oam)
        self.logger.debug("I/O:")
        self._debug_memory_map(self.io)
        # self.logger.debug("HRAM:")
        # self._debug_memory_map(self.hram)

    def _debug_memory_map(self, memory_map: array):
        # Log only the non-zero addresses, as a {address: value} dict.
        custom_dict = {}
        for i in range(0, len(memory_map)):
            if memory_map[i] != 0:
                custom_dict["0x{:04X}".format(i)] = "{:02X}".format(memory_map[i])
        self.logger.debug(custom_dict)
def GetMetrics(self, request, context):
    """gRPC handler: return the current metrics of the wrapped instance."""
    Log.debug("Came in GetMetrics")
    metrics = self.pyinstance.get_metrics()
    return metrics
def ResetMetrics(self, request, context):
    """Serve a ResetMetrics RPC; the incoming request is echoed back as the reply."""
    Log.debug("Came in ResetMetrics")
    self.pyinstance.reset_metrics()
    return request
def GetFunctionStatus(self, request, context):
    """Serve a GetFunctionStatus RPC by delegating to the wrapped Python instance."""
    Log.debug("Came in GetFunctionStatus")
    status = self.pyinstance.get_function_status()
    return status
class CallMonitor(threading.Thread):
    """
    Thread that listens to the FritzBox call-monitor telnet service and forwards
    call events (RING / CALL / CONNECT / DISCONNECT) to a CaptureMonitor, which
    starts/stops packet captures based on the number of active calls.
    """

    def __init__(self, capture_monitor, box_name, call_service_port):
        """
        :param capture_monitor: CaptureMonitor receiving call data and start/stop events
        :param box_name: hostname of the FritzBox
        :param call_service_port: TCP port of the call monitor telnet service
        """
        threading.Thread.__init__(self)
        self._stop = threading.Event()  # signals the run() loop to terminate
        self.capture_monitor = capture_monitor
        self.box_name = box_name
        self.call_service_port = call_service_port
        self.logger = Log().getLogger()
        self.logger.debug("CallMonitor(capture_monitor:'%s', box_name:'%s', call_service_port:'%s')." % (capture_monitor,box_name,call_service_port))

    def init_connection(self):
        # Connect to the call monitor service over telnet.
        # On failure self.tn is set to None, which makes run() exit immediately.
        self.logger.info("Connect to the call monitor service on %s:%s." % (self.box_name,self.call_service_port))
        try:
            self.tn = telnetlib.Telnet(self.box_name, self.call_service_port)
        except IOError:
            self.logger.error("Cannot create connection to the call monitor service on %s:%s." % (self.box_name,self.call_service_port))
            self.tn = None
            return
        self.logger.info("Connected to the call monitor service on %s:%s." % (self.box_name,self.call_service_port))

    def run(self):
        """
        Main loop: read one semicolon-separated event line at a time from the
        telnet service, parse it, push the extracted data into the
        CaptureMonitor and trigger start/stop of captures.
        """
        self.logger.debug("Thread started.")
        callers_count = 0 # Set the count of currently led calls to 0.
        call_id_map = {}  # call id -> [caller name, dialed name, line port] for later CONNECT/DISCONNECT logging
        self.init_connection()
        while self.tn and not self._stop.isSet():
            self.logger.debug("Wait for call monitor status change.")
            line = ""
            # Accumulate data until a full line (terminated by "\n") was read.
            while (not self._stop.isSet() and not line.endswith("\n")):
                try:
                    line = line + self.tn.read_until("\n", timeout=5) # Wait until one line data was readed from the telnet connection.
                except AttributeError:
                    self.logger.error("Cannot read input data from telnet service. Maybe the telnet connection is broken. Stop the service.")
                    self._stop.set()
                if (self._stop.isSet() and self.tn):
                    # Stop was requested while reading: close the telnet session.
                    self.logger.debug("Close the connection to the telnet session.")
                    self.tn.close()
                    self.logger.debug("Close the connection to the telnet session finished.")
                    self.tn = None
                    self.logger.info("Connection to the call monitor service on %s:%s stopped." % (self.box_name,self.call_service_port))
                    continue
            if (self._stop.isSet()):
                break
            line = line.strip()
            if (not line):
                continue
            # Event format: "<timestamp>;<command>;<id>;<fields...>"
            sline = line.split(";")
            event_time = datetime.datetime.now()
            # parse the oryginal (sic) telnet time. Example: 13.01.11 21:48:31
            oryginal_event_time = time.strptime(sline[0], "%d.%m.%y %H:%M:%S") # 13.01.11 21:48:31
            self.logger.debug("Telnet:'"+line+"'")
            # get the event command
            command = sline[1]
            if command == "RING": # It rings, an incoming call.
                callers_count += 1 # Increase the count of currently led calls.
                call_partner = sline[3]
                if not call_partner:
                    call_partner = "Unknown"
                me = sline[4]
                if not me:
                    me = "Unknown"
                self.capture_monitor.set_data("callevent.name", command)
                self.capture_monitor.set_data("acalls.number", callers_count)
                self.capture_monitor.set_data("lineport.name", sline[5])
                self.capture_monitor.set_data("tocall", oryginal_event_time)
                self.capture_monitor.set_data("tcall", event_time)
                # Incoming call: remote party is the caller, we are the dialed number.
                self.capture_monitor.set_callnumber("caller", call_partner)
                self.capture_monitor.set_callnumber("dialed", me)
                self.capture_monitor.set_callnumber("me", me)
                self.capture_monitor.set_callnumber("callpartner", call_partner)
                me_numbername = self.capture_monitor.get_call_numbername(me)
                callpartner_numbername = self.capture_monitor.get_call_numbername(call_partner)
                self.logger.info("Ring (ID:%s, ActiveCalls.:%s, Caller:%s, DialedNumber:%s, LinePort:%s)" % (sline[2],callers_count,callpartner_numbername,me_numbername,sline[5]))
                call_id_map[sline[2]] = [callpartner_numbername,me_numbername,sline[5]]
            elif command == "CALL": # Number is dialed, an outgoing call.
                callers_count += 1 # Increase the count of currently led calls.
                me = sline[4]
                if not me:
                    me = "Unknown"
                call_partner = sline[5]
                if not call_partner:
                    call_partner = "Unknown"
                self.capture_monitor.set_data("callevent.name", command)
                self.capture_monitor.set_data("acalls.number", callers_count)
                self.capture_monitor.set_data("lineport.name", sline[6])
                self.capture_monitor.set_data("tocall", oryginal_event_time)
                self.capture_monitor.set_data("tcall", event_time)
                # Outgoing call: we are the caller, remote party is the dialed number.
                self.capture_monitor.set_callnumber("caller", me)
                self.capture_monitor.set_callnumber("dialed", call_partner)
                self.capture_monitor.set_callnumber("me", me)
                self.capture_monitor.set_callnumber("callpartner", call_partner)
                me_numbername = self.capture_monitor.get_call_numbername(me)
                callpartner_numbername = self.capture_monitor.get_call_numbername(call_partner)
                self.logger.info("Call (ID:%s, ActiveCalls.:%s, Caller:%s, DialedNumber:%s, LinePort:%s)" % (sline[2],callers_count,me_numbername,callpartner_numbername,sline[6]))
                call_id_map[sline[2]] = [me_numbername,callpartner_numbername,sline[6]]
            elif command == "CONNECT": # Conversation started.
                self.capture_monitor.set_data("toconn", oryginal_event_time)
                self.capture_monitor.set_data("tconn", event_time)
                self.logger.info("Connect (ID:%s, ActiveCalls.:%s, Caller:%s, DialedNumber:%s, LinePort:%s)" % (sline[2],callers_count,call_id_map[sline[2]][0],call_id_map[sline[2]][1],call_id_map[sline[2]][2]))
                continue # Don't increase currently led calls, because already done with CALL/RING.
            elif command == "DISCONNECT": # Call was ended.
                callers_count -= 1 # Decrease the count of currently led calls.
                self.capture_monitor.set_data("todisc", oryginal_event_time)
                self.capture_monitor.set_data("tdisc", event_time)
                self.capture_monitor.set_data("acalls.number", callers_count)
                self.logger.info("Disconnect (ID:%s, ActiveCalls.:%s, Caller:%s, DialedNumber:%s, LinePort:%s)" % (sline[2],callers_count,call_id_map[sline[2]][0],call_id_map[sline[2]][1],call_id_map[sline[2]][2]))
                if (callers_count < 0):
                    # Defensive: can happen if the program started mid-call.
                    self.logger.warning("There is more stopped calls than started. Data corrupt or program started while calling. Normalize ActiveCalls to '0'")
                    callers_count = 0;
                    self.capture_monitor.set_data("acalls.number", callers_count)
            else:
                # Unknown command: ignore the line.
                continue
            if (self.capture_monitor is not None):
                if callers_count == 1: # There is at least 1 started call, start capture of data.
                    self.logger.debug("There is at least 1 active call. Send start_capture event to the CaptureMonitor.")
                    self.capture_monitor.start_capture();
                elif callers_count == 0: # there are no more active calls
                    self.logger.debug("There is no more active calls. Send stop_capture event to the CaptureMonitor.")
                    self.capture_monitor.stop_capture();
        # Loop ended (stop requested or connection lost): shut everything down.
        self._stop.set()
        self.capture_monitor.stop()
        self.logger.debug("Thread stopped.")

    def stop (self):
        # Request the run() loop to terminate.
        self.logger.debug("Received signal to stop the thread.")
        self._stop.set()

    def stopped (self):
        # True once stop was requested or the thread shut down on its own.
        return self._stop.isSet()
class Register:
    """GameBoy CPU register file.

    Eight 8-bit registers (A, F, B, C, D, E, H, L) that pair up into the
    16-bit combinations AF, BC, DE and HL, plus the 16-bit SP and PC.
    Register F carries the z/n/h/c flags in bits 7..4.
    """

    def __init__(self):
        # Logger
        self.logger = Log()
        # 8-bit registers (read in pairs for 16-bit access)
        self.A = 0x00  # Accumulator
        self.F = 0x00  # Flags
        self.B = 0x00
        self.C = 0x00
        self.D = 0x00
        self.E = 0x00
        self.H = 0x00
        self.L = 0x00
        # 16-bit registers
        self.SP = 0xFFFE  # Stack Pointer
        self.PC = 0x0000  # Program Counter

    def skip_boot_rom(self):
        """Load the register values the boot ROM would leave behind, so
        execution can begin directly at the cartridge entry point."""
        self.set_af(0x01B0)
        self.set_bc(0x0013)
        self.set_de(0x00D8)
        self.set_hl(0x014D)
        self.SP = 0xFFFE
        self.PC = 0x0100

    # ----- flag reads -----

    def _get_flag(self, bit_position: int):
        """Return the given bit of F as 0 or 1."""
        return 1 if self.F & (1 << bit_position) else 0

    def get_z_flag(self):
        """Zero flag (bit 7 of F)."""
        return self._get_flag(7)

    def get_n_flag(self):
        """Subtract flag (bit 6 of F)."""
        return self._get_flag(6)

    def get_h_flag(self):
        """Half Carry flag (bit 5 of F)."""
        return self._get_flag(5)

    def get_c_flag(self):
        """Carry flag (bit 4 of F)."""
        return self._get_flag(4)

    # ----- flag writes -----

    def _set_flag(self, bit_position: int, new_value: bool):
        """Force the given bit of F to new_value (True -> 1, False -> 0)."""
        mask = 1 << bit_position
        if new_value:
            self.F |= mask
        else:
            self.F &= ~mask

    def set_z_flag(self, new_value: bool):
        """Set the Zero flag (bit 7 of F)."""
        self._set_flag(7, new_value)

    def set_n_flag(self, new_value: bool):
        """Set the Subtract flag (bit 6 of F)."""
        self._set_flag(6, new_value)

    def set_h_flag(self, new_value: bool):
        """Set the Half Carry flag (bit 5 of F)."""
        self._set_flag(5, new_value)

    def set_c_flag(self, new_value: bool):
        """Set the Carry flag (bit 4 of F)."""
        self._set_flag(4, new_value)

    # ----- 16-bit pair reads -----

    def get_af(self):
        """Return A (high byte) and F (low byte) as one 16-bit value."""
        return self.F | (self.A << 8)

    def get_bc(self):
        """Return B (high byte) and C (low byte) as one 16-bit value."""
        return self.C | (self.B << 8)

    def get_de(self):
        """Return D (high byte) and E (low byte) as one 16-bit value."""
        return self.E | (self.D << 8)

    def get_hl(self):
        """Return H (high byte) and L (low byte) as one 16-bit value."""
        return self.L | (self.H << 8)

    # ----- 8-bit writes (each logs the new value) -----

    def set_a(self, d8: int):
        """Store the low 8 bits of d8 in A."""
        self.A = 0xFF & d8
        self.logger.debug("set register A = 0x{:02X}".format(self.A))

    def set_f(self, d8: int):
        """Store the low 8 bits of d8 in F."""
        self.F = 0xFF & d8
        self.logger.debug("set register F = 0x{:02X}".format(self.F))

    def set_b(self, d8: int):
        """Store the low 8 bits of d8 in B."""
        self.B = 0xFF & d8
        self.logger.debug("set register B = 0x{:02X}".format(self.B))

    def set_c(self, d8: int):
        """Store the low 8 bits of d8 in C."""
        self.C = 0xFF & d8
        self.logger.debug("set register C = 0x{:02X}".format(self.C))

    def set_d(self, d8: int):
        """Store the low 8 bits of d8 in D."""
        self.D = 0xFF & d8
        self.logger.debug("set register D = 0x{:02X}".format(self.D))

    def set_e(self, d8: int):
        """Store the low 8 bits of d8 in E."""
        self.E = 0xFF & d8
        self.logger.debug("set register E = 0x{:02X}".format(self.E))

    def set_h(self, d8: int):
        """Store the low 8 bits of d8 in H."""
        self.H = 0xFF & d8
        self.logger.debug("set register H = 0x{:02X}".format(self.H))

    def set_l(self, d8: int):
        """Store the low 8 bits of d8 in L."""
        self.L = 0xFF & d8
        self.logger.debug("set register L = 0x{:02X}".format(self.L))

    # ----- 16-bit writes (low byte first, matching the original log order) -----

    def set_af(self, d16: int):
        """Split a 16-bit value into A (high byte) and F (low byte)."""
        self.set_f(0x00ff & d16)
        self.set_a(0x00ff & (d16 >> 8))

    def set_bc(self, d16: int):
        """Split a 16-bit value into B (high byte) and C (low byte)."""
        self.set_c(0x00ff & d16)
        self.set_b(0x00ff & (d16 >> 8))

    def set_de(self, d16: int):
        """Split a 16-bit value into D (high byte) and E (low byte)."""
        self.set_e(0x00ff & d16)
        self.set_d(0x00ff & (d16 >> 8))

    def set_hl(self, d16: int):
        """Split a 16-bit value into H (high byte) and L (low byte)."""
        self.set_l(0x00ff & d16)
        self.set_h(0x00ff & (d16 >> 8))

    def set_sp(self, d16: int):
        """Store the low 16 bits of d16 in SP."""
        self.SP = 0xFFFF & d16
        self.logger.debug("set register SP = 0x{:04X}".format(self.SP))

    def set_pc(self, d16: int):
        """Store the low 16 bits of d16 in PC."""
        self.PC = 0xFFFF & d16
        self.logger.debug("set register PC = 0x{:04X}".format(self.PC))

    def debug(self):
        """Log a one-line snapshot of all registers and flags."""
        pairs = ["{:04X}".format(v) for v in
                 (self.get_af(), self.get_bc(), self.get_de(),
                  self.get_hl(), self.SP, self.PC)]
        flags = (self.get_z_flag(), self.get_n_flag(),
                 self.get_h_flag(), self.get_c_flag())
        self.logger.debug("AF: %s\tBC: %s\tDE: %s\tHL: %s\tSP: %s\tPC: %s\tz: %s\tn: %s\th: %s\tc: %s",
                          *pairs, *flags)
class GB:
    """GB components instantiation: top-level emulator object wiring CPU,
    memory, interrupts, GPU and screen together."""

    def __init__(self):
        self.logger = Log()
        # Create components; each gets a back-reference to this GB instance.
        self.cpu = CPU(self)
        self.memory = Memory(self)
        self.interrupts = Interrupts(self)
        self.screen = Screen(self)
        self.gpu = GPU(self)
        self.debug_mode = False
        self.step_mode = False

    def execute(self, cartridge_data: bytes, debug: bool = False, step: bool = False):
        """
        Execution main loop.

        :param cartridge_data: game to execute
        :param debug: If will run in debug mode or not
        :param step: If it will stop after executing each loop or not. Requires debug==True.
        """
        self.print_cartridge_info(cartridge_data)
        self.gpu.prepare()
        self.step_mode = step
        self.debug_mode = debug
        self.logger.setDebugMode(self.debug_mode)
        self.logger.info("Debug: %s\tStep: %s", self.debug_mode, self.step_mode)
        self.memory.load_cartridge(cartridge_data)
        if self.memory.boot_rom is None:
            self.cpu.register.skip_boot_rom()
        # Instantiates the emulator screen. It will assume control of the main thread,
        # so the emulator main loop must be triggered by the Screen itself, as a
        # scheduled method call.
        self.screen.run()

    def print_cartridge_info(self, cartridge_data: bytes):
        """
        Prints the cartridge header info.

        Unknown header codes are logged as "Unknown (0xNN)" instead of raising
        KeyError, so an unusual ROM header cannot abort startup.

        See: http://gbdev.gg8.se/files/docs/mirrors/pandocs.html#thecartridgeheader

        :param cartridge_data: full cartridge image (header fields read from 0x0134-0x014C)
        """
        def _lookup(table, code):
            # Robust header lookup: fall back to a readable "Unknown" label.
            return table.get(code, "Unknown (0x{:02X})".format(code))

        title = cartridge_data[0x0134:0x143].replace(b'\x00', b'').decode("ascii")
        self.logger.info("Title: %s", title)

        cartridge_type_dict = {0x00:"ROM ONLY",0x01:"MBC1",0x02:"MBC1+RAM",0x03:"MBC1+RAM+BATTERY",
                               0x05:"MBC2",0x06:"MBC2+BATTERY",0x08:"ROM+RAM",0x09:"ROM+RAM+BATTERY",
                               0x0B:"MMM01",0x0C:"MMM01+RAM",0x0D:"MMM01+RAM+BATTERY",
                               0x0F:"MBC3+TIMER+BATTERY",0x10:"MBC3+TIMER+RAM+BATTERY",0x11:"MBC3",
                               0x12:"MBC3+RAM",0x13:"MBC3+RAM+BATTERY",0x15:"MBC4",0x16:"MBC4+RAM",
                               0x17:"MBC4+RAM+BATTERY",0x19:"MBC5",0x1A:"MBC5+RAM",0x1B:"MBC5+RAM+BATTERY",
                               0x1C:"MBC5+RUMBLE",0x1D:"MBC5+RUMBLE+RAM",0x1E:"MBC5+RUMBLE+RAM+BATTERY",
                               0xFC:"POCKET CAMERA",0xFD:"BANDAI TAMA5",0xFE:"HuC3",0xFF:"HuC1+RAM+BATTERY"}
        self.logger.info("Cartridge: %s", _lookup(cartridge_type_dict, cartridge_data[0x0147]))

        rom_size = {0x00:"32KByte (no ROM banking)",0x01:"64KByte (4 banks)",0x02:"128KByte (8 banks)",
                    0x03:"256KByte (16 banks)",0x04:"512KByte (32 banks)",
                    0x05:"1MByte (64 banks) - only 63 banks used by MBC1",
                    0x06:"2MByte (128 banks) - only 125 banks used by MBC1",
                    0x07:"4MByte (256 banks)",0x52:"1.1MByte (72 banks)",0x53:"1.2MByte (80 banks)",
                    0x54:"1.5MByte (96 banks)"}
        self.logger.info("ROM Size: %s", _lookup(rom_size, cartridge_data[0x0148]))

        ram_size = {0x00:"None",0x01:"2 KBytes",0x02:"8 Kbytes",0x03:"32 KBytes (4 banks of 8KBytes each)"}
        self.logger.info("RAM Size: %s", _lookup(ram_size, cartridge_data[0x0149]))

        destination = {0x00:"Japanese",0x01:"Non-Japanese"}
        self.logger.info("Destination: %s", _lookup(destination, cartridge_data[0x014A]))

        self.logger.info("Version: %d", cartridge_data[0x014C])

    def debug(self):
        """
        Prints debug info to console.
        """
        self.cpu.debug()
        self.memory.debug()  # Makes execution really slow
        self.interrupts.debug()
        self.gpu.debug()
        self.logger.debug("---")
class Master():
    """
    MPI master process for a parallel connect-four style game: broadcasts the
    board to workers, distributes evaluation tasks on request, picks the CPU
    move from the aggregated column qualities and alternates with human turns.
    (Python 2 code: print statements, xrange, raw_input.)
    """

    def __init__(self, measuring):
        # measuring == True means: only time the first CPU move, print nothing else.
        self._measuring = measuring
        self._board = Board()
        self._taskPool = TaskPool()
        self._log = Log('Master')
        self._log.debug('init')

    def work(self):
        # Run move cycles until the game ends, then stop all workers.
        self._log.debug('work')
        while self._move_cycle():
            pass
        self._stop_workers()

    def _move_cycle(self):
        """One full game round (CPU move + human move).

        :return: False when the game (or measuring run) is over, True to continue.
        """
        self._taskPool = TaskPool()  # fresh task pool for this move
        self._print('CPU is thinking...')
        start_time = time.time()
        # Broadcast board data
        self._broadcast_board()
        # Send tasks to workers on request
        self._serve_tasks()
        column_quality = self._taskPool.calculate_quality()
        # Index of the column with the highest quality score.
        best_column_move = max(enumerate(column_quality), key=lambda x: x[1])[0]
        end_time = time.time() - start_time
        # When measuring, print only the raw timing number.
        print (end_time if self._measuring else 'Thinking done in: %f\n' % end_time)
        # Print column quality
        for idx, quality in enumerate(column_quality):
            self._print('Column %d quality: %f' % (idx+1, quality))
        self._print('Best column move %d\n' % (best_column_move+1))
        # In case we are just measuring 1st move by CPU
        if self._measuring:
            self._stop_workers()
            return False
        self._board.play_move(BoardTag.CPU, best_column_move)
        self._board.print_board()
        self._print('')
        if self._board.check_if_finished(best_column_move)[0]:
            self._print_winner(BoardTag.CPU)
            return False
        human_column_move = self._human_turn()
        if human_column_move == -1:
            # Human entered 0: give up.
            return False
        if self._board.check_if_finished(human_column_move)[0]:
            self._print_winner(BoardTag.HUMAN)
            return False
        return True

    def _broadcast_board(self):
        # Send the current board to every worker rank (1..comm_size-1).
        message = {'type': MessageType.BOARD_DATA, 'board': self._board}
        for x in xrange(1, comm_size):
            self._log.debug('BOARD_DATA to %d' % x)
            comm.send(message, dest=x)

    def _serve_tasks(self):
        """Serve TASK_REQUEST/TASK_RESULT messages until every worker has been
        told to WAIT (i.e. the task pool is exhausted)."""
        active_workers = comm_size - 1
        finished = False
        while not finished:
            self._log.debug('waiting for DATA_REQUEST')
            # Send tasks upon request
            message_status = MPI.Status()
            message = comm.recv(source=MPI.ANY_SOURCE, status=message_status)
            message_source = message_status.Get_source()
            if message['type'] == MessageType.TASK_REQUEST:
                self._log.debug('receives TASK_REQUEST from %d' % message_source)
                task = self._taskPool.next_task()
                if task is not None:
                    message = {'type': MessageType.TASK_DATA, 'task': task}
                    self._log.debug('sends TASK_DATA to %d' % message_source)
                    comm.send(message, dest=message_source)
                else:
                    # No more tasks: park this worker and count it out.
                    self._log.debug('sends WAIT to %d' % message_source)
                    comm.send({'type': MessageType.WAIT}, dest=message_source)
                    active_workers -= 1
                    if active_workers == 0:
                        finished = True
            elif message['type'] == MessageType.TASK_RESULT:
                self._log.debug('receives TASK_RESULT from %d' % message_source)
                self._taskPool.update_task(message['task'], message['result'])

    def _stop_workers(self):
        # Tell every worker rank to shut down.
        for x in xrange(1, comm_size):
            self._log.debug('STOP to %d' % x)
            comm.send({'type': MessageType.STOP}, dest=x)

    def _human_turn(self):
        """Read and play the human move.

        :return: played column index (0-based), or -1 when the human gave up.
        """
        # Take player move (input is 1-based, convert to 0-based)
        human_input = self._read_human_input()-1
        if human_input == -1:
            print 'Human gave up! CPU WINS!'
            return human_input
        # play
        self._board.play_move(BoardTag.HUMAN, human_input)
        self._board.print_board()
        print
        return human_input

    @staticmethod
    def _read_human_input():
        # Keep prompting until a number in 0..7 is entered (0 = give up).
        while True:
            print 'Human move:',
            human_input = raw_input()
            try:
                human_input = int(human_input)
                if human_input < 0 or human_input > 7:
                    raise ValueError()
                return human_input
            except ValueError:
                print 'Invalid input! Enter 1-7 for column or 0 for end'

    @staticmethod
    def _print_winner(player):
        # Announce the winning side.
        player_label = 'CPU' if player == BoardTag.CPU else 'Human'
        print '%s won!' % player_label

    def _print(self, message):
        # Suppress all chatter while measuring.
        if not self._measuring:
            print message
class CPU:
    """
    CPU

    Fetch/decode/execute loop for the GameBoy CPU, coordinating opcode
    execution with interrupt and GPU updates.
    """

    # The GameBoy CPU runs at 4194304hz, i.e. 4194304 cycles per second
    CLOCK_HZ = 4194304

    # The special Divider Register (DIV, 0xFF04) must be incremented 16384 times per second, i.e. every 256 CPU cycles
    DIV_ADDRESS = 0xFF04

    def __init__(self, gb):
        """
        :type gb: gb.GB
        """
        # Logger
        self.logger = Log()
        # Communication with other components
        self.gb = gb
        # Components exclusive to CPU
        self.register = Register()
        # State initialization
        self.halted = False   # for OP 76 (HALT)
        self.stopped = False  # for OP 10 (STOP)

    def execute(self):
        """ Execution main loop: runs opcodes until the GPU reports a full
        update cycle (one frame) has completed. """
        if self.gb.debug_mode:
            start = datetime.now()
        full_update_cycle_completed = False
        while not full_update_cycle_completed:
            opcode: int = None
            cycles_spent = 0
            if not self.halted and not self.stopped:
                opcode = self.read_next_byte_from_cartridge()
                if self.gb.debug_mode:
                    # Peek at the two bytes after the opcode for the trace line.
                    plus1 = "{:02X}".format(self.gb.memory.read_8bit(self.register.PC))
                    plus2 = "{:02X}".format(self.gb.memory.read_8bit(self.register.PC + 1))
                    self.logger.debug("Executing 0x%04X: %02X [ %s , %s ]", self.register.PC - 1, opcode, plus1, plus2)
                cycles_spent += op.execute(self.gb, opcode)
            # Interrupts and GPU advance even while halted/stopped, so the CPU
            # can be woken up again.
            cycles_spent += self.gb.interrupts.update(opcode)
            full_update_cycle_completed = self.gb.gpu.update(cycles_spent)
            if self.gb.debug_mode:
                self.gb.debug()
                if self.gb.step_mode:
                    input()  # step mode: wait for Enter before the next opcode
            # TODO: redo DIV register maintenance
            # if cycles_spent >= 256:
            #     current_div = self.memory.read_8bit(self.DIV_ADDRESS)
            #     new_div = (current_div + (cycles_spent/256)) & 0xFF  # TODO: what if value > FF?
            #     self.memory.write_8bit(self.DIV_ADDRESS, new_div)
            #     cycles_spent = cycles_spent % 256
        if self.gb.debug_mode:
            end = datetime.now()
            delta = self.delta(start, end)
            print("total:", delta, "\tFPS:", 1000.0 / delta)

    def read_next_byte_from_cartridge(self):
        """
        Read the next data from the ROM, increment Program Counter

        :return: 8-bit data read from ROM
        """
        data = self.gb.memory.read_8bit(self.register.PC)
        self.register.PC += 1
        return data

    def delta(self, a, b):
        """
        Return the elapsed time between two datetimes in milliseconds.

        Uses timedelta.total_seconds() so intervals spanning more than one day
        are measured correctly; the previous seconds/microseconds arithmetic
        silently dropped the days component of the timedelta.

        :param a: start datetime
        :param b: end datetime
        :return: (b - a) in milliseconds, as float
        """
        return (b - a).total_seconds() * 1000.0

    def debug(self):
        """ Prints debug info to console. """
        self.register.debug()
class CaptureMonitor(threading.Thread):
    """
    Thread that controls packet captures on a FritzBox: waits (on a Condition)
    for start/stop events from the CallMonitor, logs into the box, starts the
    capture CGI via a Tracer thread and hands finished capture files to the
    decode work queue. (Python 2 code: urllib.urlopen, dict.has_key, str.decode.)
    """

    # Class-level defaults; instances rebind these as instance attributes at runtime.
    state_started = False   # True between start_capture() and stop_capture()
    next_stop_time = 0      # earliest wall-clock time the capture may stop
    next_start_time = 0     # earliest wall-clock time the capture may start
    cap_file_path = ""      # path of the currently written capture file

    def __init__(self, decode_work_queue, data_map, box_name, password, protocol, cap_folder, cap_file, login_required, default_login, sid_challenge, sid_login, start_str, stop_str, after_capture_time):
        """
        :param decode_work_queue: queue receiving finished capture file paths (None = shutdown sentinel)
        :param data_map: shared dict of call metadata used for filename templating and lookups
        :param box_name: FritzBox hostname
        :param password: FritzBox password
        :param protocol: 'http' or 'https'
        :param cap_folder: capture folder template (parsed via StringHelper)
        :param cap_file: capture filename template (parsed via StringHelper)
        :param login_required: whether a login is needed before start/stop requests
        :param default_login: legacy login POST data template
        :param sid_challenge: SID challenge template -- appears unused in this class
        :param sid_login: SID login template (used for debug output)
        :param start_str: query string starting a capture
        :param stop_str: query string stopping a capture
        :param after_capture_time: seconds to keep capturing after the last call ended
        """
        threading.Thread.__init__(self)
        self._stop = threading.Event()
        self.decode_work_queue = decode_work_queue
        self.data_map = data_map
        self.box_name = box_name
        self.password = password
        self.protocol = protocol
        self.cap_folder = cap_folder
        self.cap_file = cap_file
        self.login_required = login_required
        self.default_login = default_login
        self.sid_challenge = sid_challenge
        self.sid_login = sid_login
        self.start_str = start_str
        self.stop_str = stop_str
        self.after_capture_time = after_capture_time
        self.SID = ''  # session id; empty string means "not logged in via SID"
        self.wait_condition = threading.Condition()  # guards state_started / next_*_time
        self.logger = Log().getLogger()
        # NOTE(review): the format string below contains literal '******' for the
        # password/default_login/sid_login fields (11 %s placeholders) but 14
        # arguments are supplied -- this looks like a sanitized source and would
        # raise TypeError at runtime as written; verify against the original.
        self.logger.debug("CaptureMonitor(decode_work_queue:'%s', data_map:'%s', box_name:'%s', password:'******', protocol:'%s', cap_folder:'%s', cap_file:'%s', login_required:'%s', default_login:'******', sid_challenge:'%s', sid_login:'******', start_str:'%s', stop_str:'%s', after_capture_time:'%s')" % (decode_work_queue,data_map,box_name,password,protocol,cap_folder,cap_file,login_required,default_login,sid_challenge,sid_login,start_str,stop_str,after_capture_time))

    def run(self):
        """Main loop: alternate between waiting for a start event (pre_capture)
        and waiting for a stop event (post_capture), driving one capture per cycle."""
        self.logger.debug("Thread started.")
        if self.login_required:
            if (not self.init_login()):
                self.logger.debug("Could not login. Stop the capture thread.")
                self._stop.set()
        while not self._stop.isSet():
            ###################
            ### pre_capture ###
            ###################
            # Wait until start_capture coomand was started...
            self.logger.debug("pre_capture acquire lock.")
            self.wait_condition.acquire()
            self.logger.debug("pre_capture acquire lock finished.")
            while not self._stop.isSet() and (self.state_started == False or self.next_start_time > time.time()):
                if self.next_start_time > time.time():
                    # A delayed start is scheduled: wait only until that time.
                    waittime = self.next_start_time - time.time()
                    if (self.logger.isEnabledFor(logging.DEBUG)):
                        self.logger.debug("pre_capture wait(%f)." % (waittime))
                    self.wait_condition.wait(waittime)
                    if (self.logger.isEnabledFor(logging.DEBUG)):
                        self.logger.debug("pre_capture wait(%f) finished." % (waittime))
                else:
                    if (self.logger.isEnabledFor(logging.DEBUG)):
                        self.logger.debug("pre_capture wait().")
                    self.wait_condition.wait()
                    if (self.logger.isEnabledFor(logging.DEBUG)):
                        self.logger.debug("pre_capture wait() finished.")
            self.logger.debug("pre_capture release lock.")
            self.wait_condition.release()
            self.logger.debug("pre_capture release lock finished.")
            if (self._stop.isSet()):
                logging.debug("The capture was not started. Not need to stop the capture. Can stop immediately.")
                break
            self.sub_start_capture()
            ####################
            ### post_capture ###
            ####################
            # Wait until stop_capture command was started...
            self.logger.debug("post_capture acquire lock.")
            self.wait_condition.acquire()
            self.logger.debug("post_capture acquire lock finished.")
            while not self._stop.isSet() and (self.state_started == True or self.next_stop_time > time.time()):
                if self.next_stop_time > time.time():
                    # Keep capturing until the after_capture_time grace period elapses.
                    waittime = self.next_stop_time - time.time()
                    if (self.logger.isEnabledFor(logging.DEBUG)):
                        self.logger.debug("post_capture wait(%f)." % (waittime))
                    self.wait_condition.wait(waittime)
                    if (self.logger.isEnabledFor(logging.DEBUG)):
                        self.logger.debug("post_capture wait(%f) finished." % (waittime))
                else:
                    if (self.logger.isEnabledFor(logging.DEBUG)):
                        self.logger.debug("post_capture wait().")
                    self.wait_condition.wait()
                    if (self.logger.isEnabledFor(logging.DEBUG)):
                        self.logger.debug("post_capture wait() finished.")
            self.logger.debug("post_capture release lock.")
            self.wait_condition.release()
            self.logger.debug("post_capture release lock finished.")
            self.sub_stop_capture()
        if (self._stop.isSet()):
            if (self.decode_work_queue is not None):
                # Push the shutdown sentinel so the decoder thread terminates too.
                self.decode_work_queue.put(None)
        else:
            self._stop.set()
        self.logger.debug("Thread stopped.")

    def init_login(self):
        """Log into the FritzBox, preferring the SID challenge/response scheme
        and falling back to the legacy webcm login on any error.

        :return: True on successful SID login, falsy otherwise.
        """
        try:
            self.logger.debug("Login attempt to the the FritzBox (box_name:%s)" % (self.box_name))
            # Try to get a session id SID
            conn_url = 'http://fritz.box/login_sid.lua'
            self.logger.debug("Call the challange token url (url:'%s')" % conn_url)
            self.sid = urllib.urlopen(conn_url)
            if self.sid.getcode() == 200:
                # Read and parse the response in order to get the challenge (not a full blown xml parser)
                readed_chalange_str = self.sid.read()
                challenge = re.search('<Challenge>(.*?)</Challenge>', readed_chalange_str).group(1)
                # Create a UTF-16LE string from challenge + '-' + password, non ISO-8859-1 characters will except here (e.g. EUR)
                challenge_bf = (challenge + '-' + self.password).decode('iso-8859-1').encode('utf-16le')
                # Calculate the MD5 hash
                m = hashlib.md5()
                m.update(challenge_bf)
                # Make a byte response string from challenge + '-' + md5_hex_value
                response_bf = challenge + '-' + m.hexdigest().lower()
                # Answer the challenge
                conn_url = 'http://fritz.box/login_sid.lua?username=root&response=' + response_bf
                self.logger.debug("Call the read seed token url (url:'%s', data:'%s')." % (conn_url,self.sid_login % response_bf))
                login = urllib.urlopen(conn_url)
                if login.getcode() == 200:
                    readed_login_str = login.read()
                    self.SID = re.search('<SID>(.*?)</SID>', readed_login_str).group(1)
                    if (self.SID == '0000000000000000'):
                        # All-zero SID means the credentials were rejected.
                        self.logger.error("Could not login to the FritzBox: Not authorized.")
                    else:
                        self.logger.debug("Login OK (SID: %s)" % self.SID)
                        return True
                else:
                    self.logger.error("Could not login to the FritzBox: Unknown error")
            else:
                self.logger.error("Could not login to the FritzBox: Error 404.")
        except:
            # Legacy login
            command = urllib.urlopen(self.protocol + '://' + self.box_name + '/cgi-bin/webcm', self.default_login % self.password)
            response = command.read()
            # Right now I don't know how to check the result of a login operation. So I just search for the errorMessage
            if command.getcode() == 200:
                try:
                    result = urllib.unquote(re.search('<p class="errorMessage">(.*?)</p>', response).group(1).decode('iso-8859-1')).replace(" "," ")
                except:
                    result = ''
                self.logger.error('Login attempt was made, but something was wrong: %s' % result)
        return False

    def init_capture_file(self):
        """Build self.cap_file_path from the folder/file templates and make sure
        the destination folder exists."""
        # Create capfile folder
        folder = StringHelper.parse_string(self.cap_folder, self.data_map)
        folder = folder.replace("\\","/")
        if (not folder.endswith("/")):
            folder = folder+"/"
        file = StringHelper.parse_string(self.cap_file, self.data_map)
        self.cap_file_path = folder+file
        self.logger.debug("Initialize capture file (folder:%s, file:%s)." % (folder,file))
        if not os.path.exists(folder):
            self.logger.debug("Destination folder:'%s' not exists. Create." % folder)
            os.makedirs(folder)

    def sub_start_capture(self):
        """Log in (if required), prepare the capture file and launch the Tracer
        thread that streams the capture CGI into it."""
        # Start tracer thread, wait for console input to stop
        if self.login_required and not self.init_login():
            self.logger.debug("Could not login. Stop the capture thread.")
            self._stop.set()
            return
        self.set_data("tcaps",datetime.datetime.now())  # capture start timestamp
        self.logger.debug("data_map:%s" % (self.data_map))
        self.init_capture_file()
        self.logger.info("Start capture (capture_file:'%s')." % (self.cap_file_path))
        # Append the SID only when we actually hold a session.
        if self.SID != '':
            url = self.protocol + '://' + self.box_name + '/cgi-bin/capture_notimeout' + self.start_str + "&sid=%s" % self.SID
        else:
            url = self.protocol + '://' + self.box_name + '/cgi-bin/capture_notimeout' + self.start_str
        self.logger.debug("Send capture start request to the box (url:'%s', capture_file:'%s')." % (url, self.cap_file_path))
        Tracer(url, self.cap_file_path).start()
        self.logger.debug("Send capture start request to the box finished (url:'%s', capture_file:'%s')." % (url, self.cap_file_path))

    def sub_stop_capture(self):
        """Send the stop request to the box and queue the finished capture file
        for decoding."""
        # Clean stop
        if self.login_required and not self.init_login():
            self.logger.debug("Could not login. Stop the capture thread.")
            self._stop.set()
            return
        if self.SID != '':
            url = self.protocol + '://' + self.box_name + '/cgi-bin/capture_notimeout' + self.stop_str + "&sid=%s" % self.SID
        else:
            url = self.protocol + '://' + self.box_name + '/cgi-bin/capture_notimeout' + self.stop_str
        self.logger.debug("Send capture stop request to the box (url:'%s', capture_file:'%s')." % (url, self.cap_file_path))
        urllib.urlopen(url)
        self.logger.debug("Send capture stop request to the box finished (url:'%s', capture_file:'%s')." % (url, self.cap_file_path))
        self.logger.info("Capture finished (capture_file:'%s')." % (self.cap_file_path))
        self.set_data("tcape",datetime.datetime.now())  # capture end timestamp
        if (self.decode_work_queue is not None):
            self.logger.debug("Add captured file '%s' to the decoding work queue." % (self.cap_file_path))
            self.decode_work_queue.put(self.cap_file_path)

    def start_capture(self):
        # Called by the CallMonitor: flag the capture as started and wake run().
        self.wait_condition.acquire()
        self.logger.debug("start_capture called.")
        self.state_started = True
        self.wait_condition.notify_all()
        self.wait_condition.release()

    def stop_capture(self):
        # Called by the CallMonitor: flag the capture as stopped; the actual stop
        # is delayed by after_capture_time seconds (grace period).
        self.wait_condition.acquire()
        self.logger.debug("stop_capture called.")
        self.state_started = False
        if self.after_capture_time > 0:
            self.next_stop_time = time.time() + self.after_capture_time
        self.wait_condition.notify_all()
        self.wait_condition.release()

    def stop (self):
        # Request thread shutdown and wake any waiter on the condition.
        self.logger.debug("Received signal to stop the thread.")
        self._stop.set()
        self.wait_condition.acquire()
        if self.after_capture_time > 0:
            self.next_stop_time = time.time() + self.after_capture_time
        self.wait_condition.notify_all()
        self.wait_condition.release()

    def stopped (self):
        # True once stop was requested.
        return self._stop.isSet()

    def set_callnumber(self, key, number):
        """Store number under key.number/key.name/key.numbername in the data
        map, resolving the name via the pbook_number.* phonebook entries."""
        self.data_map[key+".number"] = number
        self.data_map[key+".name"] = ""
        self.data_map[key+".numbername"] = number
        if (self.data_map.has_key("pbook_number."+number)):
            self.data_map[key+".name"] = self.data_map.get("pbook_number."+number)
            self.data_map[key+".numbername"] = self.get_call_numbername(number)

    def get_call_numbername(self, number):
        """Return 'number(name)' when the phonebook knows the number, else the
        bare number ('Unknown' for empty input)."""
        if (not number):
            number = "Unknown"
        if (self.data_map.has_key("pbook_number."+number)):
            return number+"("+self.data_map.get("pbook_number."+number)+")"
        return number

    def set_data(self, key, value):
        # Plain passthrough into the shared metadata map.
        self.data_map[key]=value