def main():
    """Feed questions from a set of files into a downstream pipe.

    Round-robins over the files obtained from the TCP client, pulling one
    question at a time from each file's question object and writing it to
    the pipe.  Terminates when every file has been exhausted (one full
    round of empty answers) or when ``global_variables.pipe_flag`` is
    cleared externally (presumably by a signal handler -- TODO confirm).
    """
    try:
        # Flush any buffered output before handing stdout over to the pipe
        # consumer.  `stdout`/`stderr` are module-level imports, not sys.*.
        stdout.flush()
        # finished_files_size counts *consecutive* files that returned an
        # empty question; file_idx is the round-robin cursor.
        finished_files_size, file_idx = 0, 0
        # Register handlers for the signals the process cares about.
        # NOTE(review): the instance is created for its side effects only
        # and deliberately not kept -- verify SignalHandler registers in
        # its constructor.
        SignalHandler(
            [signal.SIGALRM, signal.SIGPIPE, signal.SIGINT, signal.SIGTSTP])
        # Fetch the list of file paths to process from the TCP server.
        files_path_list = client(socket.AF_INET, socket.SOCK_STREAM)
        # files_path_list = ["Data/1.txt", "Data/2.txt"]
        files_size = len(files_path_list)
        # One question-producing object per file, built in parallel.
        files_question_obj_list = create_files_question_with_multiprocess(
            files_path_list)
        # Loop until the pipe is flagged closed or a full round of files
        # produced no questions (all exhausted).
        while global_variables.pipe_flag and finished_files_size != files_size:
            question_str = files_question_obj_list[file_idx].get_question()
            try:
                if question_str == "":
                    # Empty question == this file is exhausted; advance the
                    # cursor and remember it was empty.  The counter resets
                    # as soon as any file yields a question again.
                    finished_files_size += 1
                    file_idx += 1
                    file_idx %= files_size
                else:
                    finished_files_size = 0
                    send_to_pipe(question_str)
                    # file_signal_flag is set externally (signal handler)
                    # to request skipping to the next file -- TODO confirm.
                    if global_variables.file_signal_flag is True:
                        global_variables.file_signal_flag = False
                        file_idx += 1
                        file_idx %= files_size
            except IOError as e:
                # EPIPE: the consumer closed its end of the pipe.
                if e.errno == errno.EPIPE:
                    print("PythonBrokenPipeError: the pipe broken", file=stderr)
                else:
                    # NOTE(review): "Uncatch" / "Eror" are typos in the
                    # emitted message; left as-is because downstream log
                    # parsers may match this exact string.
                    print("PythonUncatchEror: ", str(e), file=stderr)
        # Loop ended because every file was exhausted (not pipe_flag).
        if finished_files_size == files_size:
            print(
                "The program has finished reading the questions and therefore it ends",
                file=stderr)
    except Exception as e:
        # Top-level catch-all boundary: report and exit normally.
        print("RunPythonError:", str(e), file=stderr)
def start(self):
    """Create the worker threads, wire up Ctrl-C handling, start every
    worker, and block until they have all finished.

    The shared ``stopper`` event and the thread list are handed to the
    SIGINT handler so it can ask the workers to shut down.
    """
    self.stopper = threading.Event()
    self.threads = []

    # Handler must see the (still-growing) thread list, so build it first.
    sigint_handler = SignalHandler(self.stopper, self.threads)

    overview_worker = Post_overview(1, 'main', self.stopper, self.q, self.post_q)
    self.threads.append(overview_worker)

    signal.signal(signal.SIGINT, sigint_handler)

    for worker in self.threads:
        worker.start()
    for worker in self.threads:
        worker.join()
def main(args=sys.argv[1:]):
    """Entry point for the raptor performance-test harness.

    Parses command-line arguments, resolves the list of tests to run,
    picks the appropriate runner class (webextension vs. browsertime,
    desktop vs. android), runs the tests, fails the job when no usable
    results were produced, and handles CI result packaging plus optional
    local gecko-profile viewing.

    NOTE: the ``args`` parameter is never read -- ``parse_args()``
    re-reads ``sys.argv`` itself.  It is kept only for interface
    compatibility with existing callers.
    """
    args = parse_args()
    args.extra_prefs = parse_preferences(args.extra_prefs or [])

    # Fission can be requested via the dedicated flag or by setting the
    # pref directly; keep flag and prefs consistent in both directions.
    if args.enable_fission:
        args.extra_prefs.update({
            "fission.autostart": True,
            "dom.serviceWorkers.parent_intercept": True,
            "browser.tabs.documentchannel": True,
        })

    if args.extra_prefs and args.extra_prefs.get("fission.autostart", False):
        args.enable_fission = True

    commandline.setup_logging("raptor", args, {"tbpl": sys.stdout})
    LOG.info("Python version: %s" % sys.version)
    LOG.info("raptor-start")

    if args.debug_mode:
        LOG.info("debug-mode enabled")

    LOG.info("received command line arguments: %s" % str(args))

    # if a test name specified on command line, and it exists, just run that one
    # otherwise run all available raptor tests that are found for this browser
    raptor_test_list = get_raptor_test_list(args, mozinfo.os)
    raptor_test_names = [
        raptor_test["name"] for raptor_test in raptor_test_list
    ]

    # ensure we have at least one valid test to run
    if len(raptor_test_list) == 0:
        LOG.critical("test '{}' could not be found for {}".format(
            args.test, args.app))
        sys.exit(1)

    LOG.info("raptor tests scheduled to run:")
    for next_test in raptor_test_list:
        LOG.info(next_test["name"])

    if not args.browsertime:
        if args.app == "firefox":
            raptor_class = WebExtensionFirefox
        elif args.app in CHROMIUM_DISTROS:
            raptor_class = WebExtensionDesktopChrome
        else:
            raptor_class = WebExtensionAndroid
    else:
        def raptor_class(*inner_args, **inner_kwargs):
            # Factory standing in for a class: forwards harness kwargs to
            # the right Browsertime runner, with browsertime_* options
            # pulled in from the parsed command line.
            outer_kwargs = vars(args)

            # peel off arguments that are specific to browsertime
            # BUGFIX: iterate over a snapshot of the keys; popping from
            # the dict while iterating its live keys view raises
            # "RuntimeError: dictionary changed size during iteration".
            for key in list(outer_kwargs.keys()):
                if key.startswith("browsertime_"):
                    value = outer_kwargs.pop(key)
                    inner_kwargs[key] = value

            if args.app == "firefox" or args.app in CHROMIUM_DISTROS:
                klass = BrowsertimeDesktop
            else:
                klass = BrowsertimeAndroid

            return klass(*inner_args, **inner_kwargs)

    try:
        raptor = raptor_class(
            args.app,
            args.binary,
            run_local=args.run_local,
            noinstall=args.noinstall,
            installerpath=args.installerpath,
            obj_path=args.obj_path,
            gecko_profile=args.gecko_profile,
            gecko_profile_interval=args.gecko_profile_interval,
            gecko_profile_entries=args.gecko_profile_entries,
            symbols_path=args.symbols_path,
            host=args.host,
            power_test=args.power_test,
            cpu_test=args.cpu_test,
            memory_test=args.memory_test,
            live_sites=args.live_sites,
            cold=args.cold,
            is_release_build=args.is_release_build,
            debug_mode=args.debug_mode,
            post_startup_delay=args.post_startup_delay,
            activity=args.activity,
            intent=args.intent,
            interrupt_handler=SignalHandler(),
            enable_webrender=args.enable_webrender,
            extra_prefs=args.extra_prefs or {},
            device_name=args.device_name,
            no_conditioned_profile=args.no_conditioned_profile,
            disable_perf_tuning=args.disable_perf_tuning,
            conditioned_profile_scenario=args.conditioned_profile_scenario,
            chimera=args.chimera,
            project=args.project,
            verbose=args.verbose)
    except Exception:
        traceback.print_exc()
        LOG.critical(
            "TEST-UNEXPECTED-FAIL: could not initialize the raptor test runner"
        )
        # was os.sys.exit(1); os.sys is just sys, call it directly
        sys.exit(1)

    success = raptor.run_tests(raptor_test_list, raptor_test_names)

    if not success:
        # if we have results but one test page timed out (i.e. one tp6 test page didn't load
        # but others did) we still dumped PERFHERDER_DATA for the successfull pages but we
        # want the overall test job to marked as a failure
        pages_that_timed_out = raptor.get_page_timeout_list()
        if pages_that_timed_out:
            for _page in pages_that_timed_out:
                message = [
                    ("TEST-UNEXPECTED-FAIL",
                     "test '%s'" % _page["test_name"]),
                    ("timed out loading test page",
                     "waiting for pending metrics"),
                ]
                if _page.get("pending_metrics") is not None:
                    LOG.warning("page cycle {} has pending metrics: {}".format(
                        _page["page_cycle"], _page["pending_metrics"]))

                LOG.critical(" ".join("%s: %s" % (subject, msg)
                                      for subject, msg in message))
        else:
            # we want the job to fail when we didn't get any test results
            # (due to test timeout/crash/etc.)
            LOG.critical(
                "TEST-UNEXPECTED-FAIL: no raptor test results were found for %s"
                % ", ".join(raptor_test_names))
        sys.exit(1)

    # if we're running browsertime in the CI, we want to zip the result dir
    if args.browsertime and not args.run_local:
        result_dir = raptor.results_handler.result_dir()
        if os.path.exists(result_dir):
            LOG.info("Creating tarball at %s" % result_dir + ".tgz")
            with tarfile.open(result_dir + ".tgz", "w:gz") as tar:
                tar.add(result_dir, arcname=os.path.basename(result_dir))
            LOG.info("Removing %s" % result_dir)
            shutil.rmtree(result_dir)

    # when running raptor locally with gecko profiling on, use the view-gecko-profile
    # tool to automatically load the latest gecko profile in profiler.firefox.com
    if args.gecko_profile and args.run_local:
        if os.environ.get("DISABLE_PROFILE_LAUNCH", "0") == "1":
            LOG.info(
                "Not launching profiler.firefox.com because DISABLE_PROFILE_LAUNCH=1"
            )
        else:
            view_gecko_profile_from_raptor()
def __init__(self, condition=None):
    """Initialize the TCP/IP handler.

    :param condition: optional object forwarded verbatim to the Events
        base-class constructor -- its semantics are defined by the parent
        class (not visible here).
    """
    super(TCPIPHandler, self).__init__(condition)
    # Signal handler's outbound path is wired to self.send, so signals it
    # processes can be written back to the socket.
    self.signal_handler = SignalHandler(self.send)
    # Byte-level traffic statistics accumulator.
    self.bus_stats = BusStats()
    # Register connect/disconnect callbacks with the GUI layer.
    self._bind_callbacks()
class TCPIPHandler(Events):
    """ Main interface for all TCP/IP-handling.

    Sits between the raw socket layer (handle_send/handle_receive, provided
    by a base class or mixin not visible here) and the signal layer: frames
    outgoing signals with gateway_protocol, unframes incoming bytes back
    into signals, and keeps byte statistics in BusStats.
    """

    def __init__(self, condition=None):
        # condition is forwarded verbatim to the Events base class.
        super(TCPIPHandler, self).__init__(condition)
        # Signal handler's outbound path is wired to self.send.
        self.signal_handler = SignalHandler(self.send)
        # Byte-level traffic statistics accumulator.
        self.bus_stats = BusStats()
        self._bind_callbacks()

    def _bind_callbacks(self):
        """ Set callbacks, triggered from GUI. """
        Application.Callbacks.update(onConnect=self.request_connect)
        Application.Callbacks.update(onDisconnect=self.request_disconnect)

    def request_connect(self):
        """ User calls this method for connecting. """
        self.request = Request.RUNNING
        # Fresh connection attempt: clear the retry counter first.
        self.reset_attempts()
        self._request_connect()

    def request_disconnect(self):
        """ User calls this method for disconnecting (also system shutdown). """
        self.request = Request.STOPPED
        self._request_disconnect()

    def send(self, signal):
        """ Interface against other modules to send data through TCP/IP-socket.

        "signal" is 3-tuple hexstring: ("src", "dst", "data")
        """
        # Frame the signal, push it to the socket layer, and account for
        # the framed (on-the-wire) byte count.
        data = gateway_protocol.create_frame(signal)
        self.handle_send(data)
        self.bus_stats.add_bytes(len(data))

    def receive(self, data):
        """ Interface against other modules to receive data from TCP/IP-socket
        (callback from ThreadedSocket).

        Handles the raw bytes received from the socket and abstracts the
        overlaying TCP/IP-protocol away allowing only signals passing
        through to the signal "filter".

        "signal" is 3-tuple hexstring: ("src", "dst", "data")
        """
        raw_signals = self.handle_receive(bytearray(data))
        # NOTE(review): the loop variable shadows the `data` parameter, so
        # add_bytes() below counts decoded-frame lengths rather than the
        # raw bytes received -- asymmetric with send(), which counts
        # framed bytes.  Confirm whether per-frame counting is intended or
        # whether len() of the original socket payload was meant.
        for data in raw_signals:
            self.signal_handler.receive(gateway_protocol.create_signal(data))
            self.bus_stats.add_bytes(len(data))