def run_test_with_pkt_size(self, pkt_size, duration, test_value):
    """Run the test for a single packet size.

    Args:
        pkt_size (int): The packet size to test with.
        duration (int): The duration for each try.
        test_value (long): The speed value to test with.

    Returns:
        dict: with the keys:
            measurement (long): the measured throughput.
            pkt_loss (float): the packet loss percentage.
            latency (dict): latency results.
    """
    logging.info("Testing with value %s", test_value)

    self.setup_test(pkt_size=pkt_size, speed=test_value)
    success, throughput, pkt_loss, lat = self.run_test(pkt_size, duration, test_value)
    self.teardown_test(pkt_size=pkt_size)

    if success:
        logging.verbose("Success! Increasing lower bound")
    else:
        logging.verbose("Failure... Decreasing upper bound")

    return dict(
        measurement=throughput,
        pkt_loss=pkt_loss,
        latency=lat
    )

def main(__interactive__: bool, force: bool) -> int:
    """
    Updates the configuration if the program is to be run interactively, then installs the necessary items

    :param __interactive__: True if the config has to be updated
    :param force: True to force a reinstall of the programs, else will only install the ones that
        are not installed
    """
    if __interactive__ and update.update():
        logging.error("An error occurred, could not finish configuration")
        return 1

    try:
        install(force)
        setup_coredumps()
        hooks.load_plugins()
        hooks.configure(force)
    except (exceptions.InstallationErrorException, exceptions.DistributionNotSupportedException) as exception:
        logging.error(exception)
        logging.error("Configure script failed. Please rerun it after correcting errors. You can add"
                      " --default in order to skip questions")
        return 1
    finally:
        logging.verbose("Cleaning environment")

    return 0

def run(self, *args, **kwargs) -> int:
    """
    Benchmarks the execution 20 times and stores the last 10 results (to avoid side effects)
    in self.trigger.result. Runs at most 100 times before deciding the run is a failure.

    :param args: additional arguments
    :param kwargs: additional keyword arguments
    :return: 0|1 on success|failure
    """
    logging.verbose(self.trigger.cmd)
    results = []
    tries = 0
    while len(results) < self.expected_results and tries < self.maximum_tries:
        try:
            results += timeit.repeat(self.benchmark_helper, repeat=1, number=1)
        except subprocess.CalledProcessError:
            logging.warning("A trigger failed, retrying one more time")
        tries += 1
        show_progress(len(results), self.expected_results, section="trigger")

    if tries >= self.maximum_tries:  # we exhausted the allowed tries: the run is a failure
        return 1

    logging.verbose("Run times : %(time)s secs", dict(time=results))
    self.trigger.returned_information = results[self.expected_results - self.kept_runs:]
    return 0

def run(self):
    """
    To run this program, we launch the command and wait until it is in a waiting state,
    which means it deadlocked. We will then kill it and return

    :return: 0|1|None on success|failure|unexpected result
    """
    logging.verbose(self.cmd)
    proc = subprocess.Popen(
        self.cmd.split(" "),
        stderr=subprocess.DEVNULL,
        stdout=subprocess.DEVNULL,
        preexec_fn=self.__preexec_fn__,
    )

    counter = 0
    while proc.poll() is None:
        if psutil.Process(proc.pid).status() == psutil.STATUS_SLEEPING:
            counter += 1
            if counter >= 1000:
                # the process has been sleeping for a while: treat it as deadlocked
                proc.send_signal(11)  # SIGSEGV
                time.sleep(1)
                self.clean()
                return self.check_success(1)
            time.sleep(0.01)
            continue

        # the process is running again: reset the sleep counter
        counter = 0
        time.sleep(0.01)

    self.clean()
    return self.check_success(proc.wait())

def trigger_bug(bug: str, main_plugin: MainPlugin, **kwargs: dict) -> int:
    """
    Triggers a bug against the main_plugin

    :param bug: the bug to trigger
    :param main_plugin: the plugin against which to trigger
    :param kwargs: additional keyword arguments to pass
    :return: 0|!0 on success|failure
    """
    plugin_args = kwargs.copy()
    try:
        logging.info("Triggering %(bug)s", dict(bug=bug))
        trigger = importlib.import_module("data.{}.trigger".format(bug)).Trigger()
        plugin_args.update({"main_plugin": main_plugin, "trigger": trigger})

        if not os.path.exists(trigger.conf.getdir("install_directory")):
            raise ProgramNotInstalledException(trigger.conf.get("name"))

        pre_trigger_run(**plugin_args)
        plugin_args["error"] = trigger.run()
        error = check_trigger_success(**plugin_args)
        if error:
            logging.error("%(bug)s did not run successfully", dict(bug=bug))
            return error

        post_trigger_run(**plugin_args)
        return 0
    finally:
        logging.verbose("Cleaning environment")
        post_trigger_clean(**plugin_args)

def _add_result(self, assertion, result, msg):
    logging.verbose("- (%s) assertion: %s", 'pass' if result else 'FAIL', msg)

    # Retrieve test source for debugging and diagnostic information. The
    # caller frame of interest is 2 up: once for the assertion that called
    # _add_result() and once for the call to the assertion itself.
    caller = sys._getframe(2)
    lineno = caller.f_lineno

    # inspect.getsourcelines(...) returns a list of source lines and the
    # line number of the start of the source. lineno retrieved above is
    # used to pick the source line that called one of the assertions.
    # Leading and trailing whitespace is stripped.
    source = inspect.getsourcelines(caller)
    source = source[0][lineno - source[1]]
    source = source.strip()

    self._results[-1]['results'].append(dict(
        assertion=assertion,
        result=result,
        msg=msg,
        lineno=caller.f_lineno,
        filename=path.splitext(path.basename(caller.f_code.co_filename))[0],
        stmt=source,
    ))

def master(comm, options):
    '''rank 0 will handle the program setup, and distribute tasks to all
    of the other ranks'''
    num_ranks = comm.Get_size()
    filename = "{0}/{1}{2}.dat"

    # create a list of files to pass to the ranks
    files = [
        filename.format(options.dir, options.prefix, n)
        for n in range(0, options.numfiles)
    ]

    # if num_ranks is 1, then we exit...
    if num_ranks == 1:
        print("Need more than 1 rank Ted...")
        comm.Abort(1)

    for f in files:
        # wait for another rank to report in
        child = comm.recv(source=MPI.ANY_SOURCE)
        logging.verbose("Rank {0} reported in, sending: {1}".format(child, f))
        # send filename to this rank
        if child:
            comm.send(f, dest=child)

    # ran out of files to create. tell ranks we're done
    i = 1
    while i < num_ranks:
        child = comm.recv(source=MPI.ANY_SOURCE)
        comm.send('alldone', dest=child)
        i += 1
    return

def prepare_sources(self) -> None:
    """
    Extracts the file from self.src_path + self.conf["src_name"] to self.extract_dir
    """
    logging.verbose("unpacking file in " + self.extract_dir)
    tar = tarfile.open(self.source_storage_path)
    tar.extractall(self.extract_dir)
    tar.close()

def test_verbose_hack(self):
    self.logger.verbose("foo")
    logging.verbose("foo")
    self.assertEqual(logging.VERBOSE, 15)
    if six.PY2:
        # There is no _levelNames attribute in Python 3
        self.assertTrue("VERBOSE" in logging._levelNames)
    self.assertEqual(logging.getLevelName(15), "VERBOSE")

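# The logging.verbose/Logger.verbose calls used throughout these snippets are
# not part of the standard library. A minimal sketch of the shim the test
# above assumes (the helper name _verbose is illustrative, not the original
# project's code):
import logging

logging.VERBOSE = 15
logging.addLevelName(logging.VERBOSE, "VERBOSE")

def _verbose(self, msg, *args, **kwargs):
    # delegate to the regular machinery so handlers and filters still apply
    if self.isEnabledFor(logging.VERBOSE):
        self._log(logging.VERBOSE, msg, args, **kwargs)

logging.Logger.verbose = _verbose
# module-level helper, mirroring logging.debug()/logging.info()
logging.verbose = lambda msg, *args, **kwargs: logging.log(logging.VERBOSE, msg, *args, **kwargs)
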
def sell_maker(self, amount, price):
    """Orders are always priced in CNY"""
    local_currency_price = self.fc.convert(price, "CNY", self.currency)
    local_currency_price = int(local_currency_price)
    logging.verbose("Sell maker %f BTC at %d %s (%d CNY) @%s" % (
        amount, local_currency_price, self.currency, price, self.name))
    return self._sell_maker(amount, local_currency_price)

def sell(self, amount, price, client_id=None):
    """Orders are always priced in CNY"""
    local_currency_price = self.fc.convert(price, "CNY", self.currency)
    logging.verbose("Sell %f BTC at %f %s (%f CNY) @%s" % (
        amount, local_currency_price, self.currency, price, self.name))
    if client_id:
        return self._sell(amount, local_currency_price, client_id)
    return self._sell(amount, local_currency_price)

def run(self, *args, **kwargs) -> int:
    """
    Benchmarks the execution time of 20 runs and stores the last 10 results (to avoid side
    effects) in self.trigger.result. Runs at most 100 times before deciding the run is a failure.

    :param args: additional arguments
    :param kwargs: additional keyword arguments
    :return: 0|1 on success|failure
    """
    results = []
    tries = 0
    while len(results) < self.expected_results and tries < self.maximum_tries:
        tries += 1
        try:
            proc_start = self.trigger.Server(self.trigger.cmd)
            proc_start.start()
            time.sleep(self.trigger.delay)

            results_queue = multiprocessing.Queue()  # pylint: disable=no-member
            self.triggers = []
            for command in self.trigger.helper_commands:
                self.triggers.append(
                    self.trigger.helper(command, results=results_queue, **self.trigger.named_helper_args)
                )

            result = timeit.repeat(self.client_run, number=1, repeat=1)
        finally:
            with suppress(subprocess.CalledProcessError):
                launch_and_log(self.trigger.stop_cmd.split(" "))
            for thread in self.triggers:
                thread.terminate()

        values = []
        for _ in self.triggers:
            values.append(results_queue.get_nowait())

        if self.trigger.check_success(values) != 0:
            logging.warning("Trigger did not work, retrying")
            continue

        results += result
        show_progress(len(results), self.expected_results, section="trigger")
        time.sleep(2)

    if tries >= self.maximum_tries:  # we exhausted the allowed tries: the run is a failure
        return 1

    logging.verbose("Run times : {} secs".format(results))
    self.trigger.returned_information = results[self.expected_results - self.kept_runs:]
    return 0

def auto_apkindex_package(args, arch, aport, apk, dry=False):
    """
    Bump the pkgrel of a specific package if it is outdated in the given
    APKINDEX.

    :param arch: the architecture, e.g. "armhf"
    :param aport: parsed APKBUILD of the binary package's origin:
                  {"pkgname": ..., "pkgver": ..., "pkgrel": ..., ...}
    :param apk: information about the binary package from the APKINDEX:
                {"version": ..., "depends": [...], ...}
    :param dry: don't modify the APKBUILD, just print the message
    :returns: True when there was an APKBUILD that needed to be changed.
    """
    version_aport = aport["pkgver"] + "-r" + aport["pkgrel"]
    version_apk = apk["version"]
    pkgname = aport["pkgname"]

    # Skip when aport version != binary package version
    compare = pmb.parse.version.compare(version_aport, version_apk)
    if compare == -1:
        logging.warning("{}: skipping, because the aport version {} is lower"
                        " than the binary version {}".format(pkgname, version_aport, version_apk))
        return
    if compare == 1:
        logging.verbose("{}: skipping, because the aport version {} is higher"
                        " than the binary version {}".format(pkgname, version_aport, version_apk))
        return

    # Find missing depends
    depends = apk["depends"]
    logging.verbose("{}: checking depends: {}".format(pkgname, ", ".join(depends)))
    missing = []
    for depend in depends:
        providers = pmb.parse.apkindex.providers(args, depend, arch, must_exist=False)
        if providers == {}:
            # We're only interested in missing depends starting with "so:"
            # (which means dynamic libraries that the package was linked
            # against) and packages for which no aport exists.
            if (depend.startswith("so:")
                    or not pmb.build.other.find_aport(args, depend, False)):
                missing.append(depend)

    # Increase pkgrel
    if len(missing):
        package(args, pkgname, reason=", missing depend(s): " + ", ".join(missing), dry=dry)
        return True

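# For reference, pmb.parse.version.compare() follows apk-tools' version
# ordering and returns -1/0/1, the same convention as a classic cmp()
# (illustrative calls, not output from a real run):
#
#   pmb.parse.version.compare("1.0-r1", "1.0-r2")  # -> -1 (first is lower)
#   pmb.parse.version.compare("1.0-r2", "1.0-r2")  # ->  0 (equal)
#   pmb.parse.version.compare("1.1-r0", "1.0-r9")  # ->  1 (first is higher)
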
def _get_order(self, order_id):
    res = self.client.get_order(int(order_id))
    logging.verbose('get_order: %s' % res)

    if res['code'] == 600:
        res = self.orders[order_id]
        res['status'] = 'CLOSE'
        del self.orders[order_id]
        return res

    assert str(res['data']['id']) == str(order_id)
    return self._order_status(res['data'])

def opportunity(self, profit, volume, buyprice, kask, sellprice, kbid,
                perc, weighted_buyprice, weighted_sellprice):
    if profit < config.profit_thresh or perc < config.perc_thresh:
        logging.verbose("[TraderBot] Profit or profit percentage lower than"
                        " thresholds")
        return
    if kask not in self.clients:
        logging.warn("[TraderBot] Can't automate this trade, client not"
                     " available: %s" % kask)
        return
    if kbid not in self.clients:
        logging.warn("[TraderBot] Can't automate this trade,"
                     " client not available: %s" % kbid)
        return
    volume = min(config.max_tx_volume, volume)

    # Update client balance
    self.update_balance()
    max_volume = self.get_min_tradeable_volume(
        buyprice, self.clients[kask].usd_balance, self.clients[kbid].btc_balance)
    volume = min(volume, max_volume, config.max_tx_volume)
    if volume < config.min_tx_volume:
        logging.warn("Can't automate this trade, minimum volume transaction"
                     " not reached %f/%f" % (volume, config.min_tx_volume))
        logging.warn("Balance on %s: %f USD - Balance on %s: %f BTC"
                     % (kask, self.clients[kask].usd_balance,
                        kbid, self.clients[kbid].btc_balance))
        return

    current_time = time.time()
    if current_time - self.last_trade < self.trade_wait:
        logging.warn("[TraderBot] Can't automate this trade, last trade"
                     " occurred %.2f seconds ago" % (current_time - self.last_trade))
        return

    self.potential_trades.append([profit, volume, kask, kbid,
                                  weighted_buyprice, weighted_sellprice,
                                  buyprice, sellprice])

def _get_balances(self):
    """Get balance"""
    res = self.client.get_userinfo()
    logging.verbose("kkex get_balances: %s" % res)
    entry = res['info']['funds']
    for currency in ['BCH', 'BTC', 'ETH']:
        self.balance[currency] = float(entry['free'][currency]) + float(entry['freezed'][currency])
        self.available[currency] = float(entry['free'][currency])
    return res

def recurse(args, pkgnames, arch=None, in_apkindexes=True, in_aports=True, strict=False):
    """
    Find all dependencies of the given pkgnames.

    :param in_apkindexes: look through all APKINDEX files (with the specified arch)
    :param in_aports: look through the aports folder
    :param strict: raise RuntimeError, when a dependency can not be found.
    """
    logging.debug("Calculate depends of packages " + str(pkgnames) +
                  ", arch: " + arch)
    logging.verbose("Search in_aports: " + str(in_aports) +
                    ", in_apkindexes: " + str(in_apkindexes))

    # Sanity check
    if not in_apkindexes and not in_aports:
        raise RuntimeError("Set at least one of in_apkindexes or in_aports to True.")

    todo = list(pkgnames)
    ret = []
    while len(todo):
        # Skip already passed entries
        pkgname = todo.pop(0)
        if pkgname in ret:
            continue

        # Get depends
        logging.verbose("Getting depends of single package: " + pkgname)
        depends = None
        if in_aports:
            aport = pmb.build.find_aport(args, pkgname, False)
            if aport:
                logging.verbose("-> Found aport: " + aport)
                apkbuild = pmb.parse.apkbuild(args, aport + "/APKBUILD")
                depends = apkbuild["depends"]
        if depends is None and in_apkindexes:
            logging.verbose("-> Search through APKINDEX files")
            depends = apkindex(args, pkgname, arch)
        if depends is None and strict:
            raise RuntimeError(
                recurse_error_message(pkgname, in_aports, in_apkindexes))

        # Append to todo/ret
        logging.verbose("-> Depends: " + str(depends))
        if depends:
            todo += depends
        ret.append(pkgname)
    return ret

def cancel_local_orders(self, market):
    orders = self.brokers[market].get_orders_history()
    if not orders:
        return

    for order in orders:
        logging.verbose("Cancelling: %s %s @ %s" % (order['type'], order['amount'], order['price']))
        while True:
            result = self.cancel_order(market, order['type'], order['order_id'])
            if not result:
                time.sleep(10)
            else:
                break

def copy_files(self, _files: list) -> None:
    """
    Copy files to add at the end (configuration files, and so on)

    :param _files: the files to copy
    """
    if not _files:
        return

    logging.verbose("Copying required files")
    for _file in _files:
        name, destination = _file.split("=>")
        shutil.copy2(os.path.join(self.additional_sources_path, name),
                     os.path.join(self.install_dir, destination))
        logging.verbose("Copying " + name + " to " + os.path.join(self.install_dir, destination))

def run_test(self, pkt_size, duration, value):
    cores_tagged = [
        self.get_cpu_id(self._tester_cpu_map, 1, int(config.getOption("testerSocketId")), False),
        self.get_cpu_id(self._tester_cpu_map, 4, int(config.getOption("testerSocketId")), False),
    ]
    cores_plain = [
        self.get_cpu_id(self._tester_cpu_map, 2, int(config.getOption("testerSocketId")), False),
        self.get_cpu_id(self._tester_cpu_map, 3, int(config.getOption("testerSocketId")), False),
    ]

    self._tester.stop_all()
    self._tester.reset_stats()
    self._tester.set_pkt_size(cores_tagged, pkt_size)
    self._tester.set_pkt_size(cores_plain, pkt_size - 4)
    self._tester.set_speed(cores_tagged, value)
    ratio = 1.0 * (pkt_size - 4 + 20) / (pkt_size + 20)
    self._tester.set_speed(cores_plain, value * ratio)
    self._tester.start_all()

    # Getting statistics to calculate PPS at right speed....
    tsc_hz = self._tester.hz()
    sleep(2)
    rx_start, tx_start, tsc_start = self._tester.tot_stats()
    sleep(duration)
    # Get stats before stopping the cores. Stopping cores takes some time
    # and might skew results otherwise.
    rx_stop, tx_stop, tsc_stop = self._tester.tot_stats()

    lat_min, lat_max, lat_avg = self._tester.lat_stats(self.latency_cores())
    latency = dict(
        latency_min=lat_min,
        latency_max=lat_max,
        latency_avg=lat_avg
    )

    self._tester.stop_all()

    port_stats = self._tester.port_stats([0, 1, 2, 3])
    rx_total = port_stats[6]
    tx_total = port_stats[7]
    can_be_lost = int(tx_total * float(config.getOption('toleratedLoss')) / 100.0)
    logging.verbose("RX: %d; TX: %d; dropped: %d (tolerated: %d)",
                    rx_total, tx_total, tx_total - rx_total, can_be_lost)

    # calculate the effective throughput in Mpps
    tx = tx_stop - tx_start
    tsc = tsc_stop - tsc_start
    mpps = tx / (tsc / float(tsc_hz)) / 1000000
    pps = (value / 100.0) * utils.line_rate_to_pps(pkt_size, 4)
    logging.verbose("Mpps configured: %f; Mpps effective %f", (pps / 1000000.0), mpps)

    return (tx_total - rx_total <= can_be_lost), mpps, 100.0 * (tx_total - rx_total) / float(tx_total), latency

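# Sanity check of the throughput arithmetic above, with illustrative numbers:
# 120e6 packets sent over 20e9 TSC ticks at a 2 GHz TSC is a 10 s window,
# i.e. 12 Mpps.
tsc_hz = 2000000000          # 2 GHz TSC
tx = 120000000               # packets transmitted during the window
tsc = 20000000000            # elapsed TSC ticks -> 10 seconds
mpps = tx / (tsc / float(tsc_hz)) / 1000000
assert round(mpps) == 12
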
def opportunity(self, profit, volume, buyprice, kask, sellprice, kbid,
                perc, weighted_buyprice, weighted_sellprice):
    if profit < config.profit_thresh or perc < config.perc_thresh:
        logging.verbose("[TraderBot] Profit or profit percentage lower than"
                        " thresholds")
        return
    if kask not in self.clients:
        logging.warn("[TraderBot] Can't automate this trade, client not"
                     " available: %s" % kask)
        return
    if kbid not in self.clients:
        logging.warn("[TraderBot] Can't automate this trade,"
                     " client not available: %s" % kbid)
        return
    if volume < config.min_tx_volume:
        logging.warn("Can't automate this trade, minimum volume transaction"
                     " not reached %f/%f" % (volume, config.min_tx_volume))
        return

    max_volume = self.get_min_tradeable_volume(
        buyprice, self.clients[kask].pair2_balance, self.clients[kbid].pair1_balance)
    if max_volume < config.min_tx_volume or max_volume < volume:
        _message = ("Insufficient funds!\n%s: %.4f %s\n%s: %.4f %s"
                    % (kask, self.clients[kask].pair2_balance, self.clients[kask].pair2_name,
                       kbid, self.clients[kbid].pair1_balance, self.clients[kbid].pair1_name))
        logging.warn(_message)
        send_message(_message)
        time.sleep(5)
        self.update_balance()
        return

    volume = min(volume, max_volume, config.max_tx_volume)

    current_time = time.time()
    if current_time - self.last_trade < self.trade_wait:
        logging.warn("[TraderBot] Can't automate this trade, last trade"
                     " occurred %.2f seconds ago" % (current_time - self.last_trade))
        return

    self.potential_trades.append([profit, volume, kask, kbid,
                                  weighted_buyprice, weighted_sellprice,
                                  buyprice, sellprice])

def clear_cache(args, path):
    """
    Clear the APKINDEX parsing cache.

    :returns: True on successful deletion, False otherwise
    """
    logging.verbose("Clear APKINDEX cache for: " + path)
    if path in args.cache["apkindex"]:
        del args.cache["apkindex"][path]
        return True

    logging.verbose("Nothing to do, path was not in cache: " +
                    str(args.cache["apkindex"].keys()))
    return False

def place_orders(self, refer_bid_price, refer_ask_price):
    # Update client balance
    self.update_balance()

    max_bch_trade_amount = config.LIQUID_MAX_BCH_AMOUNT
    min_bch_trade_amount = config.LIQUID_MIN_BCH_AMOUNT
    liquid_max_diff = config.LIQUID_MAX_DIFF

    # execute trade
    if self.buying_len() < config.LIQUID_BUY_ORDER_PAIRS:
        bprice = refer_bid_price * (1 - config.LIQUID_INIT_DIFF)
        amount = round(max_bch_trade_amount * random.random(), 2)
        # random discount (up to liquid_max_diff) based on bprice
        price = round(bprice * (1 - liquid_max_diff * random.random()), 5)
        Qty = min(self.mm_broker.btc_balance / price, self.hedge_broker.bch_available)
        # Qty = min(Qty, config.LIQUID_BTC_RESERVE/price)
        if Qty < amount or amount < min_bch_trade_amount:
            logging.verbose("BUY amount (%s) not IN (%s, %s)" % (amount, min_bch_trade_amount, Qty))
        else:
            self.new_order(self.mm_market, 'buy', amount=amount, price=price)

    if self.selling_len() < config.LIQUID_SELL_ORDER_PAIRS:
        sprice = refer_ask_price * (1 + config.LIQUID_INIT_DIFF)
        amount = round(max_bch_trade_amount * random.random(), 2)
        # random premium (up to liquid_max_diff) based on sprice
        price = round(sprice * (1 + liquid_max_diff * random.random()), 5)
        Qty = min(self.mm_broker.bch_available, self.hedge_broker.btc_available / price)
        # Qty = min(Qty, config.LIQUID_BCH_RESERVE)
        if Qty < amount or amount < min_bch_trade_amount:
            logging.verbose("SELL amount (%s) not IN (%s, %s)" % (amount, min_bch_trade_amount, Qty))
        else:
            self.new_order(self.mm_market, 'sell', amount=amount, price=price)
    return

def get_channel_new(channel):
    """
    Translate legacy channel names to the new ones. Legacy names are still
    supported for compatibility with old branches (pmb#2015).

    :param channel: name as read from pmaports.cfg or channels.cfg, like
                    "edge", "v21.03" etc., or potentially a legacy name
                    like "stable".
    :returns: name in the new format, e.g. "edge" or "v21.03"
    """
    legacy_cfg = pmb.config.pmaports_channels_legacy
    if channel in legacy_cfg:
        ret = legacy_cfg[channel]
        logging.verbose(f"Legacy channel '{channel}' translated to '{ret}'")
        return ret
    return channel

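# Illustrative usage of get_channel_new(); the legacy mapping below is an
# assumption for the example, not the real contents of
# pmb.config.pmaports_channels_legacy:
#
#   pmb.config.pmaports_channels_legacy = {"stable": "v20.05"}
#   get_channel_new("stable")  # -> "v20.05" (translated, logs a verbose line)
#   get_channel_new("edge")    # -> "edge"   (already in the new format)
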
def install_python_modules() -> None:
    """
    Install necessary python modules for the scripts and aesthetics

    :raise subprocess.CalledProcessError
    """
    logging.verbose("Installing python dependencies")
    requirements = os.path.join(constants.CONF_PATH, "requirements.pip")
    cmd = ["pip3", "install", "-r", requirements]

    # pylint: disable=no-member
    if (not (hasattr(sys, 'real_prefix') and sys.prefix != sys.real_prefix)) \
            and (sys.prefix == sys.base_prefix):
        # we are not in a virtualenv. Let's install the packages as user
        cmd.insert(2, "--user")

    launch_and_log(cmd, error_msg="Could not install python module")

def run(self) -> int:
    """
    The main program, handles everything
    """
    with suppress(FileNotFoundError):
        shutil.rmtree(self.working_dir)

    with FileLock(os.path.join("/tmp/", "." + self.conf.get("name") + ".build")):
        if not self.download_sources():
            self.force_installation = True

        if os.path.exists(self.install_dir):
            if not self.force_installation:
                logging.warning(
                    "The install directory is not empty. %(name)s will not be installed",
                    dict(name=self.conf["name"])
                )
                return 1
            logging.verbose(
                "%(name)s was already installed, removing it before continuing",
                dict(name=self.conf["name"])
            )
            shutil.rmtree(self.install_dir)

        logging.info("Treating " + self.conf["display_name"])
        self.prepare_sources()
        self.patch(self.conf.getlist("patches_pre_config", []), self.working_dir)
        self.configure()
        self.patch(self.conf.getlist("patches_post_config", []), self.working_dir)
        self.copy_files(self.conf.getlist("copy_post_config", []))

        if self.conf.getboolean("make", True):
            self.make()

        self.install()

        if get_global_conf().getboolean("install", "llvm_bitcode") and ("bitcode_file" in self.conf.keys()):
            self.extract_bitcode()

        self.patch(self.conf.getlist("patches_post_install", []), self.install_dir)
        self.copy_files(self.conf.getlist("copy_post_install", []))

        if os.path.exists(os.path.join(self.patches_path, self.conf["display_name"] + ".patch")):
            self.patch([self.conf["display_name"] + ".patch"], self.working_dir, True)

    logging.info("finished installing %(name)s", dict(name=self.conf["display_name"]))
    return 0

def on_message(self, ws, message):
    data = json.loads(message)
    if 'params' in data:
        data = data['params']
        _b_full_orderbook = data[0]
        if _b_full_orderbook:
            self.depth_data = data[1]
        else:
            self._update_orderbook(data[1])
        self.depth_update_time = time.time()
    elif 'result' in data:
        if data['result'] == 'pong':
            logging.verbose('get coinex pong message')

def run(self, *args, run_number: int = 0, **kwargs) -> int:
    """
    Benchmarks the number of requests per second an apache server can handle.
    Runs at most 100 times before deciding the run is a failure.

    :param args: additional arguments
    :param run_number: the number of times the benchmark has run
    :param kwargs: additional keyword arguments
    :return: 0|1|None on success|failure|unexpected result
    """
    proc_start = self.trigger.Server(self.trigger.cmd)
    proc_start.start()
    time.sleep(self.trigger.delay)

    cmd = "ab -n 30000 -c 1 {}".format(self.trigger.benchmark_url).split(" ")
    logging.verbose(cmd)
    try:
        output = subprocess.check_output(cmd, stderr=subprocess.STDOUT, **kwargs)
    except subprocess.CalledProcessError as exc:
        for line in exc.output.decode().split("\n"):
            logging.debug(line)
        return self.retry(*args, run_number=run_number, **kwargs)
    else:
        success = self.trigger.check_success()
        if success:
            return self.retry(*args, run_number=run_number, **kwargs)

        self.trigger.result = []
        for line in output.decode().split("\n"):
            if line.startswith("Requests per second:"):
                self.trigger.returned_information = [
                    float(line.split(":")[1].strip().split(" ")[0])
                ]

    with suppress(subprocess.CalledProcessError):
        launch_and_log(self.trigger.stop_cmd.split(" "))

    if len(self.trigger.returned_information) == 0:
        return self.retry(*args, run_number=run_number, **kwargs)

    logging.verbose("Requests per second : {}".format(self.trigger.returned_information[0]))
    return success

def providers(args, package, arch=None, must_exist=True, indexes=None):
    """
    Get all packages, which provide one package.

    :param package: of which you want to have the providers
    :param arch: defaults to native arch, only relevant for indexes=None
    :param must_exist: When set to true, raise an exception when the package
                       is not provided at all.
    :param indexes: list of APKINDEX.tar.gz paths, defaults to all index files
                    (depending on arch)
    :returns: list of parsed packages. Example for package="so:libGL.so.1":
              {"mesa-egl": block, "libhybris": block}
              block is the return value from parse_next_block() above.
    """
    if not indexes:
        arch = arch or args.arch_native
        indexes = pmb.helpers.repo.apkindex_files(args, arch)

    ret = {}
    for path in indexes:
        # Skip indexes not providing the package
        index_packages = parse(args, path)
        if package not in index_packages:
            continue

        # Iterate over found providers
        for provider_pkgname, provider in index_packages[package].items():
            # Skip lower versions of providers we already found
            version = provider["version"]
            if provider_pkgname in ret:
                version_last = ret[provider_pkgname]["version"]
                if pmb.parse.version.compare(version, version_last) == -1:
                    logging.verbose(package + ": provided by: " +
                                    provider_pkgname + "-" + version + " in " +
                                    path + " (but " + version_last + " is"
                                    " higher)")
                    continue

            # Add the provider to ret
            logging.verbose(package + ": provided by: " + provider_pkgname +
                            "-" + version + " in " + path)
            ret[provider_pkgname] = provider

    if ret == {} and must_exist:
        logging.debug("Searched in APKINDEX files: " + ", ".join(indexes))
        raise RuntimeError("Could not find package '" + package + "'!")

    return ret

def skip_already_built(args, pkgname, arch):
    """
    Check if the package was already built in this session, and add it to the
    cache in case it was not built yet.

    :returns: True when it can be skipped or False
    """
    if arch not in args.cache["built"]:
        args.cache["built"][arch] = []
    if pkgname in args.cache["built"][arch]:
        logging.verbose(pkgname + ": already checked this session,"
                        " no need to build it or its dependencies")
        return True
    args.cache["built"][arch].append(pkgname)
    return False

def run(self) -> int:
    """
    Runs the cmd program in a subprocess, with rlimit set, and checks the
    output to be sure that it is the correct bug

    :return: 0|1|None on success|failure|unexpected result
    """
    logging.verbose(self.cmd)
    error_code = 0
    try:
        # noinspection PyTypeChecker
        launch_and_log(self.cmd, shell=True, preexec_fn=self.__preexec_fn__)
    except subprocess.CalledProcessError as exc:
        error_code = exc.returncode

    return self.check_success(error_code=error_code)

def place_orders(self, refer_bid_price, refer_ask_price, mm_bid_price, mm_ask_price):
    # Update client balance
    if self.buying_len() < 2 * config.LIQUID_BUY_ORDER_PAIRS or \
            self.selling_len() < 2 * config.LIQUID_SELL_ORDER_PAIRS:
        self.update_balance()

    liquid_max_diff = config.LIQUID_MAX_DIFF

    # execute trade
    if self.buying_len() < 2 * config.LIQUID_BUY_ORDER_PAIRS:
        bprice = refer_bid_price * (1 - config.LIQUID_INIT_DIFF)
        amount = round(self.max_trade_amount * random.random(), 2)
        # random discount (up to liquid_max_diff) based on bprice
        price = round(bprice * (1 - liquid_max_diff * random.random()), 5)
        Qty = min(self.mm_broker.available.get('BTC', 0) / price,
                  self.hedge_broker.available.get(self.currency, 0))
        if Qty < amount or amount < self.min_trade_amount:
            logging.verbose("BUY amount (%s) not IN (%s, %s)" % (amount, self.min_trade_amount, Qty))
        else:
            if mm_ask_price > 0 and mm_ask_price < bprice:
                price = bprice
            if (mm_ask_price > 0 and mm_ask_price < bprice) or \
                    self.buying_len() < config.LIQUID_BUY_ORDER_PAIRS:
                self.new_order(self.mm_market, 'buy', amount=amount, price=price)

    if self.selling_len() < 2 * config.LIQUID_SELL_ORDER_PAIRS:
        sprice = refer_ask_price * (1 + config.LIQUID_INIT_DIFF)
        amount = round(self.max_trade_amount * random.random(), 2)
        # random premium (up to liquid_max_diff) based on sprice
        price = round(sprice * (1 + liquid_max_diff * random.random()), 5)
        Qty = min(self.mm_broker.available.get(self.currency, 0),
                  self.hedge_broker.available.get('BTC', 0) / price)
        if Qty < amount or amount < self.min_trade_amount:
            logging.verbose("SELL amount (%s) not IN (%s, %s)" % (amount, self.min_trade_amount, Qty))
        else:
            if mm_bid_price > 0 and mm_bid_price > sprice:
                price = sprice
            if (mm_bid_price > 0 and mm_bid_price > sprice) or \
                    self.selling_len() < config.LIQUID_SELL_ORDER_PAIRS:
                self.new_order(self.mm_market, 'sell', amount=amount, price=price)
    return

def is_necessary(args, arch, apkbuild, indexes=None):
    """
    Check if the package has already been built. Compared to abuild's check,
    this check also works for different architectures.

    :param arch: package target architecture
    :param apkbuild: from pmb.parse.apkbuild()
    :param indexes: list of APKINDEX.tar.gz paths
    :returns: boolean
    """
    # Get package name, version, define start of debug message
    package = apkbuild["pkgname"]
    version_new = apkbuild["pkgver"] + "-r" + apkbuild["pkgrel"]
    msg = "Build is necessary for package '" + package + "': "

    # Get old version from APKINDEX
    index_data = pmb.parse.apkindex.package(args, package, arch, False, indexes)
    if not index_data:
        logging.debug(msg + "No binary package available")
        return True

    # Can't build pmaport for arch: use Alpine's package (#1897)
    if arch and not pmb.helpers.pmaports.check_arches(apkbuild["arch"], arch):
        logging.verbose(f"{package}: build is not necessary, because pmaport"
                        f" can't be built for {arch}. Using Alpine's binary"
                        " package.")
        return False

    # a) Binary repo has a newer version
    version_old = index_data["version"]
    if pmb.parse.version.compare(version_old, version_new) == 1:
        logging.warning("WARNING: package {}: aport version {} is lower than"
                        " {} from the binary repository. {} will be used when"
                        " installing {}. See also:"
                        " <https://postmarketos.org/warning-repo2>"
                        "".format(package, version_new, version_old,
                                  version_old, package))
        return False

    # b) Aports folder has a newer version
    if version_new != version_old:
        logging.debug(f"{msg}Binary package out of date (binary: "
                      f"{version_old}, aport: {version_new})")
        return True

    # Aports and binary repo have the same version.
    return False

def recurse(args, pkgnames, suffix="native"):
    """
    Find all dependencies of the given pkgnames.

    :param suffix: the chroot suffix to resolve dependencies for. If a package
                   has multiple providers, we look at the installed packages
                   in the chroot to make a decision (see package_provider()).
    :returns: list of pkgnames: consists of the initial pkgnames plus all
              depends
    """
    logging.debug("(" + suffix + ") calculate depends of " +
                  ", ".join(pkgnames) + " (pmbootstrap -v for details)")

    # Iterate over todo-list until it is empty
    todo = list(pkgnames)
    ret = []
    while len(todo):
        # Skip already passed entries
        pkgname_depend = todo.pop(0)
        if pkgname_depend in ret:
            continue

        # Get depends and pkgname from aports
        pkgnames_install = list(ret) + todo
        package = package_from_aports(args, pkgname_depend)
        package = package_from_index(args, pkgname_depend, pkgnames_install,
                                     package, suffix)

        # Nothing found
        if not package:
            logging.info("NOTE: Run 'pmbootstrap pkgrel_bump --auto' to mark"
                         " packages with outdated dependencies for rebuild."
                         " This will most likely fix this issue (soname"
                         " bump?).")
            raise RuntimeError("Could not find package '" + pkgname_depend +
                               "' in any aports folder or APKINDEX.")

        # Append to todo/ret (unless it is a duplicate)
        pkgname = package["pkgname"]
        if pkgname in ret:
            logging.verbose(pkgname + ": already found")
        else:
            depends = package["depends"]
            logging.verbose(pkgname + ": depends on: " + ",".join(depends))
            if depends:
                todo += depends
            ret.append(pkgname)
    return ret

def check_success(self, error_code: int, *args, **kwargs) -> int:
    """
    Checks for the success of the trigger result.

    :param error_code: the error code returned by the trigger
    :param args: additional arguments
    :param kwargs: additional keyword arguments
    :return: 0|1|None on success|expected failure|unexpected failure
    """
    if error_code == self.expected_failure:
        return 1
    if error_code == 0:
        return 0
    logging.verbose("Got error code {}, expected {}".format(error_code, self.expected_failure))
    return None

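# Illustrative mapping of check_success() return values, assuming
# expected_failure == 139 (a hypothetical value):
#
#   check_success(139)  # -> 1     the expected failure happened
#   check_success(0)    # -> 0     clean exit
#   check_success(1)    # -> None  unexpected error code, logged verbosely
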
def check_arch_recurse(args, pkgname, arch):
    """
    Recursively check if a package and its dependencies exist (binary repo)
    or can be built (pmaports) for a certain architecture.

    :param pkgname: name of the package
    :param arch: architecture to check against
    :returns: True when all the package's dependencies can be built or exist
              for the arch in question
    """
    for pkgname_i in depends_recurse(args, pkgname, arch):
        if not check_arch(args, pkgname_i, arch):
            if pkgname_i != pkgname:
                logging.verbose(pkgname + ": (indirectly) depends on " + pkgname_i)
            logging.verbose(pkgname_i + ": can't be built for " + arch)
            return False
    return True

def download_sources(self) -> bool:
    if os.path.exists(self.source_storage_path):
        return True

    logging.verbose("Downloading %(file)s", dict(file=self.conf["url"]))
    logging.getLogger("urllib3").setLevel(logging.WARNING)
    logging.getLogger("requests").setLevel(logging.WARNING)

    response = requests.get(self.conf["url"], stream=True)
    if response.status_code != requests.codes.ok:
        response.raise_for_status()

    os.makedirs(os.path.dirname(self.source_storage_path), exist_ok=True)
    with open(self.source_storage_path, "wb") as _file_:
        for chunk in response.iter_content(chunk_size=1024):
            if chunk:  # this is to filter out keepalive chunks
                _file_.write(chunk)

    # the sources were not there before: report a fresh download so the
    # caller can force a (re)installation
    return False

def opportunity(self, profit, volume, buyprice, kask, sellprice, kbid,
                perc, weighted_buyprice, weighted_sellprice):
    if kask not in self.clients:
        logging.warn("[TraderBot] Can't automate this trade, client not available: %s" % kask)
        return
    if kbid not in self.clients:
        logging.warn("[TraderBot] Can't automate this trade, client not available: %s" % kbid)
        return

    if profit < self.profit_thresh or perc < self.perc_thresh:
        logging.verbose("[TraderBot] Profit or profit percentage (%0.4f/%0.4f) lower than thresholds (%s/%s)"
                        % (profit, perc, self.profit_thresh, self.perc_thresh))
        return
    logging.verbose("[TraderBot] Profit or profit percentage (%0.4f/%0.4f) higher than thresholds (%s/%s)"
                    % (profit, perc, self.profit_thresh, self.perc_thresh))

    if perc > 20:  # suspicious profit, added after discovering btc-central may send corrupted order book
        logging.warn("Profit=%f seems malformed" % (perc, ))
        return

    # Update client balance
    self.update_balance()
    max_volume = self.get_min_tradeable_volume(buyprice,
                                               self.clients[kask].cny_balance,
                                               self.clients[kbid].btc_balance)
    volume = min(volume, max_volume, config.max_tx_volume)
    if volume < config.min_tx_volume:
        logging.warn("Can't automate this trade, minimum volume transaction"
                     " not reached %f/%f" % (volume, config.min_tx_volume))
        logging.warn("Balance on %s: %f CNY - Balance on %s: %f BTC"
                     % (kask, self.clients[kask].cny_balance,
                        kbid, self.clients[kbid].btc_balance))
        return

    current_time = time.time()
    if current_time - self.last_trade < self.trade_wait:
        logging.warn("[TraderBot] Can't automate this trade, last trade"
                     " occurred %.2f seconds ago" % (current_time - self.last_trade))
        return

    self.potential_trades.append([profit, volume, kask, kbid,
                                  weighted_buyprice, weighted_sellprice,
                                  buyprice, sellprice])

def recurse(args, pkgnames, suffix="native"):
    """
    Find all dependencies of the given pkgnames.

    :param suffix: the chroot suffix to resolve dependencies for. If a package
                   has multiple providers, we look at the installed packages
                   in the chroot to make a decision (see package_provider()).
    :returns: list of pkgnames: consists of the initial pkgnames plus all
              depends
    """
    logging.debug(f"({suffix}) calculate depends of {', '.join(pkgnames)} "
                  "(pmbootstrap -v for details)")

    # Iterate over todo-list until it is empty
    todo = list(pkgnames)
    ret = []
    while len(todo):
        # Skip already passed entries
        pkgname_depend = todo.pop(0)
        if pkgname_depend in ret:
            continue

        # Get depends and pkgname from aports
        pkgnames_install = list(ret) + todo
        package = package_from_aports(args, pkgname_depend)
        package = package_from_index(args, pkgname_depend, pkgnames_install,
                                     package, suffix)

        # Nothing found
        if not package:
            raise RuntimeError(f"Could not find dependency '{pkgname_depend}' "
                               "in any aports folder or APKINDEX. See:"
                               " <https://postmarketos.org/depends>")

        # Append to todo/ret (unless it is a duplicate)
        pkgname = package["pkgname"]
        if pkgname in ret:
            logging.verbose(f"{pkgname}: already found")
        else:
            depends = package["depends"]
            logging.verbose(f"{pkgname}: depends on: {','.join(depends)}")
            if depends:
                todo += depends
            ret.append(pkgname)
    return ret

def create_executable(self, installer: Installer, extension: str = None, version_number: int = None,
                      force: bool = False, *args, **kwargs) -> int:
    """
    Creates a special executable to run for this plugin if needed. If a patch is supplied in the form
    "program-name-version-extension.patch", it will automatically get used to create a new version

    :param installer: the installer instance that is used
    :param extension: the extension to add to the binary, usually the plugin name
    :param version_number: if multiple versions are required for a plugin, this will get appended to it
    :param force: force creation even if no patch is provided
    :param args: additional arguments
    :param kwargs: additional keyword arguments
    :return: None|0 if nothing happened or installation is successful
    """
    extension = extension or self.extension
    executable_suffix = "{}-{}".format(extension, version_number) if version_number else extension

    for lib in installer.conf.getlist("libraries"):
        lib_installer = Installer.factory(installer.conf.get_library(lib), False)
        with ExtensionPatcherManager(lib_installer, extension) as lib_patcher:
            if lib_patcher.is_patched or force:
                lib_installer.configure()
                lib_installer.make()
                lib_installer.install()
                force = True

    with ExtensionPatcherManager(installer, extension) as patcher:
        if not patcher.is_patched and not force:
            logging.verbose("No need to create special executable for {}".format(extension))
            return

        installer.make()

        executable = os.path.join(installer.working_dir, installer.conf.get("bitcode_file"))
        destination = "{}-{}".format(installer.conf.get_executable(), executable_suffix)
        logging.verbose("Copying {} to {}".format(executable, os.path.join(installer.install_dir, destination)))
        shutil.copy(executable, os.path.join(installer.install_dir, destination))

    for lib in installer.conf.getlist("libraries"):
        lib_installer = Installer.factory(installer.conf.get_library(lib), False)
        if force:
            lib_installer.configure()
            lib_installer.make()
            lib_installer.install()

    return 0

def _get_balances(self):
    """Get balance"""
    res = self.client.balances()
    logging.verbose("bitfinex get_balances: %s" % res)
    for entry in res:
        if entry['type'] != 'exchange':
            continue
        currency = entry['currency'].upper()
        if currency not in ('BTC', 'BCH', 'ETH'):
            continue
        self.balance[currency] = float(entry['amount'])
        self.available[currency] = float(entry['available'])
    return res

def patch(self, patches: list, directory: str, reverse: bool = False, patches_path=None) -> None:
    """
    Applies different patches to the sources or the installed files

    :param patches: list of patches to apply
    :param directory: the top directory where to apply these patches
    :param reverse: if the patch is to be reversed
    :param patches_path: the path where to find the patches. If not set, will use data/program_name/patches
    """
    if not patches:
        return

    for _patch in patches:
        logging.verbose("Applying {}patch {}".format("Reverse " if reverse else "", _patch))
        cmd = ["patch", "-p1", "-i", os.path.join(patches_path or self.patches_path, _patch)]
        if reverse:
            cmd.insert(2, "-R")
        helper.launch_and_log(cmd, cwd=directory, error_msg="A patch failed to apply")

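# The command assembled by patch() above looks like this (path illustrative):
cmd = ["patch", "-p1", "-i", "/data/program/patches/fix-build.patch"]
cmd.insert(2, "-R")  # only when reverse=True
# cmd is now ["patch", "-p1", "-R", "-i", "/data/program/patches/fix-build.patch"]
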
def loadConfigurableDb():
    '''
    Equivalent to GaudiKernel.ConfigurableDb.loadConfigurableDb(), but does a
    deep search and executes the '*_confDb.py' files instead of importing them.
    '''
    # find the '*_confDb.py' files that are not merged ones
    for p in sys.path:
        for f in [f for f in glob(os.path.join(p, '*', '*_confDb.py'))
                  if 'merged' not in f and os.path.isfile(f)]:
            logging.verbose('Loading %s', f)
            try:
                execfile(f, {}, {})
            except:
                # It may happen that the file is found but not completely
                # written, usually during parallel builds, but we do not care.
                pass
    # top up with the regular merged confDb (for the used projects)
    GaudiKernel.ConfigurableDb.loadConfigurableDb()

def replace_variable(apkbuild, value: str) -> str:
    def log_key_not_found(match):
        logging.verbose(f"{apkbuild['pkgname']}: key '{match.group(1)}' for"
                        f" replacing '{match.group(0)}' not found, ignoring")

    # ${foo}
    for match in revar.finditer(value):
        try:
            logging.verbose("{}: replace '{}' with '{}'".format(
                apkbuild["pkgname"], match.group(0), apkbuild[match.group(1)]))
            value = value.replace(match.group(0), apkbuild[match.group(1)], 1)
        except KeyError:
            log_key_not_found(match)

    # $foo
    for match in revar2.finditer(value):
        try:
            newvalue = apkbuild[match.group(1)]
            logging.verbose("{}: replace '{}' with '{}'".format(
                apkbuild["pkgname"], match.group(0), newvalue))
            value = value.replace(match.group(0), newvalue, 1)
        except KeyError:
            log_key_not_found(match)

    # ${var/foo/bar}, ${var/foo/}, ${var/foo}
    for match in revar3.finditer(value):
        try:
            newvalue = apkbuild[match.group(1)]
            search = match.group(2)
            replacement = match.group(3)
            if replacement is None:  # arg 3 is optional
                replacement = ""
            newvalue = newvalue.replace(search, replacement, 1)
            logging.verbose("{}: replace '{}' with '{}'".format(
                apkbuild["pkgname"], match.group(0), newvalue))
            value = value.replace(match.group(0), newvalue, 1)
        except KeyError:
            log_key_not_found(match)

    # ${foo#bar}
    rematch4 = revar4.finditer(value)
    for match in rematch4:
        try:
            newvalue = apkbuild[match.group(1)]
            substr = match.group(2)
            if newvalue.startswith(substr):
                newvalue = newvalue.replace(substr, "", 1)
            logging.verbose("{}: replace '{}' with '{}'".format(
                apkbuild["pkgname"], match.group(0), newvalue))
            value = value.replace(match.group(0), newvalue, 1)
        except KeyError:
            log_key_not_found(match)

    return value

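# The patterns revar..revar4 are not shown in this snippet. A plausible set
# matching the four substitution forms handled above (an assumption, not
# necessarily pmbootstrap's actual definitions):
import re

revar = re.compile(r"\$\{(\w+)\}")                         # ${foo}
revar2 = re.compile(r"\$(\w+)")                            # $foo
revar3 = re.compile(r"\$\{(\w+)/([^/}]+)(?:/([^}]*))?\}")  # ${var/foo/bar}, ${var/foo/}, ${var/foo}
revar4 = re.compile(r"\$\{(\w+)#([^}]*)\}")                # ${foo#bar}
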
def new_order(self, market, type, maker_only=False, amount=None, price=None):
    if type == 'buy' or type == 'sell':
        if not price or not amount:
            print(price)
            print(amount)
            assert False

        if maker_only:
            if type == 'buy':
                order_id = self.brokers[market].buy_maker(amount, price)
            else:
                order_id = self.brokers[market].sell_maker(amount, price)
        else:
            if type == 'buy':
                order_id = self.brokers[market].buy_limit(amount, price)
            else:
                order_id = self.brokers[market].sell_limit(amount, price)

        if not order_id or order_id == -1:
            logging.warn("%s @%s %f/%f failed, %s" % (type, market, amount, price, order_id))
            return None

        order = {
            'market': market,
            'order_id': order_id,
            'price': price,
            'amount': amount,
            'deal_amount': 0,
            'deal_index': 0,
            'type': type,
            'time': time.time()
        }
        self.orders.append(order)
        logging.verbose("submit order %s" % (order))
        return order

    return None

def is_necessary_warn_depends(args, apkbuild, arch, force, depends_built):
    """
    Check if a build is necessary, and warn if it is not, but there were
    dependencies built.

    :returns: True or False
    """
    pkgname = apkbuild["pkgname"]
    ret = True if force else pmb.build.is_necessary(args, arch, apkbuild)

    if not ret and len(depends_built):
        # Warn of potentially outdated package
        logging.warning("WARNING: " + pkgname + " depends on rebuilt"
                        " package(s) " + ",".join(depends_built) + " (use"
                        " 'pmbootstrap build " + pkgname + " --force' if"
                        " necessary!)")

    logging.verbose(pkgname + ": build necessary: " + str(ret))
    return ret

def probe_wakeup(self, id):
    logging.debug("Function probe_wakeup()")
    try:
        # get group values, retrying a few times with a short pause in between
        for _ in range(3):
            response = self.serial_get_response(id + 'A')
            if response != '':
                logging.verbose("Function probe_wakeup() - Exit")
                return True
            time.sleep(0.1)
        logging.warning("Probe does not respond")
        return False
    except Exception as e:
        logging.critical("An exception was encountered in probe_wakeup(): %s" % str(e))
        return False

def read_any_index(args, package, arch=None):
    """
    Get information about a single package from any APKINDEX.tar.gz.

    :param arch: defaults to native architecture
    :returns: the same format as read()
    """
    if not arch:
        arch = args.arch_native

    # Return first match
    for index in pmb.helpers.repo.apkindex_files(args, arch):
        index_data = read(args, package, index, False)
        if index_data:
            logging.verbose(package + ": found in " + index)
            return index_data

    logging.verbose(package + ": no match found in any APKINDEX.tar.gz!")
    return None

def pre_trigger_run(self, trigger: RawTrigger, *args, **kwargs) -> None:
    """
    Updates the coredumps information in order to generate them correctly

    :param trigger: the trigger instance to use
    :param args: additional arguments
    :param kwargs: additional keyword arguments
    """
    trigger_full_path = trigger.cmd.split(" ")[0]
    if os.path.exists("{}-{}".format(trigger_full_path, self.extension)):
        trigger.cmd = trigger.cmd.replace(trigger_full_path, "{}-{}".format(trigger_full_path, self.extension))
        trigger.conf["executable"] = "{}-{}".format(trigger.conf.get("executable"), self.extension)

    os.makedirs(get_global_conf().getdir("trigger", "core_dump_location"), exist_ok=True)

    core_path = trigger.conf.get_core_path()
    logging.verbose("core_path: %(core_path)s", dict(core_path=core_path))

    with suppress(OSError):
        logging.debug("attempting to delete old coredump at %(core_path)s", dict(core_path=core_path))
        os.remove(core_path)

def run_test(self, pkt_size, duration, value):
    # Tester is sending packets at the required speed already after
    # setup_test(). Just get the current statistics, sleep the required
    # amount of time and calculate packet loss.

    # Getting statistics to calculate PPS at right speed....
    tsc_hz = self._tester.hz()
    rx_start, tx_start, tsc_start = self._tester.tot_stats()
    sleep(duration)
    # Get stats before stopping the cores. Stopping cores takes some time
    # and might skew results otherwise.
    rx_stop, tx_stop, tsc_stop = self._tester.tot_stats()

    # report latency
    lat_min, lat_max, lat_avg = self._tester.lat_stats(self._rx_lat_cores)

    self._tester.stop(self._cpe_cores + self._inet_cores)

    # flush packets in NIC RX buffers so they are counted too when
    # calculating the number of dropped packets.
    logging.verbose("Test ended. Flushing NIC buffers")
    self._tester.start(self._all_rx_cores)
    sleep(3)
    self._tester.stop(self._all_rx_cores)

    # calculate the effective throughput in Mpps
    rx = rx_stop - rx_start
    tsc = tsc_stop - tsc_start
    mpps = rx / (tsc / float(tsc_hz)) / 1000000
    logging.verbose("MPPS: %f", mpps)

    rx_tot, tx_tot, drop_tot, _ = self._tester.rx_stats(self._all_stats_cores)
    can_be_lost = int(tx_tot * float(config.getOption('toleratedLoss')) / 100.0)
    logging.verbose("RX: %d; TX: %d; drop: %d; TX-RX: %d (tolerated: %d)",
                    rx_tot, tx_tot, drop_tot, tx_tot - rx_tot, can_be_lost)

    return (tx_tot - rx_tot <= can_be_lost), mpps, 100.0 * (tx_tot - rx_tot) / float(tx_tot)

def run(self) -> int:
    """
    Main function. Calls every other one in order to make the bug trigger

    :return: 0|1|None on success|failure|unexpected event
    """
    try:
        logging.verbose(self.cmd)
        # this is not a typo. Using cmd is REQUIRED for the sake of plugins
        proc_start = self.Server(self.cmd)
        proc_start.start()
        time.sleep(self.delay)

        triggers = []
        results_queue = multiprocessing.Queue()  # pylint: disable=no-member
        for command in self.helper_commands:
            # noinspection PyCallingNonCallable
            triggers.append(self.helper(command, results=results_queue, **self.named_helper_args))

        for thread in triggers:
            thread.start()
        for thread in triggers:
            thread.join(self.timeout)
        for thread in triggers:
            thread.terminate()
    finally:
        with suppress(subprocess.CalledProcessError):
            launch_and_log(self.stop_cmd.split(" "))

    results = []
    for _ in triggers:
        with suppress(queue.Empty):
            results.append(results_queue.get_nowait())

    time.sleep(self.delay)
    return self.check_success(results=results)

def run(self): """ Installs the programs and reports the value """ error = None try: for _installer in self.programs: try: if (not _installer.run()) and _installer.conf.get("executable", None): hooks.create_executables(installer=_installer) hooks.post_install_run(installer=_installer, **kwargs) except InstallationErrorException as exception: logging.error(exception.error_message) logging.error("Won't install %(program)s", dict(program=_installer.conf.get("name"))) error = constants.INSTALL_FAIL except Exception as exc: # pylint: disable=broad-except error = constants.INSTALL_FAIL logging.error(exc) logging.debug("".join(traceback.format_tb(exc.__traceback__))) finally: logging.verbose("Cleaning environment") hooks.post_install_clean(**kwargs) self.max_tasks.release() self.report_queue.put((error or 0, self.programs[0].conf.get("name")))
def writeArticle(self, articleId, articleDict):
    """ appends data to current chunk """
    articleDict["articleId"] = articleId
    articleDict = self._removeSpecChar(articleDict)
    logging.log(5, "appending article info to %s: %s" % (self.articleFh.name, str(articleDict)))

    if len(articleDict) != len(articleFields):
        logging.error("column counts between article dict and article objects don't match")
        dictFields = sorted(articleDict.keys())
        logging.error("columns are %s" % str(dictFields))
        expFields = sorted(articleFields)
        logging.error("expected columns are %s" % str(expFields))
        raise ValueError("article dict does not match the expected columns")

    articleTuple = ArticleRec(**articleDict)

    # convert all fields to utf8 string, remove \n and \t
    articleTuple = listToUtf8Escape(articleTuple)

    line = "\t".join(articleTuple)
    self.articleFh.write(line + "\n")
    self.articlesWritten += 1
    logging.verbose("%d articles written" % self.articlesWritten)

def configure(self) -> None:
    """
    Configures the sources
    """
    if self.conf["configure"] == "configure":
        cmd = [
            os.path.join(self.sources_dir, "configure"),
            "--prefix={}".format(self.install_dir)
        ]
    elif self.conf["configure"] == "cmake":
        cmd = [
            "cmake",
            self.sources_dir
        ]
    else:
        logging.verbose("{} does not need configuration".format(self.conf["display_name"]))
        return

    cmd += self.conf.getlist("configure_args", [])

    logging.info("Configuring %(name)s", dict(name=self.conf["display_name"]))

    self.env["WLLVM_CONFIGURE_ONLY"] = "1"
    helper.launch_and_log(cmd, cwd=self.working_dir, env=self.env, error_msg="Configuration failed")
    del self.env["WLLVM_CONFIGURE_ONLY"]