def process(self, config, coin):
    """Swap mining from coin[0] to coin[1]: stop the old miner, wait up to
    60 half-second ticks for it to exit, then start the new one.

    Returns 0 on success, 1 if the old miner did not stop in time.
    In DRYRUN mode, prints the equivalent shell commands instead.
    """
    if config.DRYRUN:
        stop.process(self, config, coin[0])
        pinfo = status.get_status(coin[0])
        print('timeout 60 wait '+str(pinfo['pid']))
        print('[ $? = 0 ] && ', end='')
        start.process(self, config, coin[1])
    else:
        print("Stopping "+coin[0]['COIN'])
        stop.process(self, config, coin[0])
        pinfo = status.get_status(coin[0])
        t = 0
        while pinfo is not None and t < 60:
            t += 1
            # end='' keeps the counter on one line so the '\r' below can
            # overwrite it; the old py2-style trailing comma ("print(...),")
            # emitted a newline under py3 and broke the in-place display.
            print('Waiting ... '+str(t)+' ', end='')
            sys.stdout.flush()
            time.sleep(0.5)
            print('\r', end='')
            sys.stdout.flush()
            pinfo = status.get_status(coin[0])
        if t >= 60:
            print("FAIL: Process mining "+coin[0]['COIN']+" did not stop!")
            return 1
        print("Starting "+coin[1]['COIN'])
        start.process(self, config, coin[1])
    return 0
def acquire_status(self, desired_status_string):
    """Block until the shared "mpc_temp_status" flag equals
    *desired_status_string*, claiming it when it becomes empty.

    Returns the final value of "mpc_temp_status" (asserted equal to the
    desired string on the success path).

    NOTE(review): the loop nesting below is reconstructed from mangled
    whitespace — the claim-when-empty check is assumed to sit inside the
    polling while-loop; confirm against the original source.
    """
    # Don't need to do anything if we already have the status !!
    if mpc_status.get_status("mpc_temp_status") != desired_status_string:
        try:
            # wait to acquire lock from parallel workers
            with self.lock.acquire(timeout=timeout):
                # wait to aquire lock from any other code
                # e.g. PV uses this during identification/linking
                while mpc_status.get_status("mpc_temp_status") != desired_status_string:
                    # randomized backoff (0-10 ms) to de-synchronize workers
                    time.sleep(np.random.rand() * 0.01)
                    if mpc_status.get_status("mpc_temp_status") == '':
                        # status is free: claim it for ourselves
                        mpc_status.set_status("mpc_temp_status", desired_status_string)
                        time.sleep(np.random.rand() * 0.01)
                assert mpc_status.get_status("mpc_temp_status") == desired_status_string, \
                    f'Problem: mpc_temp_status = {mpc_status.get_status("mpc_temp_status")}'
        except Exception as e:
            # best-effort: report the failure but still return current status
            print('Problem with *aquire_status()*')
            print(e)
            print('\t:', desired_status_string)
    return mpc_status.get_status("mpc_temp_status")
def initialize(self, config, coin):
    """Validate a coin swap (coin[0] -> coin[1]) and initialize both sides.

    Returns 1 if the old coin is not being mined, or the new coin is
    already being mined by a different process; 0 otherwise.
    """
    pinfo = status.get_status(coin[0])
    if pinfo is None:
        print(coin[0]['COIN']+": There is no process mining "+coin[0]['COIN'])
        return 1
    pinfo = status.get_status(coin[1])
    # Compare coin names by value: the original 'is not' tested object
    # identity, which is unreliable for strings (interning-dependent).
    if pinfo is not None and coin[0]['COIN'] != coin[1]['COIN']:
        print(coin[1]['COIN']+": There is already a process mining "+coin[1]['COIN'])
        return 1
    stop.initialize(self, config, coin[0])
    start.initialize(self, config, coin[1])
    return 0
def handle_status(config, args):
    """Dispatch the status sub-commands: clear / set / show / list.

    args is a docopt-style mapping; unknown status names print the list of
    valid statuses and exit(1).
    """
    if args['clear']:
        clear_status(config.slack, config.default_statuses, config.default_dnd)
    elif args['set']:
        try:
            status = config.statuses[args['<status>']]
        except KeyError:
            # fixed typo: "vaild" -> "valid"
            print(
                f'{args["<status>"]} is not a valid status. Valid statuses are:'
            )
            print_statuses_list(config.statuses)
            exit(1)
        set_status(config.slack, status, args['<time>'])
    elif args['show']:
        if args['<status>'] is None:
            print(get_status(config.slack))
        else:
            try:
                # Index (not .get): .get() returned None for unknown names,
                # so the KeyError handler below was unreachable and the
                # command printed 'None' instead of the error message.
                print(config.statuses[args['<status>']])
            except KeyError:
                print(
                    f'{args["<status>"]} is not a valid status. Valid statuses are:'
                )
                print_statuses_list(config.statuses)
                exit(1)
    elif args['list']:
        print_statuses_list(config.statuses)
def gen_commit_message(dir):
    """ Generate commit message prompt text """
    # NOTE: this is Python 2 code ('print >> out' chevron syntax).
    status = get_status(dir)
    if not status:
        # nothing staged/changed: refuse to build an empty prompt
        raise ApplicationError("No files to commit")
    out = StringIO()
    # blank lines at the top are where the user types the message
    print >> out
    print >> out
    print >> out
    print >> out, SEPARATOR_END % "MESSAGE"
    section = "FILES"
    print >> out, SEPARATOR_BEGIN % section
    print >> out
    # one formatted line per changed file reported by get_status()
    for item in status:
        line = _format_status_item(item)
        print >> out, line
    print >> out
    print >> out, SEPARATOR_END % section
    return out.getvalue()
def status_loop():
    """Show the status once, then poll forever and refresh the lights
    whenever the status changes."""
    # local import so this block does not depend on a module-level import
    # we cannot see from here
    import time
    # this will constantly check to see if status has changed
    show_status()
    global previous_status
    while True:
        if status.get_status() != previous_status:
            # only change lights if status has changed
            show_status()
        # short sleep so the polling loop does not busy-spin a CPU core
        # (the original looped with no delay at all)
        time.sleep(0.5)
def custom_sleep(duration, current_status):
    """custom_sleep to allow to check for change in status while sleeping"""
    # Sleep in SLEEP_TIME slices, re-checking the status after each slice.
    # Note: iterates int(duration / SLEEP_TIME) + 1 times, matching the
    # original's range(0, count + 1).
    slices = int(duration / SLEEP_TIME) + 1
    for _ in range(slices):
        time.sleep(SLEEP_TIME)
        latest = status.get_status(current_status)
        if latest != current_status:
            # abort the sleep and report the fresh status to the caller
            raise StatusChangedException(latest)
def _get_status(self, state):
    """Refresh self.status from the backend, caching model and media width.

    On any failure, self.status is set to None (model/label_width keep
    their previous values, as in the original).
    """
    logger.debug("Getting status")
    try:
        self.status = get_status(self.backend)
        self.model = self.status.model
        self.label_width = self.status.media_width
    except Exception:
        # Narrowed from a bare 'except:' (which also swallowed
        # KeyboardInterrupt/SystemExit) and log the error instead of
        # discarding it silently.
        logger.exception("Failed to get status")
        self.status = None
def main(stdscr):
    ''' Main function of the program.

    Loads accounts from a user-selected CSV, then loops forever: fetch
    status for the current account, run its macro, and render the result
    on the curses screen every ACTION_INTERVAL seconds.
    '''
    output = ''
    file_path = fileopenbox('Open account data csv')
    if file_path is None:
        # user cancelled the file dialog
        terminate()
        return
    account_list = AccountList()
    with open(file_path, newline='') as csvfile:
        # sniff the delimiter from the first 1 KiB, then rewind and parse
        dialect = csv.Sniffer().sniff(csvfile.read(1024), delimiters=CSV_DELIMITERS)
        csvfile.seek(0)
        reader = csv.DictReader(csvfile, dialect=dialect)
        for row in reader:
            account_list.append(Account(row['username'], row['password']))
    connection = Connection()
    while True:
        current = account_list.current()
        if current is None:
            # all accounts processed
            terminate()
            return
        start_time = time.time()
        stdscr.clear()
        status = []
        macro = None
        try:
            status = get_status(connection, current)
            try:
                macro = get_macro(status)
                output = do_macro(connection, macro, current)
            except CompletedAccount:
                account_list.complete()
                output = {'description': 'Completed'}
        except RequestException as err:
            output = {'error': 'RequestException: {0}'.format(err)}
        except BadResponseException:
            output = {'error': 'BadResponseException'}
        except LootRetrieveException:
            output = {'error': 'LootRetrieveException'}
        process_time = time.time() - start_time
        stdscr.addstr('{:<30}{:.5f}s\n'.format('Process time', process_time))
        display_status(stdscr, status)
        stdscr.addstr('\n{:<30}{}\n'.format('Username', current.username))
        stdscr.addstr('{:<30}{}\n'.format('Password', current.password))
        stdscr.addstr('{:<30}{}\n'.format('Macro', macro))
        # fixed on-screen typo: 'Ouptut' -> 'Output'
        stdscr.addstr('{:<30}{}\n'.format('Output', output))
        stdscr.addstr('\nctrl+shift+q to exit')
        stdscr.refresh()
        time.sleep(ACTION_INTERVAL)
def relinquish_status(self, desired_status_string):
    """Clear the shared "mpc_temp_status" flag, but only if it currently
    holds *desired_status_string* (i.e. only if we appear to own it).

    Returns True if the status was cleared, False otherwise.
    """
    # Only attempt to change if the temp_status is what you think it is ...
    if mpc_status.get_status("mpc_temp_status") != desired_status_string:
        return False
    # wait to acquire lock from parallel workers, then set empty status
    with self.lock.acquire(timeout=timeout):
        mpc_status.set_status("mpc_temp_status", "")
    return True
def process(self, config, arguments):
    """Log the monitor-miners processes currently running.

    Returns 0 if at least one mining process is found, otherwise logs an
    error (optionally texting the host with -t) and returns 1.
    """
    if config.VERBOSE:
        config.logger.info('mon_miners.process()')
    pInfos = status.get_status(None, [os.getpid()], ['/usr/local/bin/monitor-miners'])
    # idiomatic truthiness replaces 'pInfos != None and len(pInfos) > 0'
    if pInfos:
        for pInfo in pInfos:
            config.logger.info(
                str(pInfo.get('pid')) + ' ' + ' '.join(pInfo.get('cmdline')))
    else:
        config.logger.error(
            "There are no mining processes running at this time!")
        # -t: send a text alert, rate-limited by fiboMeter
        if arguments.get('-t') and fiboMeter.next():
            textmsg.send(os.getenv('HOSTNAME') + ": Not mining!")
        return 1
    return 0
def main(stdscr):
    ''' Main function of the program '''
    # Refresh the curses display once per second with efficiency + status.
    efficiency = Efficiency()
    connection = Connection()
    while True:
        loop_start = time.time()
        stdscr.clear()
        efficiency.change()
        current_status = get_status(connection)
        elapsed = time.time() - loop_start
        stdscr.addstr('{:<30}{:.2f}%\n'.format('Efficiency', efficiency.get_efficiency()))
        stdscr.addstr('{:<30}{:.5f}s\n'.format('Process time', elapsed))
        display_status(stdscr, current_status)
        stdscr.refresh()
        time.sleep(1)
def show_status():
    """Drive the lights according to the current status, then remember it
    in the module-global previous_status.

    Looks up the light routine by name in globals() and calls it with
    either a length (non-monocolour) or an (r, g, b) triple.
    """
    global previous_status
    current_status = status.get_status()
    status_name = current_status['name']
    # Membership test replaces the original manual scan over
    # status.STATUSES (same effect unless STATUSES contained duplicates).
    if status_name in status.STATUSES:
        light_type = current_status['lights']
        if current_status['monocolour'] == 'no':
            length = current_status['length']
            globals()[light_type](length)
        else:
            r = current_status['red']
            g = current_status['green']
            b = current_status['blue']
            globals()[light_type](r, g, b)
    previous_status = current_status
def status_loop(default):
    """Constantly check to see if status has changed.

    Shows the current status and sleeps in interruptible slices; when
    custom_sleep raises StatusChangedException the new status (carried in
    e.message) is applied on the next iteration.
    """
    current_status = status.get_status(default)
    new_status = current_status
    while True:
        try:
            if new_status:
                current_status = new_status
                new_status = None
            show_status(current_status)
            custom_sleep(1, current_status)
        except StatusChangedException as e:
            # py3 call syntax: the original 'print e.message' is a py2
            # statement and a SyntaxError alongside the print(e) below.
            print(e.message)
            new_status = e.message
        except Exception as e:
            # keep looping on unexpected errors, but report them
            print(e)
def disable(addons):
    """Disables one or more MicroK8s addons.

    For a list of available addons, run `microk8s status`.

    To see help for individual addons, run:

        microk8s disable ADDON -- --help
    """
    # preconditions: unlocked cluster, sufficient permissions, running node
    is_cluster_locked()
    exit_if_no_permission()
    ensure_started()
    available = get_available_addons(get_current_arch())
    _, currently_disabled = get_status(available, True)
    disabled_names = {addon['name'] for addon in currently_disabled}
    xable('disable', addons, disabled_names)
def enable(addons):
    """Enables a MicroK8s addon.

    For a list of available addons, run `microk8s status`.

    To see help for individual addons, run:

        microk8s enable ADDON -- --help
    """
    # preconditions: unlocked cluster, sufficient permissions, running node
    is_cluster_locked()
    exit_if_no_permission()
    ensure_started()
    available = get_available_addons(get_current_arch())
    currently_enabled, _ = get_status(available, True)
    enabled_names = {addon['name'] for addon in currently_enabled}
    xable('enable', addons, enabled_names)
def run(self):
    """Run the connectivity watcher forever: poll the status each cycle
    and restart the router whenever it reports "disconnected"."""
    while True:
        print("Checking...")
        self.last_check = datetime.now()
        till_next_check = self.check_rate
        current_status = status.get_status()
        if current_status == "disconnected":
            # connectivity lost: restart and back off to the restart rate
            till_next_check = self.max_restart_rate
            restart.restart()
            print("Restarted!")
        # sleep for whatever remains of the check interval (never negative)
        elapsed = (datetime.now() - self.last_check).seconds
        time.sleep(max(till_next_check - elapsed, 0))
def api_status():
    """Returns the current status data as JSON.

    e.g. { u'status': u'offline',
           u'callable_statuses_list': [ u'available', u'busy', u'engaged',
           u'alert', u'disturbable', u'partying', u'offline', u'brb' ] }
    """
    payload = {
        'status': status.get_status(),
        'endpoint': url_for('refresh', _external=True),
        'callable_statuses_list': status.read_statuses(),
        'device_type': status.get_type(),
    }
    return jsonify(payload)
def process(self, config, coin):
    """Collect the log files to tail for the given coin's miner.

    Populates the module-global TAIL_LOG_FILES with existing
    .log/.err/.out files (or sets CMD_LOG_FILES to journalctl for
    systemd-service miners). With config.ALL_COINS, lists logs of all
    currently-running miners instead.
    """
    global TAIL_LOG_FILES, CMD_LOG_FILES
    if isinstance(coin, list):
        # This happens with 'miners swap,logs old-coin:new-coin'
        coin = coin[1]
        time.sleep(0.1)
    miner = coin['MINER']
    client = None
    if miner in config.SHEETS['Clients']:
        client = config.SHEETS['Clients'][miner]
        miner = client['EXECUTABLE']
    if miner in start.MINER_TO_BINARY:
        miner = start.MINER_TO_BINARY[miner]
    # If no coins on command line, then list only those of currently running miners
    if config.ALL_COINS:
        pinfos = status.get_status(config.arguments['COIN'])
        # truthiness covers both None and the empty list (was an explicit
        # 'is None or len() == 0' check)
        if not pinfos:
            print('There are no processes mining anything.', file=sys.stderr)
            return config.ALL_MEANS_ONCE
        for pinfo in pinfos:
            WORKER_NAME = config.workerName(pinfo['coin'])
            for ext in ['.log', '.err', '.out']:
                logName = '/var/log/mining/'+WORKER_NAME+ext
                if os.path.isfile(logName):
                    TAIL_LOG_FILES[logName] = 1
        return config.ALL_MEANS_ONCE
    if miner.endswith('.service'):
        # systemd-managed miner: follow the journal instead of files
        CMD_LOG_FILES = ['/bin/journalctl', '-f']
    else:
        for ext in ['.log', '.err', '.out']:
            logName = '/var/log/mining/'+config.workerName(coin['COIN'])+ext
            if os.path.isfile(logName):
                TAIL_LOG_FILES[logName] = 1
            else:
                if config.VERBOSE:
                    print("There is no log file named '"+logName+"'")
    return 0
def enable(addons):
    """Enables a MicroK8s addon.

    For a list of available addons, run `microk8s status`.

    To see help for individual addons, run:

        microk8s enable ADDON -- --help
    """
    if check_help_flag(addons):
        return
    # preconditions: unlocked cluster, permissions, started and ready node
    is_cluster_locked()
    exit_if_no_permission()
    ensure_started()
    wait_for_ready(timeout=30)
    available = get_available_addons(get_current_arch())
    currently_enabled, _ = get_status(available, True)
    enabled_names = {addon["name"] for addon in currently_enabled}
    xable("enable", addons, enabled_names)
def refresh():
    """Apply the status supplied in the POSTed form, then echo the
    (possibly updated) current status back to the client."""
    requested = request.form['new_status']
    status.change_status(requested)
    return status.get_status()
def get_status():
    """Thin wrapper exposing the status module's current status."""
    current = status.get_status()
    return current
def process(self, config, coin, quiet=False):
    """Build and (unless DRYRUN/QUERY) apply NVIDIA overclock / power-limit
    settings for every detected GPU, writing the commands to a shell
    script in the LOG_RAMDISK directory.

    Per-GPU settings come first from conf/overclock.yml (via
    read_overclock_yml), falling back to the deprecated
    config.SHEETS['Overclock'] sheet. Returns config.ALL_MEANS_ONCE.

    NOTE(review): indentation below is reconstructed from whitespace-
    mangled source; the nesting of the nvidia_pwrs write-loop inside the
    'with open(...)' block is the most plausible reading — confirm against
    the original file.
    """
    global OverclockConfig
    # volatile means this operation make changes in settings
    VOLATILE = not config.DRYRUN and not config.QUERY
    postfix = '-' + coin['COIN']
    if config.ALL_COINS:
        postfix = ''
    # refuse to overclock under a running miner unless forced
    if not config.FORCE and not config.DRYRUN and status.get_status(
            None) and VOLATILE:
        if not config.QUICK and not quiet:
            print(
                "A miner is currently running, so we are skipping overclocking (use -f to force)."
            )
        return config.ALL_MEANS_ONCE
    gpu_stats = []
    try:
        gpu_stats = GPUStatCollection.new_query()
    except NameError as ex:
        # gpustat not importable at module load time
        print('NameError: Cannot load GPUStatCollection.')
        print(ex)
        print("To fix this, do 'pip3 install gpustat'.")
        if not config.DRYRUN:
            return config.ALL_MEANS_ONCE
    except:
        if not config.DRYRUN:
            if config.PLATFORM != 'AMD' and not quiet:
                print('Except: Cannot load GPUStatCollection on platform=' +
                      config.PLATFORM)
                ex = sys.exc_info()
                print(ex)
            elif not config.QUICK and not quiet:
                ### TODO: https://github.com/GPUOpen-Tools/GPA/blob/master/BUILD.md
                print("'miners overclock' is not implemented for AMD devices")
            return config.ALL_MEANS_ONCE
    normalizedDevices = read_overclock_yml()
    sudo_nvidia_settings = get_sudo_nvidia_settings(config)
    xauthority = '~/.Xauthority'
    if sudo_nvidia_settings:
        xauthority = '/var/lib/lightdm/.Xauthority'
    settings = 'DISPLAY=:0 XAUTHORITY=' + xauthority + ' ' + sudo_nvidia_settings + 'nvidia-settings -c :0'
    nvidia_pwrs = {}
    # -a assigns; -q / --terse -q only queries current values
    oper = '-a'
    if config.QUERY:
        if config.VERBOSE:
            oper = '-q'
        else:
            oper = '--terse -q'
    for gpu in gpu_stats:
        if gpu.uuid in normalizedDevices:
            # new way: per-device OverClock map from conf/overclock.yml
            dev = normalizedDevices[gpu.uuid]
            oc = dev.get('OverClock', {})
            # default undervolt (e.g. power-limit),
            # unless a coin-specific one is given
            oc = oc.get(coin['COIN'], oc.get('___', '0,150'))
            oc, uv = oc.split(',')
        # old-way, deprecated until we've migrated all into conf/overclock.yml, then will be removed
        elif gpu.uuid.upper() in config.SHEETS['Overclock']:
            dev = config.SHEETS['Overclock'][gpu.uuid.upper()]
            uv = dev['UV']  # default undervolt (or watts-limit)
            if 'UV' + postfix in dev:  # unless a coin-specific one is given
                uv = dev['UV' + postfix]
            oc = dev['OC']  # default overclock
            if 'OC' + postfix in dev:  # unless a coin-specific one is given
                oc = dev['OC' + postfix]
        if oc:
            # memory transfer-rate offset at performance level 3
            settings += ' ' + oper + ' "[gpu:' + str(
                gpu.index) + ']/GPUMemoryTransferRateOffset[3]'
            if not config.QUERY:
                settings += '=' + str(int(oc))
            settings += '"'
        if uv:
            # group GPU indices by power limit for batched nvidia-smi calls
            iuv = int(uv)
            if iuv in nvidia_pwrs:
                nvidia_pwrs[iuv].append(str(gpu.index))
            else:
                nvidia_pwrs[iuv] = [str(gpu.index)]
    overclock_dryrun = os.getenv('LOG_RAMDISK',
                                 '/var/local/ramdisk') + '/overclock-dryrun.sh'
    with open(overclock_dryrun, 'w') as fh:
        if not config.QUERY:
            fh.write("echo '%s %i %s'\n\n" %
                     ('Overclocking', len(gpu_stats), 'GPUs.'))
            fh.write('%s\n' % ('sudo nvidia-smi -pm 1'))
        for pwr in nvidia_pwrs:
            if not config.QUERY:
                # one nvidia-smi power-limit call per distinct wattage
                cmd = "sudo nvidia-smi -i " + ','.join(
                    nvidia_pwrs[pwr]) + " -pl " + str(pwr)
                fh.write('%s\n' % (cmd))
                fh.write("\n")
                if config.VERBOSE:
                    print(cmd)
        fh.write(settings)
        fh.write("\n")
        if config.VERBOSE:
            print(settings)
    # script must be executable; rw for user/group, read-only for others
    os.chmod(
        overclock_dryrun, stat.S_IXUSR | stat.S_IXGRP | stat.S_IWUSR
        | stat.S_IWGRP | stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
    if os.getenv('MINERS_USER'):
        os.chown(overclock_dryrun,
                 getpwnam(os.getenv('MINERS_USER')).pw_uid, -1)
    if config.DRYRUN:
        # show the would-be commands instead of running them
        print("\nexport DISPLAY=:0\nexport XAUTHORITY=" + xauthority + "\n")
        with open(overclock_dryrun, 'r') as fh:
            print(fh.read().replace('-a', " \\\n -a"))
    else:
        overclock_filename = os.getenv('LOG_RAMDISK',
                                       '/var/local/ramdisk') + '/overclock.sh'
        # skip re-applying if the generated script matches what's in place
        if VOLATILE and not config.FORCE and os.path.isfile(
                overclock_filename) and filecmp.cmp(overclock_dryrun,
                                                    overclock_filename):
            if not config.QUICK and not config.QUERY:
                timestamp = time.ctime(os.path.getctime(overclock_filename))
                print(
                    "Overclock settings are identical to those already set at '"
                    + timestamp + "', so we are keeping them (use -f to force).")
        else:
            os.rename(overclock_dryrun, overclock_filename)
            os.system("/bin/bash " + overclock_filename)
            if config.VERBOSE:
                with open(overclock_dryrun, 'r') as fh:
                    print(fh.read())
    if os.path.isfile(overclock_dryrun):
        os.remove(overclock_dryrun)
    return config.ALL_MEANS_ONCE
from status import get_status  # Importing the functions defined in the other script
import time
import os

# Map the numeric line-status codes returned by get_status() to display
# text; unknown codes fall back to "Unknown Status".
STATUS_DESCRIPTIONS = {
    10: "Good Service",
    6: "Severe Delays",
    9: "Minor Delays",
    5: "Part Closure",
    20: "Service Closed",
    3: "Part Suspended",
}

while True:  # Infinite loop to constantly show the data (refreshed every 30 seconds)
    all_lines = get_status()  # Get the status of all lines
    for line in all_lines:
        # dict lookup replaces the original if/elif chain over the codes
        print(line + ": " + STATUS_DESCRIPTIONS.get(all_lines[line], "Unknown Status"))
    time.sleep(30)  # Data will refresh every 30 seconds
    os.system("cls")  # This will clean the command screen (Windows-only; use "clear" on POSIX)
def get(self):
    """Write the current status as the HTTP response body."""
    current = status.get_status()
    self.response.out.write(current)
def index():
    """Render the landing page with the current status injected."""
    current = status.get_status()
    return render_template('index.html', status=current)
def main():
    """Run the synthesis experiment: parse CLI args, build the task list
    (function x repetition x metric), run mimic on each shuffled task, and
    pickle the results into a timestamped output directory.

    NOTE: this is Python 2 code (print statements, cPickle, map()->list).
    """
    parser = argparse.ArgumentParser(description='Run synthesis experiment.')
    parser.add_argument('-n', type=int, help='Number of repetitions', default=10)
    parser.add_argument('--filter', type=str, help='Filter which experiments to run', default="")
    parser.add_argument('--exclude', type=str, help='Exclude some experiments', default="")
    parser.add_argument('--exp_name', type=str, help='Name of this experiment', default="")
    parser.add_argument('--args', type=str, help='Arguments to be passed to mimic', default="")
    parser.add_argument('--metric', type=str, help='Which metric should be used during search? Comma-separated list', default="0")
    global argv
    argv = parser.parse_args()
    workdir = os.path.abspath(os.path.dirname(__file__) + "/../tests")
    n = argv.n
    metrics = map(lambda x: int(x), argv.metric.split(","))
    global base_command
    if argv.args != "":
        # forward extra arguments through to the mimic invocation
        base_command = base_command + " " + argv.args
    fncs = parse_functions(workdir, argv.filter, argv.exclude)
    # create a directory to store information
    global out
    out = workdir + "/out"
    if not os.path.exists(out):
        os.mkdir(out)
    timefordir = common.get_time(True)
    out = out + "/" + timefordir
    if argv.exp_name != "":
        out = out + "_" + argv.exp_name
    if os.path.exists(out):
        print "ERROR, out directory exists already: " + out
        sys.exit(1)
    os.mkdir(out)
    logfile = out + "/readme.txt"
    # run the experiment
    tasks = []
    c = 0
    print ""
    # cartesian product: every function x repetition x metric
    for f, i, m in [(f, i, m) for f in fncs for i in range(n) for m in metrics]:
        tasks.append((c, f, i, m))
        c += 1
    shuffle(tasks)  # shuffle tasks
    results = []
    print "Running experiment..."

    def get_details():
        # summary string used both on stdout and in the readme log;
        # closes over fncs, n and out
        s = ""
        s += " function(s): %d" % len(fncs)
        s += "\n repetitions: %d" % n
        s += "\n output directory: %s" % out[out.find("/tests/")+1:]
        return s

    print get_details()
    common.fprint(logfile, "Arguments: " + " ".join(sys.argv) + "\n")
    common.fprinta(logfile, "Time: " + common.get_time() + "\n")
    common.fprinta(logfile, get_details() + "\n" + line + "\n")
    print line
    stat = status.get_status()
    stat.set_message("Running experiment...")
    stat.init_progress(len(tasks))
    for c, f, i, m in tasks:
        stat.writeln("Running mimic for %s..." % (f.shortname))
        res = run.mimic(f, metric=m, cleanup=0)
        stat.writeln(" done in %.2f seconds and %d searches" % (res.total_time, res.total_searches))
        stat.inc_progress(force_update=True)
        results.append(res)
    # persist all results for later analysis
    jn = cPickle.dumps(results)
    common.fprint(out + "/result.pickle", jn)
    stat.end_progress()
    print line
    print "Finished experiment:"
    print get_details()
    common.fprinta(logfile, "Time: " + common.get_time() + "\n")