def home():
    # Render the dashboard: sentiment counts, common words, categories, and top tweets.
    session.pop('screen_name', None)
    classifier = request.values.get('classifier') or "Support Vector Machine"
    pe, nee, neu, positives, negatives, neutrals, avPos, avNeg, getIt = classify__(classifier)
    call, call2, call3, call4, call5, cutt1, cutt2, cutt3, cutt4, cutt5, top_tweets = common_words()
    holdOn = caTe()
    categories = request.values.get('categories')
    categorizeTwwts = []
    if categories is not None:
        categorizeTwwts = caTe(categories)
    span1 = span(cutt1)
    span2 = span(cutt2)
    span3 = span(cutt3)
    span4 = span(cutt4)
    span5 = span(cutt5)
    front, back = Track()
    return render_template('html/index.html', pgtitle='Home',
                           postive=pe, negative=nee, neutral=neu,
                           positives=positives, holdOn=categorizeTwwts,
                           negatives=negatives, getIt=getIt, neutrals=neutrals,
                           call=call, call2=call2, call3=call3, call4=call4, call5=call5,
                           cutt1=cutt1, cutt2=cutt2, cutt3=cutt3, cutt4=cutt4, cutt5=cutt5,
                           avPos=avPos, avNeg=avNeg, most_tweets=top_tweets,
                           front=front, back=back,
                           span1=span1, span2=span2, span3=span3, span4=span4, span5=span5)
def zero_just(string, length=3, color="525252"):
    return span("0" * (length - len(string)), fg=color)
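# `span` is used throughout these scripts but not defined in this section. A minimal
# sketch, assuming the scripts emit Pango markup (as the volume script below suggests
# with its literal "<span font='4'>" wrapper); the exact signature is an assumption.
def span(text, fg=None):
    # Wrap text in a Pango <span>, optionally setting the foreground colour.
    if fg is None:
        return str(text)
    colour = fg if str(fg).startswith("#") else f"#{fg}"
    return f"<span foreground='{colour}'>{text}</span>"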
GRADIENT = [("#F90000", 0.0), ("#F90000", 0.05), ("#EEF600", 0.20), ("#7CDC7A", 1.00)]

if __name__ == "__main__":
    hdd = hdd_utilisation()
    mem = mem_utilisation()
    cpu_avg, cpu_max = cpu_utilisation()
    bat = battery_level()
    is_plugged = plugged_in()

    print(
        fa("f0a0"),  # hard-drive icon
        zero_just(f"{hdd}"),
        span(f"{hdd} ", fg=gradient_at(GRADIENT, 1 - (hdd / 100))),
        fa("f538"),  # memory icon
        zero_just(f"{mem}"),
        span(f"{mem} ", fg=gradient_at(GRADIENT, 1 - mem / 100)),
        fa("f2db"),  # microchip icon
        "高", zero_just(f"{cpu_max}"),
        span(f"{cpu_max}", fg=gradient_at(GRADIENT, 1 - (cpu_max / 100))),
        "平", zero_just(f"{cpu_avg}"),
        span(f"{cpu_avg} ", fg=gradient_at(GRADIENT, 1 - (cpu_avg / 100))),
        fa("f1e6") if is_plugged else fa(battery_indicator(bat)),  # plug / battery icon
        zero_just(f"{bat}"),
        span(f"{bat}", fg=(gradient_at(GRADIENT, bat / 100) if not is_plugged else None)),
    )
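# Neither `fa` nor `gradient_at` is defined in these snippets. Minimal sketches of
# what they appear to do, assuming `fa` maps a FontAwesome hex code point to its glyph
# and `gradient_at` linearly interpolates between the (colour, position) stops of
# GRADIENT; the implementations below are assumptions, not the original helpers.
def fa(codepoint):
    # Turn a hex code point such as "f0a0" into the corresponding icon character.
    return chr(int(codepoint, 16))


def gradient_at(gradient, position):
    # Clamp the position, find the surrounding stops, and blend their RGB channels.
    position = max(0.0, min(1.0, position))
    for (c0, p0), (c1, p1) in zip(gradient, gradient[1:]):
        if p0 <= position <= p1:
            f = 0.0 if p1 == p0 else (position - p0) / (p1 - p0)
            rgb = [round(int(c0[i:i + 2], 16) * (1 - f) + int(c1[i:i + 2], 16) * f)
                   for i in (1, 3, 5)]
            return "#{:02x}{:02x}{:02x}".format(*rgb)
    return gradient[-1][0]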
def get_rx_tx():
    # Read per-interface byte counters from `ifstat -j` (JSON output).
    ifstat = subprocess.check_output(["ifstat", "-j"]).decode()
    ifstat = json.loads(ifstat)["kernel"][INTERFACE]
    return int(ifstat["rx_bytes"]), int(ifstat["tx_bytes"])


rx_last, tx_last = get_rx_tx()
time.sleep(1)

while True:
    rx_bytes, tx_bytes = get_rx_tx()
    rx_bytes, rx_last = rx_bytes - rx_last, rx_bytes
    tx_bytes, tx_last = tx_bytes - tx_last, tx_bytes
    rx_color = gradient_at(GRADIENT, min(log(rx_bytes + 1, 10) / log(RX_MAX, 10), 1))
    tx_color = gradient_at(GRADIENT, min(log(tx_bytes + 1, 10) / log(TX_MAX, 10), 1))

    # ·10㎅ ··5㎆
    print(
        fa("f102"),  # double-up arrows (upload)
        span(xB(tx_bytes), fg=tx_color),
        " ",
        fa("f103"),  # double-down arrows (download)
        span(xB(rx_bytes), fg=rx_color),
        sep="")
    sys.stdout.flush()
    time.sleep(1)
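# `xB` is not shown either. A rough sketch, assuming it renders a byte count with
# the compact squared-unit glyphs hinted at in the comment above ("10㎅", "5㎆");
# the field width and rounding are assumptions.
def xB(n_bytes):
    # Pick the largest unit that keeps the reading short and fixed-width.
    for unit, factor in (("㎇", 1024 ** 3), ("㎆", 1024 ** 2), ("㎅", 1024)):
        if n_bytes >= factor:
            return f"{n_bytes / factor:3.0f}{unit}"
    return f"{n_bytes:3.0f}B"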
def general_rlpa(policy_lib, delta, size, T, alg='none', beta=1.0):
    total_reward = 0
    init_x = size // 2
    init_y = size // 2
    init_state = (init_x, init_y)
    regret = []
    t = 1
    i = 0
    grid_world = GridWorld(size)
    agent = GeneralRLPAAgent(policy_lib, grid_world, init_state, beta=beta)
    if alg == 'MBIE-EB':
        agent.initialize_MBIEEB()
    elif alg == 'IS':
        agent.initialize_IS()
    optimal_mu = get_optimal_mu(grid_world)
    while t <= T:
        t_i = 0
        T_i = 2 ** i
        H_hat = span(T_i)
        i += 1
        while t_i <= T_i and t <= T and agent.has_policy():
            print(t)
            agent.compute_bounds(H_hat, t, delta)
            agent.compute_best_policy()
            agent.current_policy.initialize_v()
            # use pol for static variables in an episode
            pol = agent.current_policy
            current_traj = [agent.max_B_key, []]
            while t_i <= T_i and t <= T and pol.v <= pol.n \
                    and pol.mu_hat - pol.R / (pol.n + pol.v) <= pol.c + \
                    complex_bound(H_hat, t, delta, pol.n, pol.v, pol.K):
                t_i += 1
                last_state = agent.state
                agent.state, r = grid_world.take_action(
                    agent.state, agent.take_action())
                agent.current_policy.v += 1.0
                agent.current_policy.R += r
                total_reward += r
                if alg == 'MBIE-EB':
                    agent.update_MBIEEB(last_state, r, t)
                elif alg == 'IS':
                    current_traj[1] += [(last_state, r, agent.state)]
                regret += [optimal_mu - float(total_reward) / float(t)]
                t += 1
                if t % 5 == 0:
                    agent.state = (init_x, init_y)
            if alg == 'IS':
                agent.update_IS(current_traj)
                current_traj = [agent.max_B_key, []]
            agent.current_policy.K += 1
            if pol.mu_hat - agent.current_policy.R / (pol.n + agent.current_policy.v) \
                    > pol.c + complex_bound(
                        H_hat, t, agent.delta, pol.n, agent.current_policy.v, pol.K):
                agent.drop_current_policy()
            else:
                agent.current_policy.n += agent.current_policy.v
                agent.current_policy.mu_hat \
                    = agent.current_policy.R / agent.current_policy.n
            if alg == 'MBIE-EB':
                agent.use_MBIEEB(init_state, t)
            elif alg == 'IS' and t > 20:
                agent.compute_best_interpolated_pol()
        if not agent.has_policy():
            print('agent does not have policy')
            return
    return regret
# Eighth-width block glyphs, from empty to full ("▋" included so every remainder maps
# to the correct partial width).
BLOCKS = [" ", "▏", "▎", "▍", "▌", "▋", "▊", "▉", "█"]


def bar(length, fraction):
    """Creates a Unicode bar graph (left to right)."""
    return (BLOCKS[-1] * int(length * fraction)
            + BLOCKS[int(length * 8 * fraction) % 8]).ljust(length)[:length]


if "relative_x" in os.environ:
    # Click inside the bar: set the volume proportionally to the click position.
    percentage = round(int(os.getenv("relative_x", 0)) / int(os.getenv("width", 1)) * 100)
    subprocess.run(["amixer", "sset", "Master", f"{int(percentage)}%", "-q"])
else:
    query = subprocess.check_output(["amixer", "sget", "Master"]).decode()
    percentage = int(re.search(r"\[(\d+)%\]", query).groups()[0])

length = 30
GRADIENT = [("#4b4b4b", 0.0), ("#dfdfdf", 0.8), ("#dfdfdf", 1.0)]

if __name__ == "__main__":
    _, length = sys.argv
    length = int(length)
    print("<span font='4'>",
          *[span(cha, fg=gradient_at(GRADIENT, i / length))
            for i, cha in enumerate(bar(length, percentage / 100))],
          "</span>",
          sep="")