def block_devices():
    """Every 5 s: if crisis mode is 'on', firewall-block the known device MACs;
    otherwise remove the block.

    Uses ./crisis as a marker file so the iptables rules are inserted/deleted
    only once per state transition, not on every tick.
    """
    global CONFIG
    # Re-arm this check in 5 seconds; daemon so it won't keep the process alive.
    timer = threading.Timer(5, block_devices)
    timer.daemon = True
    timer.start()
    # MACs of the devices to cut off during a crisis.
    blocked_macs = ("B4:9D:0B:63:79:31", "B4:9D:0B:63:74:39")
    if CONFIG['DEFAULT']['crisis'] == 'on':
        if not os.path.exists("./crisis"):
            print("Blocking devices")
            for mac in blocked_macs:
                os.system(
                    "iptables -I INPUT -m mac --mac-source %s -j REJECT" % mac
                )
            # Flush conntrack so already-established connections drop immediately.
            os.system("conntrack --flush")
            open("./crisis", "w").close()  # marker: rules are now in place
    else:
        if os.path.exists("./crisis"):
            print("Unblocking Devices")
            for mac in blocked_macs:
                os.system(
                    "iptables -D INPUT -m mac --mac-source %s -j REJECT" % mac
                )
            os.remove("./crisis")
def background_command(command, require_zero_status=False):
    """Launch `command` through the shell in the background (like a trailing '&').

    The command may contain pipes and other shell constructs since it runs in
    'shell' mode. If require_zero_status is True, the program dies should the
    command eventually exit with nonzero status.

    Returns the watcher Thread, so a caller can wait for this one command:

        thread = background_command('foo | bar')
        # ... other work ...
        thread.join()

    See also:
      - wait_for_background_commands(): wait for all such commands at program end.
      - execute_command() / get_command_stdout(): run commands in the foreground.
    """
    proc = subprocess.Popen(command, shell=True)
    watcher = threading.Thread(
        target=background_command_waiter,
        args=(command, proc, require_zero_status),
    )
    # Daemonize so an abnormal main-thread exit is not held up by the watcher.
    watcher.daemon = True
    watcher.start()
    return watcher
def list_files_multithread(root, outfilename):
    """Walk `root`, gathering file info in producer threads while a daemon
    consumer thread writes the results to `outfilename`.

    Producer threads are spawned whenever the pending-path batch exceeds
    MAX_NUM_OF_FILES; the final partial batch is dispatched after the walk.
    """
    # spawn consumer thread - write info to file
    consumerthread = threading.Thread(target=consumer, args=(outfilename,))
    consumerthread.daemon = True  # else cannot exit!
    consumerthread.start()
    # spawn producer threads - get info of the files
    threads = []
    paths = []
    for thisdir, subshere, fileshere in os.walk(root):
        paths.extend(os.path.join(thisdir, fname) for fname in fileshere)
        if len(paths) > MAX_NUM_OF_FILES:
            # Hand the current batch to a worker. A shallow copy suffices:
            # paths holds immutable strings (deepcopy was wasted work).
            batch = list(paths)
            thread = threading.Thread(target=getinfo, args=(batch,))
            threads.append(thread)
            thread.start()
            paths.clear()  # reset the batch
    # BUG FIX: the original dropped the trailing batch (< MAX_NUM_OF_FILES
    # leftover paths were never processed). Dispatch it here.
    if paths:
        thread = threading.Thread(target=getinfo, args=(list(paths),))
        threads.append(thread)
        thread.start()
    for thread in threads:
        thread.join()
    consumerthread.join()
def startThreads(self, delay=0.0):  # MANUALTEST: Must be tested manually
    """Start all the threads that have been installed using setupThread.

    delay: if positive, sleep delay/10 seconds before starting (the /10
    scaling is preserved from the original implementation).
    """
    # World.startThreads()  # not public
    if delay > 0.0:
        when = delay / 10.0
        print("Starting in " + str(when) + " seconds.")
        time.sleep(when)
    for thread in self._runnables:
        # BUG FIX: the original called `_thread.start()` (the stdlib _thread
        # module, which has no zero-arg start) instead of starting the loop
        # variable — so no installed thread was ever started.
        thread.start()
def stress_test(self, iterations=1, threads=1):
    """Run `threads` concurrent copies of self.run_thread, each doing
    `iterations` units of work, then report and return the elapsed time.

    Progress is tracked through self.done, which the worker threads append
    to; the main thread polls it until all work is accounted for.

    Returns the elapsed seconds (previously computed but discarded —
    returning it is backward-compatible since callers ignored None).
    """
    args = (iterations,)
    self.mark_start()
    for _ in range(threads):
        worker = threading.Thread(target=self.run_thread, args=args)
        worker.start()
    # Busy-poll (with a short sleep) until every iteration is recorded.
    while len(self.done) < (iterations * threads):
        dprint(len(self.done))
        time.sleep(0.1)
    self.mark_finish()
    took = self.elapsed()
    self.print_gc_report()
    return took
def update_config():
    """Reload config/config.ini into the global CONFIG, re-arming itself
    every 5 seconds via a daemon Timer."""
    global CONFIG
    reload_timer = threading.Timer(5, update_config)
    reload_timer.daemon = True
    reload_timer.start()
    print("### Updating config.")
    # Parse into a fresh parser first, then swap — CONFIG is never half-read.
    parser = configparser.ConfigParser()
    parser.read('config/config.ini')
    CONFIG = parser
def process_sensor_mqtt(func, topic):
    """Publish func()'s current reading to the MQTT `topic`, then re-schedule
    itself after the interval configured under func's name in CONFIG."""
    global MQTT_CLIENT
    global CONFIG
    interval = int(CONFIG['DEFAULT'][func.__name__])
    repeat_timer = threading.Timer(interval, process_sensor_mqtt, [func, topic])
    repeat_timer.daemon = True
    repeat_timer.start()
    reading = func()
    print("## Publishing {} to topic {} ({}) ".format(
        reading, topic, CONFIG['DEFAULT'][func.__name__]))
    MQTT_CLIENT.publish(topic, reading)
def switch_leds():
    """Runs once per second (self-rearming Timer) and drives the ring LED
    according to CONFIG['DEFAULT']['led_state']."""
    global CONFIG
    # Re-arm in 1 second; daemon so it dies with the main thread.
    _thread = threading.Timer(1, switch_leds)
    _thread.daemon = True
    _thread.start()
    if CONFIG['DEFAULT']['led_state'] == 'steady':
        if GPIO.input(LED_RING_PIN) == 1:
            # Already on: rewrites the same value (effectively a no-op).
            GPIO.output(LED_RING_PIN, GPIO.input(LED_RING_PIN))
        else:
            # Off: invert to turn the LED on.
            GPIO.output(LED_RING_PIN, not GPIO.input(LED_RING_PIN))
    # NOTE(review): the source chunk was whitespace-mangled, so the nesting of
    # this final toggle is ambiguous. As written it toggles the LED every tick
    # unconditionally (blink) — if it was meant only for the non-'steady'
    # branch, it belongs under an `else:`. Confirm against the original file.
    GPIO.output(LED_RING_PIN, not GPIO.input(LED_RING_PIN))
# NOTE(review): the two methods below belong to a threading.Thread subclass —
# the `class Mythread(threading.Thread):` header sits above this chunk.

    def __init__(self, myId, count, mutex):
        """Store per-thread state and initialize the Thread base class."""
        self.myId = myId
        self.count = count                  # per-thread state information
        self.mutex = mutex                  # shared objects, not globals
        threading.Thread.__init__(self)

    def run(self):                          # run provides thread logic
        # Print count numbered lines, serializing stdout access via the
        # shared mutex so output from the 10 threads does not interleave.
        for i in range(self.count):         # still sync stdout access
            with self.mutex:
                print('[%s] => %s' % (self.myId, i))

# Demo driver: spawn ten Mythread workers sharing one stdout lock,
# then wait for them all before exiting.
stdoutmutex = threading.Lock()              # same as thread.allocate_lock()
threads = []
for i in range(10):
    thread = Mythread(i, 100, stdoutmutex)  # make/start 10 threads
    thread.start()                          # starts run method in a thread
    threads.append(thread)

for thread in threads:
    thread.join()                           # wait for thread exits
print('Main thread exiting.')

#************************************
"""
four different ways to run an action in a thread; all print 4294967296,
but prints should be synchronized with a mutex here to avoid overlap
"""
import threading, _thread

def action(i):
    # Demo payload for the thread-launch examples: prints i ** 32.
    print(i ** 32)
def train(continu=True, depth=1, rand_min=0.2, rand_max=1.0, lr_max=8e-5,
          lr_min=1e-6, reg=0.005, num_threads=4):
    """Self-play training loop for the chess NeuralNet.

    Each iteration plays `num_threads` games concurrently (one per worker
    thread via run_one_game), sums the returned gradients — scaled by a
    per-result learning-rate factor that down-weights frequent outcomes —
    applies the update, and every 200 games tests/backs up the net.

    A GUI thread (shutdown_gui_thread) shares bool_dict; flipping
    bool_dict["bool"] to False stops training gracefully. Progress
    (counters and hyper-parameters) is persisted to saved_nns/progress.txt
    so a later call with continu=True can resume.
    """
    # add more parameters probably.
    if continu:
        # Resume from the previously saved networks.
        nn = load_nn("latest")
        best_nn = load_nn("best")
    else:
        nn = NeuralNet()
        nn.init_net(input_size=386, output_size=1, hidden_size=1000, number_of_hidden=8)
        resets = 0
        # Re-initialize until the fresh net scores the starting position
        # near 0 (an unbiased evaluation); count how many resets that took.
        while abs(nn.predict(chess_ai.chess_to_nn_input(Chess()))) > .1:
            nn = NeuralNet()
            nn.init_net(input_size=386, output_size=1, hidden_size=1000, number_of_hidden=8)
            resets += 1
        print("nn started. Start predict: ", nn.predict(chess_ai.chess_to_nn_input(Chess())),
              "after", resets, "resets")
        best_nn = copy.deepcopy(nn)
        # best_nn = load_nn("best")  # crashes if no such nn is saved
    with open("logs/log.txt", "a") as log_file:
        log_file.write("\n=========== New Training Run ================\n")
    bool_dict = {"bool": True}
    ui_thread = threading.Thread(target=shutdown_gui_thread, args=(bool_dict, ))
    ui_thread.start()
    # thread.start_new_thread(shutdown_gui_thread, (bool_dict, ))
    # starts thread, which displays gui to allow for easy graceful shutdown.
    lr = lr_max
    # [black wins, ties, white wins]; seeded with 1s so sum() is never zero.
    result_counts = [1, 1, 1]
    count = 0
    randomness = rand_max
    if continu:
        # Restore counters and hyper-parameters from the previous run.
        with open("saved_nns/progress.txt") as f:
            settings_dict = json.loads(f.read())
        result_counts = settings_dict["result_counts"]
        count = settings_dict["count"]
        lr = settings_dict["lr"]
        lr_min = settings_dict["lr_min"]
        randomness = settings_dict["rand"]
        rand_min = settings_dict["rand_min"]
    # else:
    #     result_counts = [1, 1, 1]  # index 0: counts of black wins, index 1: Ties, index 2: white wins
    #     count = 0
    while bool_dict["bool"]:  # This dict is given to the UI thread. This allows for stopping the training via UI
        # One slot per worker; each thread writes its game's results into
        # result_list at its own index.
        result_list = [None] * num_threads
        threads = []
        for i in range(num_threads):
            # print("Starting thread", i)
            thread = threading.Thread(target=run_one_game,
                                      args=(nn, randomness, depth, reg, result_counts,
                                            count + i, i, result_list, lr))
            threads.append(thread)
            thread.daemon = True
            thread.start()
        for thread in threads:
            thread.join()
        # print("All threads done")
        # print("TODO: merge thread results and update")  # TODO!
        avg_db = []  # Not actually taking the average. Just sum up (maybe reduce learning rate)
        avg_dw = []
        for db, dw, res, print_string, cost_string in result_list:
            # Down-weight gradients from frequent outcomes so rarer results
            # contribute relatively more to the update.
            lr_factor = 1 - result_counts[res + 1] / sum(result_counts)
            # print(res, lr_factor, result_counts)
            if not avg_db:
                # NOTE(review): the first game's gradients are adopted
                # unscaled (lr_factor is not applied here) — confirm intended.
                avg_db = db
                avg_dw = dw
            else:
                for i in range(len(avg_db)):
                    avg_db[i] += db[i] * lr_factor
                    avg_dw[i] += dw[i] * lr_factor
            result_counts[res + 1] += 1
            # Route the per-game cost row to the file matching its outcome.
            if res == 1:
                cost_file_name = "logs/win_costs.csv"
            elif res == 0:
                cost_file_name = "logs/draw_costs.csv"
            else:
                cost_file_name = "logs/loss_costs.csv"
            print_string += ", lr_factor: " + str("{:.3f}".format(lr_factor))
            with open("logs/detailed_log.txt", "a") as log_file:
                log_file.write(print_string + "\n")
            with open(cost_file_name, "a") as cost_file:
                cost_file.write(cost_string)
            print(print_string)
        # Scale the summed gradients by 1/num_threads before applying.
        for i in range(len(avg_db)):
            avg_db[i] = avg_db[i] * (1 / num_threads)
            avg_dw[i] = avg_dw[i] * (1 / num_threads)
        nn.update_from_gradients(avg_db, avg_dw, lr=lr)
        count += num_threads
        if count > 200:
            randomness = max(
                rand_min, randomness * 0.995
            )  # Reduce randomness a little each run, until it becomes less than rand_min
        if count > 400:
            lr = max(lr_min, lr * 0.995)
        # sum_val = np.sum(chess.board)
        # avg_cost = total_cost / chess.turn_num if count >= 200 else total_cost
        # print_string = ("Game " + str(count)
        #                 + ", Avg Cost: " + str("{:.3f}".format(avg_cost[0]))
        #                 + ", Last cost: " + str("{:.3f}".format(last_cost[0]))
        #                 + ", Turns: " + str(chess.turn_num)
        #                 + ", Randomness:" + str("{:.2f}".format(randomness))
        #                 + ", lr:" + str("{:.5f}".format(lr))
        #                 + ", lr factor:" + str("{:.3f}".format(lr_factor))
        #                 + ", Win:" + str(result)
        #                 + ", Sum: " + str(sum_val)
        #                 + ", Val:" + str("{:.4f}".format(val)))
        # print(print_string)
        # with open("logs/detailed_log.txt", "a") as log_file:
        #     log_file.write(print_string + "\n")
        # if result == 1:
        #     cost_file_name = "logs/win_costs.csv"
        # elif result == 0:
        #     cost_file_name = "logs/draw_costs.csv"
        # else:
        #     cost_file_name = "logs/loss_costs.csv"
        # with open(cost_file_name, "a") as cost_file:
        #     cost_string = (str(count) + ","
        #                    + str(chess.turn_num) + ","
        #                    + str(total_cost[0]) + ","
        #                    + str(avg_cost[0]) + ","
        #                    + str(last_cost[0]) + "\n")
        #     cost_file.write(cost_string)
        # TODO SAVE COSTS FOR GRAPHING LATER
        # print("Game", count, ", Total Cost:", "{:.2f}".format(total_cost[0]), "Last cost: ", cost, "Turns: ", chess.turn_num, ", Randomness:", "{:.2f}".format(randomness), ", lr:", "{:.4f}".format(lr), "lr factor:", "{:.3f}".format(lr_factor), ", Winner:", chess.winner, ", Val:", "{:.5f}".format(val))
        if count % 200 == 0:  # every X games test and backup
            best_nn = test_and_backup(nn, best_nn, count)
            # test_and_backup(nn, best_nn, count)
            save_nn("latest", nn)
    with open("logs/log.txt", "a") as log_file:
        log_file.write("\n=========== Training Run Ended ================\n")
    # Persist counters/hyper-parameters for a future continu=True resume.
    settings = {
        "count": count,
        "result_counts": result_counts,
        "lr": lr,
        "lr_min": lr_min,
        "rand": randomness,
        "rand_min": rand_min
    }
    with open("saved_nns/progress.txt", "w") as f:
        f.write(json.dumps(settings))
def start_threads(count):
    """Spawn `count` threads running run_thread and return them.

    Returning the Thread objects (previously discarded) lets callers join
    them; existing callers that ignored the None return are unaffected.
    """
    threads = []
    for _ in range(count):  # loop index was unused
        thread = threading.Thread(target=run_thread)
        thread.start()
        threads.append(thread)
    return threads
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import json
from _thread import start_new_thread as start
from time import sleep

from logger import logexception

# Shared, periodically refreshed configuration mapping.
config = {}


def getconfig():
    """Reload the global `config` dict from config.json.

    Failures (missing/unreadable file, invalid JSON) are logged rather than
    raised, leaving the previous config in place.
    """
    global config
    try:
        with open('config.json') as fp:
            config = json.load(fp)
    # FIX: the tuple (Exception, OSError, ValueError) was redundant —
    # OSError and ValueError are both subclasses of Exception.
    except Exception as err:
        logexception(err)


def loopgetconfig():
    """Refresh the config every 3 minutes, forever (run in its own thread)."""
    while True:
        getconfig()
        sleep(180)


# Kick off the background refresher at import time.
start(loopgetconfig, ())
def test_start_thread_again(self): thread = threading.Thread() thread.start() self.assertRaises(RuntimeError, thread.start)
def test_daemonize_active_thread(self): thread = threading.Thread() thread.start() self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
    # NOTE(review): this loop is the tail of producer(idnum, dataqueue) —
    # its `def` line sits above this chunk. It pushes numbered messages onto
    # the shared queue, pausing idnum seconds between each.
    for msgnum in range(nummessages):
        time.sleep(idnum)
        dataqueue.put('[producer id=%d, count=%d]' % (idnum, msgnum))

def consumer(idnum, dataqueue):
    """Poll the shared queue forever, printing each item under the
    safeprint lock; an empty queue is simply retried after 0.1 s."""
    while True:
        time.sleep(0.1)
        try:
            data = dataqueue.get(block=False)
        except queue.Empty:
            pass
        else:
            with safeprint:
                print('Consumer', idnum, 'got =>', data)

if __name__ == '__main__':
    # Consumers are daemons (they loop forever); producers are joined so the
    # program exits once all messages have been produced.
    for i in range(numconsumers):
        thread = threading.Thread(target=consumer, args=(i, dataQueue))
        thread.daemon = True  # else cannot exit!
        thread.start()
    waitfor = []
    for i in range(numproducers):
        thread = threading.Thread(target=producer, args=(i, dataQueue))
        waitfor.append(thread)
        thread.start()
    for thread in waitfor:
        thread.join()  # or time.sleep() long enough sleep
    print('Main thread exit.')
def consumer(idnum, dataqueue):
    """Drain `dataqueue` forever: every 0.1 s try a non-blocking get and,
    when an item arrives, print it while holding the safeprint lock."""
    while True:
        time.sleep(0.1)
        try:
            item = dataqueue.get(block=False)
        except queue.Empty:
            continue
        with safeprint:
            print('Consumer', idnum, 'got =>', item)

if __name__ == '__main__':
    # Daemon consumers run forever; the program ends when producers finish.
    for n in range(numconsumers):
        worker = threading.Thread(target=consumer, args=(n, dataQueue))
        worker.daemon = True  # else cannot exit!
        worker.start()
    producers = []
    for n in range(numproducers):
        worker = threading.Thread(target=producer, args=(n, dataQueue))
        producers.append(worker)
        worker.start()
    for worker in producers:
        worker.join()  # or time.sleep() long enough sleep
    print('Main thread exit.')
def test_start_thread_again(self): thread = threading.Thread() _thread.start() self.assertRaises(RuntimeError, _thread.start)
def test_daemonize_active_thread(self): thread = threading.Thread() _thread.start() self.assertRaises(RuntimeError, setattr, thread, "daemon", True)